diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5b53f517..b5001ee8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -43,10 +43,6 @@ jobs:
run: poetry run pytest -rP .
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
- REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
- GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
- COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
HUMANLOOP_API_KEY: ${{ secrets.HUMANLOOP_API_KEY }}
publish:
diff --git a/poetry.lock b/poetry.lock
index 4ce5d536..cfe8a240 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -78,13 +78,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
-version = "2025.1.31"
+version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
- {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
+ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.0"
+version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"},
- {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"},
+ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
+ {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
]
[package.dependencies]
@@ -403,29 +403,29 @@ typing-extensions = ">=4.10,<5"
[[package]]
name = "h11"
-version = "0.14.0"
+version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
- {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+ {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.75.0"
+version = "1.76.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"},
- {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"},
+ {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
+ {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
]
[package.dependencies]
@@ -931,30 +931,30 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"},
- {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"},
- {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
]
[package.dependencies]
@@ -962,77 +962,77 @@ anthropic = ">=0.17.0"
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"},
- {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"},
- {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"},
- {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"},
- {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-proto"
@@ -1081,13 +1081,13 @@ opentelemetry-api = "1.32.1"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.3"
+version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
]
[[package]]
@@ -1320,18 +1320,18 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"]
[[package]]
name = "pydantic"
-version = "2.11.3"
+version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"},
- {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"},
+ {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
+ {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.33.1"
+pydantic-core = "2.33.2"
typing-extensions = ">=4.12.2"
typing-inspection = ">=0.4.0"
@@ -1341,110 +1341,110 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.33.1"
+version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"},
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5183e4f6a2d468787243ebcd70cf4098c247e60d73fb7d68d5bc1e1beaa0c4db"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:398a38d323f37714023be1e0285765f0a27243a8b1506b7b7de87b647b517e48"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3776f0001b43acebfa86f8c64019c043b55cc5a6a2e313d728b5c95b46969"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c566dd9c5f63d22226409553531f89de0cac55397f2ab8d97d6f06cfce6d947e"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d5f3acc81452c56895e90643a625302bd6be351e7010664151cc55b7b97f89"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3a07fadec2a13274a8d861d3d37c61e97a816beae717efccaa4b36dfcaadcde"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f99aeda58dce827f76963ee87a0ebe75e648c72ff9ba1174a253f6744f518f65"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:902dbc832141aa0ec374f4310f1e4e7febeebc3256f00dc359a9ac3f264a45dc"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe44d56aa0b00d66640aa84a3cbe80b7a3ccdc6f0b1ca71090696a6d4777c091"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win32.whl", hash = "sha256:ed3eb16d51257c763539bde21e011092f127a2202692afaeaccb50db55a31383"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win_amd64.whl", hash = "sha256:694ad99a7f6718c1a498dc170ca430687a39894a60327f548e02a9c7ee4b6504"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5ab77f45d33d264de66e1884fca158bc920cb5e27fd0764a72f72f5756ae8bdb"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7aaba1b4b03aaea7bb59e1b5856d734be011d3e6d98f5bcaa98cb30f375f2ad"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb66263e9ba8fea2aa85e1e5578980d127fb37d7f2e292773e7bc3a38fb0c7b"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f2648b9262607a7fb41d782cc263b48032ff7a03a835581abbf7a3bec62bcf5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723c5630c4259400818b4ad096735a829074601805d07f8cafc366d95786d331"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d100e3ae783d2167782391e0c1c7a20a31f55f8015f3293647544df3f9c67824"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177d50460bc976a0369920b6c744d927b0ecb8606fb56858ff542560251b19e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3edde68d1a1f9af1273b2fe798997b33f90308fb6d44d8550c89fc6a3647cf6"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a62c3c3ef6a7e2c45f7853b10b5bc4ddefd6ee3cd31024754a1a5842da7d598d"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:c91dbb0ab683fa0cd64a6e81907c8ff41d6497c346890e26b23de7ee55353f96"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f466e8bf0a62dc43e068c12166281c2eca72121dd2adc1040f3aa1e21ef8599"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win32.whl", hash = "sha256:ab0277cedb698749caada82e5d099dc9fed3f906a30d4c382d1a21725777a1e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win_amd64.whl", hash = "sha256:5773da0ee2d17136b1f1c6fbde543398d452a6ad2a7b54ea1033e2daa739b8d2"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c834f54f8f4640fd7e4b193f80eb25a0602bba9e19b3cd2fc7ffe8199f5ae02"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:049e0de24cf23766f12cc5cc71d8abc07d4a9deb9061b334b62093dedc7cb068"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a28239037b3d6f16916a4c831a5a0eadf856bdd6d2e92c10a0da3a59eadcf3e"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3da303ab5f378a268fa7d45f37d7d85c3ec19769f28d2cc0c61826a8de21fe"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25626fb37b3c543818c14821afe0fd3830bc327a43953bc88db924b68c5723f1"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3ab2d36e20fbfcce8f02d73c33a8a7362980cff717926bbae030b93ae46b56c7"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2f9284e11c751b003fd4215ad92d325d92c9cb19ee6729ebd87e3250072cdcde"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:048c01eee07d37cbd066fc512b9d8b5ea88ceeb4e629ab94b3e56965ad655add"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5ccd429694cf26af7997595d627dd2637e7932214486f55b8a357edaac9dae8c"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7edbc454a29fc6aeae1e1eecba4f07b63b8d76e76a748532233c4c167b4cb9ea"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ad05b683963f69a1d5d2c2bdab1274a31221ca737dbbceaa32bcb67359453cdd"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df6a94bf9452c6da9b5d76ed229a5683d0306ccb91cca8e1eea883189780d568"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7965c13b3967909a09ecc91f21d09cfc4576bf78140b988904e94f130f188396"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3f1fdb790440a34f6ecf7679e1863b825cb5ffde858a9197f851168ed08371e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5277aec8d879f8d05168fdd17ae811dd313b8ff894aeeaf7cd34ad28b4d77e33"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8ab581d3530611897d863d1a649fb0644b860286b4718db919bfd51ece41f10b"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0483847fa9ad5e3412265c1bd72aad35235512d9ce9d27d81a56d935ef489672"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:de9e06abe3cc5ec6a2d5f75bc99b0bdca4f5c719a5b34026f8c57efbdecd2ee3"},
- {file = "pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
+ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
]
[package.dependencies]
@@ -1729,13 +1729,13 @@ files = [
[[package]]
name = "replicate"
-version = "1.0.4"
+version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
files = [
- {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"},
- {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"},
+ {file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
+ {file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index ad96beec..9dddf812 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.35"
+version = "0.8.36b1"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 27a32c92..26f361d0 100644
--- a/reference.md
+++ b/reference.md
@@ -56,7 +56,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -1501,7 +1501,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
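A hedged sketch of the two accepted forms, since the parameter is polymorphic (the model names and values below are illustrative assumptions, not taken from this diff):

```python
# Sketch only: the provider determines which reasoning_effort form applies.
openai_prompt = {"model": "o3-mini", "reasoning_effort": "medium"}  # OpenAI: OpenAIReasoningEffort enum value
anthropic_prompt = {"model": "claude-3-7-sonnet-latest", "reasoning_effort": 1024}  # Anthropic: max reasoning-token budget
```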
@@ -2518,8 +2518,7 @@ client.prompts.update_monitoring(
-## Tools
-client.tools.log(...)
+client.prompts.serialize(...)
-
@@ -2531,15 +2530,13 @@ client.prompts.update_monitoring(
-
-Log to a Tool.
+Serialize a Prompt to the .prompt file format.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Tool. Otherwise the default deployed version will be chosen.
+Useful for storing the Prompt with your code in a version control system,
+or for editing with an AI tool.
-Instead of targeting an existing version explicitly, you can instead pass in
-Tool details in the request body. In this case, we will check if the details correspond
-to an existing version of the Tool, if not we will create a new version. This is helpful
-in the case where you are storing or deriving your Tool details in code.
+By default, the deployed version of the Prompt is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Prompt.
@@ -2559,24 +2556,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
+client.prompts.serialize(
+ id="id",
)
```
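Since serialization is intended for version control, a minimal follow-up sketch (assuming the call returns the .prompt text as a string; the file path is hypothetical):

```python
# Sketch only: persist the serialized Prompt alongside your code.
content = client.prompts.serialize(id="id")
with open("prompts/example.prompt", "w") as f:  # hypothetical path
    f.write(content)
```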
@@ -2593,7 +2574,7 @@ client.tools.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
+**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2582,7 @@ client.tools.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2590,7 @@ client.tools.log(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2598,72 @@ client.tools.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.prompts.deserialize(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+
+#### 🔌 Usage
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.deserialize(
+ prompt="prompt",
+)
+
+```
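A hedged round-trip sketch pairing this with `serialize` (assumes the .prompt text was previously written to the hypothetical path used above):

```python
# Sketch only: read a .prompt file back and recover the
# version-defining attributes (model, temperature, etc.).
with open("prompts/example.prompt") as f:
    kernel = client.prompts.deserialize(prompt=f.read())
```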
+
+
+#### ⚙️ Parameters
+
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+-
+
+**prompt:** `str`
@@ -2649,15 +2671,78 @@ client.tools.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## Tools
+client.tools.call(...)
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.call()
+
+```
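The bare call above uses the default deployed version; a hedged sketch targeting one explicitly via the documented parameters (values are illustrative):

```python
# Sketch only: call a specific deployed Tool rather than the default.
client.tools.call(
    path="math-tool",         # locates the Tool in the Humanloop filesystem
    environment="production", # deployed version to call
)
```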
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2750,7 @@ client.tools.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2758,7 @@ client.tools.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2766,7 @@ client.tools.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2774,7 @@ client.tools.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2806,7 @@ client.tools.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2814,7 @@ client.tools.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2822,7 @@ client.tools.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2830,7 @@ client.tools.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2838,7 @@ client.tools.log(
-
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2846,7 @@ client.tools.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2854,7 @@ client.tools.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
@@ -2777,7 +2862,15 @@ client.tools.log(
-
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2890,7 @@ client.tools.log(
-client.tools.update(...)
+client.tools.log(...)
-
@@ -2809,9 +2902,15 @@ client.tools.log(
-
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool; if not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2930,24 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update(
- id="id",
- log_id="log_id",
+client.tools.log(
+ path="math-tool",
+ tool={
+ "function": {
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": "number"},
+ },
+ "required": ["a", "b"],
+ },
+ }
+ },
+ inputs={"a": 5, "b": 7},
+ output="35",
)
```
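+
+If you have already created the version, you can target it directly instead of re-sending the Tool details; a minimal sketch, where the Version ID is a hypothetical placeholder:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Log against a specific existing Tool version rather than passing
+# the full Tool details in the request body.
+client.tools.log(
+    version_id="tv_1234567890",  # hypothetical Version ID
+    inputs={"a": 5, "b": 7},
+    output="35",
+)
+
+```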
@@ -2850,7 +2964,7 @@ client.tools.update(
-
-**id:** `str` — Unique identifier for Prompt.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2972,7 @@ client.tools.update(
-
-**log_id:** `str` — Unique identifier for the Log.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2980,7 @@ client.tools.update(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +2988,7 @@ client.tools.update(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +2996,7 @@ client.tools.update(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3004,7 @@ client.tools.update(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3012,7 @@ client.tools.update(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3020,7 @@ client.tools.update(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3028,7 @@ client.tools.update(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3036,7 @@ client.tools.update(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3044,7 @@ client.tools.update(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3052,7 @@ client.tools.update(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3060,7 @@ client.tools.update(
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
@@ -2954,7 +3068,7 @@ client.tools.update(
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3076,7 @@ client.tools.update(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
@@ -2970,31 +3084,288 @@ client.tools.update(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.tools.list(...)
-
-#### 📝 Description
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
-
-Get a list of all Tools.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.tools.update(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.update(
+ id="id",
+ log_id="log_id",
+)
+
+```
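+
+Beyond the minimal example, other documented fields can be corrected in the same call; a sketch with hypothetical identifiers:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Overwrite the recorded output and attach extra metadata to an existing Log.
+client.tools.update(
+    id="tl_1234567890",  # hypothetical Tool ID
+    log_id="log_1234567890",  # hypothetical Log ID
+    output="42",
+    metadata={"corrected_by": "offline-review"},
+)
+
+```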
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Tool.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.tools.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all Tools.
@@ -4083,12 +4454,11 @@ client.tools.update_monitoring(
-## Datasets
-client.datasets.list(...)
+client.tools.get_environment_variables(...)
-
-#### 📝 Description
+#### 🔌 Usage
-
@@ -4096,8 +4466,209 @@ client.tools.update_monitoring(
-
-List all Datasets.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.get_environment_variables(
+ id="id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for File.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.tools.add_environment_variable(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Add an environment variable to a Tool.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Tool.
+
+
+
+
+
+-
+
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]` — The Environment Variables to add to the Tool.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.tools.delete_environment_variable(...)
+
+-
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for File.
+
+
+
+
+
+-
+
+**name:** `str` — Name of the Environment Variable to delete.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Datasets
+client.datasets.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Datasets.
+
@@ -6112,7 +6683,2904 @@ client.evaluators.move(
-
-**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of an Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_versions(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for the Evaluator.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.delete_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Evaluator version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Evaluator to an Environment.
+
+Set the deployed version for the specified Environment. This Evaluator
+will be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.set_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+ version_id="evv_012def",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Evaluator from the Environment.
+
+Remove the deployed version for the specified Environment. This Evaluator
+will no longer be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.remove_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_environments(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Evaluator.
+
+An activated Evaluator will automatically be run on all new Logs
+within the Evaluator for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_monitoring(
+ id="id",
+)
+
+```
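+
+The generated example omits the activation payload; a sketch of attaching and detaching monitoring Evaluator versions (the second Version ID is a hypothetical placeholder):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Run one Evaluator version on all new Logs; stop running another.
+client.evaluators.update_monitoring(
+    id="ev_890bcd",
+    activate=[{"evaluator_version_id": "evv_012def"}],
+    deactivate=[{"evaluator_version_id": "evv_345ghi"}],  # hypothetical ID
+)
+
+```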
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Flows
+client.flows.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Log to a Flow.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.log(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ flow={
+ "attributes": {
+ "prompt": {
+ "template": "You are a helpful assistant helping with medical anamnesis",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ }
+ },
+ inputs={
+ "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="incomplete",
+ start_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:35+00:00",
+ ),
+ end_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:39+00:00",
+ ),
+)
+
+```
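+
+A typical trace workflow nests child Logs under the incomplete Flow Log and then marks it complete; a minimal sketch, assuming the log response exposes the new Log's ID as `.id`:
+
+```python
+flow_log = client.flows.log(
+    id="fl_6o701g4jmcanPVHxdqD0O",
+    inputs={"question": "..."},
+    log_status="incomplete",
+)
+# Nest a child Log (here a Tool Log) under the trace via trace_parent_id.
+client.tools.log(
+    path="math-tool",
+    inputs={"a": 5, "b": 7},
+    output="35",
+    trace_parent_id=flow_log.id,  # assumes the response exposes `.id`
+)
+# Completing the Flow Log makes it available to monitoring Evaluators.
+client.flows.update_log(
+    log_id=flow_log.id,
+    inputs={"question": "..."},
+    output="...",
+    log_status="complete",
+)
+
+```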
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the status, inputs, output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The `end_time` log attribute will be set to match the time the log is marked as complete.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_log(
+ log_id="medqa_experiment_0001",
+ inputs={
+ "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — Unique identifier of the Flow Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Flow with the given ID.
+
+By default, the deployed version of the Flow is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.get(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Flow with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Flow to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.move(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Flow.
+
+
+
+
+
+-
+
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of Flows.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+ size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create or update a Flow.
+
+Flows can be identified by their `ID` or `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow; attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+ path="Personal Projects/MedQA Flow",
+ attributes={
+ "prompt": {
+ "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ "version_name": "medqa-flow-v1",
+ "version_description": "Initial version",
+ },
+)
+
+```
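+
+Calling `upsert` again with changed details registers a new version, provided the `version_name` is new; re-using an existing name returns a 409 Conflict. A sketch with hypothetical values:
+
+```python
+client.flows.upsert(
+    path="Personal Projects/MedQA Flow",
+    attributes={
+        "prompt": {
+            "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+            "model": "gpt-4o",
+            "temperature": 0.5,  # changed attribute, so a new version is created
+        },
+    },
+    version_name="medqa-flow-v2",  # must not clash with an existing name
+    version_description="Lower temperature",
+)
+
+```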
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of a Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_versions(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Flow version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Flow to an Environment.
+
+Set the deployed version for the specified Environment. This Flow
+will be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.set_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+ version_id="flv_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Flow from the Environment.
+
+Remove the deployed version for the specified Environment. This Flow
+will no longer be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.remove_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_environments(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Flow.
+
+An activated Evaluator will automatically be run on all new `complete` Logs
+within the Flow for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```
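+
+Deactivation uses the same payload shape:
+
+```python
+# Stop running the previously activated Evaluator version on new Logs.
+client.flows.update_monitoring(
+    id="fl_6o701g4jmcanPVHxdqD0O",
+    deactivate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```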
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Agents
+client.agents.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.log(
+ path="Banking/Teller Agent",
+ agent={
+ "provider": "anthropic",
+ "endpoint": "chat",
+ "model": "claude-3-7-sonnet-latest",
+ "reasoning_effort": 1024,
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ "max_iterations": 3,
+ "tools": [
+ {
+ "type": "file",
+ "link": {
+ "file_id": "pr_1234567890",
+ "version_id": "prv_1234567890",
+ },
+ "on_agent_call": "continue",
+ },
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
+ },
+ "on_agent_call": "stop",
+ },
+ ],
+ },
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
+
+
+
+
+
+-
+
+**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
+
+
+
+
+
+-
+
+**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
+
+
+
+
+
+-
+
+**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
+
+
+
+
+
+-
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the prompt.
+
+
+
+
+
+-
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the output.
+
+
+
+
+
+-
+
+**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function, as in the sketch below.
+
+
+
+
+
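+For example, a minimal sketch forcing the Agent to call the `stop` tool defined in the usage example above:
+
+```python
+client.agents.log(
+    path="Banking/Teller Agent",
+    messages=[{"role": "user", "content": "Close my session."}],
+    tool_choice={"type": "function", "function": {"name": "stop"}},
+)
+
+```
+
+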
+-
+
+**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.update_log(
+ id="ag_1234567890",
+ log_id="log_1234567890",
+ messages=[
+ {"role": "user", "content": "I need to withdraw $1000"},
+ {
+ "role": "assistant",
+ "content": "Of course! Would you like to use your savings or checking account?",
+ },
+ ],
+ output_message={
+ "role": "assistant",
+ "content": "I'm sorry, I can't help with that.",
+ },
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Agent.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Agent Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+according to its configuration.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. A new version is created if the details do not
+match any existing ones. This is helpful when you are storing or deriving
+your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+    print(chunk)
+
+```
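+
+The generated example passes no arguments; in practice you target an Agent and supply messages, mirroring `client.agents.call(...)`. A sketch:
+
+```python
+response = client.agents.call_stream(
+    path="Banking/Teller Agent",
+    messages=[
+        {
+            "role": "user",
+            "content": "I'd like to deposit $1000 to my savings account from my checking account.",
+        }
+    ],
+)
+for chunk in response.data:
+    print(chunk)
+
+```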
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6120,7 +9588,7 @@ client.evaluators.move(
-
-**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6140,7 +9608,7 @@ client.evaluators.move(
-client.evaluators.list_versions(...)
+client.agents.call(...)
-
@@ -6152,7 +9620,21 @@ client.evaluators.move(
-
-Get a list of all the versions of an Evaluator.
+Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+according to its configuration.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. A new version is created if the details do not
+match any existing ones. This is helpful when you are storing or deriving
+your Agent details in code.
@@ -6172,8 +9654,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_versions(
- id="ev_890bcd",
+client.agents.call(
+ path="Banking/Teller Agent",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'd like to deposit $1000 to my savings account from my checking account.",
+ }
+ ],
)
```
@@ -6190,7 +9678,7 @@ client.evaluators.list_versions(
-
-**id:** `str` — Unique identifier for the Evaluator.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
@@ -6198,7 +9686,7 @@ client.evaluators.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6206,70 +9694,61 @@ client.evaluators.list_versions(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-
+
+-
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
-
-client.evaluators.delete_evaluator_version(...)
-
-#### 📝 Description
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
-
-
--
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
-Delete a version of the Evaluator.
-
-
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': '<function_name>'}}` forces the model to use the named function.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+
-#### ⚙️ Parameters
-
-
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
@@ -6277,7 +9756,7 @@ client.evaluators.delete_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
@@ -6285,70 +9764,71 @@ client.evaluators.delete_evaluator_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
-
+
+-
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
-
-client.evaluators.update_evaluator_version(...)
-
-#### 📝 Description
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
-
-Update the name or description of the Evaluator version.
-
-
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
-#### 🔌 Usage
-
-
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
-#### ⚙️ Parameters
-
-
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -6356,7 +9836,7 @@ client.evaluators.update_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
@@ -6364,7 +9844,7 @@ client.evaluators.update_evaluator_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6372,7 +9852,7 @@ client.evaluators.update_evaluator_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6392,7 +9872,7 @@ client.evaluators.update_evaluator_version(
-client.evaluators.set_deployment(...)
+client.agents.continue_call_stream(...)
-
@@ -6404,10 +9884,15 @@ client.evaluators.update_evaluator_version(
-
-Deploy Evaluator to an Environment.
+Continue an incomplete Agent call.
-Set the deployed version for the specified Environment. This Evaluator
-will be used for calls made to the Evaluator in this Environment.
+This endpoint allows continuing an existing incomplete Agent call by passing in the results
+of the tool calls requested by the Agent. The Agent will resume processing from where it left off.
+
+The messages in the request will be appended to the original messages in the Log. You do not
+have to provide the previous conversation history.
+
+The original Log must be in an `incomplete` state to be continued.
@@ -6427,11 +9912,12 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
+response = client.agents.continue_call_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
+for chunk in response.data:
+    print(chunk)
```
@@ -6447,7 +9933,7 @@ client.evaluators.set_deployment(
-
-**id:** `str` — Unique identifier for Evaluator.
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6455,7 +9941,7 @@ client.evaluators.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
@@ -6463,7 +9949,15 @@ client.evaluators.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
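+
+As an example of passing per-request provider keys on this endpoint (a sketch;
+it assumes `ProviderApiKeysParams` is a mapping keyed by provider name, and the
+key value is a placeholder):
+
+```python
+# Continuing with the `client` from the Usage example above.
+response = client.agents.continue_call_stream(
+    log_id="log_id",
+    messages=[
+        {
+            "role": "tool",
+            "content": '{"type": "checking", "balance": 5200}',
+            "tool_call_id": "tc_1234567890",
+        }
+    ],
+    # Used for this request only; never stored by Humanloop.
+    provider_api_keys={"anthropic": "YOUR_ANTHROPIC_API_KEY"},
+)
+for chunk in response.data:
+    print(chunk)
+```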
@@ -6483,7 +9977,7 @@ client.evaluators.set_deployment(
-client.evaluators.remove_deployment(...)
+client.agents.continue_call(...)
-
@@ -6495,10 +9989,15 @@ client.evaluators.set_deployment(
-
-Remove deployed Evaluator from the Environment.
+Continue an incomplete Agent call.
-Remove the deployed version for the specified Environment. This Evaluator
-will no longer be used for calls made to the Evaluator in this Environment.
+This endpoint allows continuing an existing incomplete Agent call by passing in the results
+of the tool calls requested by the Agent. The Agent will resume processing from where it left off.
+
+The messages in the request will be appended to the original messages in the Log. You do not
+have to provide the previous conversation history.
+
+The original Log must be in an `incomplete` state to be continued.
@@ -6518,9 +10017,15 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
+client.agents.continue_call(
+ log_id="log_1234567890",
+ messages=[
+ {
+ "role": "tool",
+ "content": '{"type": "checking", "balance": 5200}',
+ "tool_call_id": "tc_1234567890",
+ }
+ ],
)
```
@@ -6537,7 +10042,7 @@ client.evaluators.remove_deployment(
-
-**id:** `str` — Unique identifier for Evaluator.
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6545,7 +10050,23 @@ client.evaluators.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6565,7 +10086,7 @@ client.evaluators.remove_deployment(
-client.evaluators.list_environments(...)
+client.agents.list(...)
-
@@ -6577,7 +10098,7 @@ client.evaluators.remove_deployment(
-
-List all Environments and their deployed versions for the Evaluator.
+Get a list of all Agents.
@@ -6597,9 +10118,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_environments(
- id="ev_890bcd",
+response = client.agents.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -6615,80 +10141,31 @@ client.evaluators.list_environments(
-
-**id:** `str` — Unique identifier for Evaluator.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
-
-
-
-client.evaluators.update_monitoring(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Activate and deactivate Evaluators for monitoring the Evaluator.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Evaluator for monitoring purposes.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.update_monitoring(
- id="id",
-)
-
-```
-
-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
+
-#### ⚙️ Parameters
-
-
+**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
+
+
+
+
-
-**id:** `str`
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both the email addresses and names of users.
@@ -6696,9 +10173,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -6706,9 +10181,7 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6728,8 +10201,7 @@ client.evaluators.update_monitoring(
-## Flows
-client.flows.log(...)
+client.agents.upsert(...)
-
@@ -6741,13 +10213,14 @@ client.evaluators.update_monitoring(
-
-Log to a Flow.
+Create an Agent or update it with a new version if it already exists.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+Agents are identified by their `ID` or `path`. The parameters (i.e. the template, temperature, model, etc.) and
+tools determine the versions of the Agent.
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -6762,40 +10235,48 @@ in order to trigger Evaluators.
-
```python
-import datetime
-
from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+client.agents.upsert(
+ path="Banking/Teller Agent",
+ provider="anthropic",
+ endpoint="chat",
+ model="claude-3-7-sonnet-latest",
+ reasoning_effort=1024,
+ template=[
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ max_iterations=3,
+ tools=[
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
},
+ "on_agent_call": "stop",
}
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
- ),
+ ],
+ version_name="teller-agent-v1",
+ version_description="Initial version",
)
```
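+
+Upserting again with a changed parameter and a new `version_name` creates a
+second version of the same Agent; reusing an existing `version_name` would
+instead return the 409 Conflict described above (values are illustrative):
+
+```python
+client.agents.upsert(
+    path="Banking/Teller Agent",
+    model="claude-3-7-sonnet-latest",
+    temperature=0.5,
+    version_name="teller-agent-v2",
+    version_description="Lower temperature",
+)
+```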
@@ -6812,7 +10293,7 @@ client.flows.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -6820,7 +10301,7 @@ client.flows.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6828,7 +10309,7 @@ client.flows.log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6836,7 +10317,7 @@ client.flows.log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -6844,23 +10325,14 @@ client.flows.log(
-
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
-
-
-
-
--
+**template:** `typing.Optional[AgentRequestTemplateParams]`
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
-
--
+For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+For completion models, provide a prompt template as a string.
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
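+
+For example, a chat template with a single input variable might look like the
+following sketch (the variable name is illustrative):
+
+```python
+template = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    # `{{question}}` is populated from `inputs` when the Agent runs.
+    {"role": "user", "content": "Answer this question: {{question}}"},
+]
+```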
@@ -6868,7 +10340,7 @@ client.flows.log(
-
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -6876,7 +10348,7 @@ client.flows.log(
-
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -6884,7 +10356,7 @@ client.flows.log(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide `max_tokens=-1` to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
@@ -6892,7 +10364,7 @@ client.flows.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
@@ -6900,7 +10372,7 @@ client.flows.log(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
@@ -6908,7 +10380,7 @@ client.flows.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
@@ -6916,7 +10388,7 @@ client.flows.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
@@ -6924,7 +10396,7 @@ client.flows.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
@@ -6932,7 +10404,7 @@ client.flows.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
@@ -6940,7 +10412,7 @@ client.flows.log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
@@ -6948,7 +10420,7 @@ client.flows.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
@@ -6956,7 +10428,7 @@ client.flows.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
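+
+For example (a sketch; the model names and effort values are illustrative, and
+the exact enum values are defined by `OpenAiReasoningEffort`):
+
+```python
+# OpenAI reasoning models take an effort level...
+client.agents.upsert(path="Example Agent", model="o3-mini", reasoning_effort="medium")
+# ...while Anthropic reasoning models take a maximum token budget.
+client.agents.upsert(path="Example Agent", model="claude-3-7-sonnet-latest", reasoning_effort=1024)
+```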
@@ -6964,7 +10436,7 @@ client.flows.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
@@ -6972,7 +10444,7 @@ client.flows.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
@@ -6980,7 +10452,7 @@ client.flows.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
@@ -6988,7 +10460,7 @@ client.flows.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name.
@@ -6996,7 +10468,7 @@ client.flows.log(
-
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -7004,7 +10476,7 @@ client.flows.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**description:** `typing.Optional[str]` — Description of the Agent.
@@ -7012,7 +10484,7 @@ client.flows.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7020,7 +10492,7 @@ client.flows.log(
-
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7040,7 +10512,7 @@ client.flows.log(
-client.flows.update_log(...)
+client.agents.delete_agent_version(...)
-
@@ -7052,12 +10524,7 @@ client.flows.log(
-
-Update the status, inputs, output of a Flow Log.
-
-Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
-Inputs and output (or error) must be provided in order to mark it as complete.
-
-The end_time log attribute will be set to match the time the log is marked as complete.
+Delete a version of the Agent.
@@ -7077,13 +10544,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
+client.agents.delete_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
)
```
@@ -7100,47 +10563,7 @@ client.flows.update_log(
-
-**log_id:** `str` — Unique identifier of the Flow Log.
-
-
-
-
-
--
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
-
-
-
-
-
--
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
-
-
-
-
--
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
-
-
-
-
-
--
-
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
-
-
-
-
-
--
-
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**id:** `str` — Unique identifier for Agent.
@@ -7148,7 +10571,7 @@ client.flows.update_log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7168,7 +10591,7 @@ client.flows.update_log(
-client.flows.get(...)
+client.agents.patch_agent_version(...)
-
@@ -7180,10 +10603,7 @@ client.flows.update_log(
-
-Retrieve the Flow with the given ID.
-
-By default, the deployed version of the Flow is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Flow.
+Update the name or description of the Agent version.
@@ -7203,8 +10623,11 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.patch_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
+ name="teller-agent-v2",
+ description="Updated version",
)
```
@@ -7221,15 +10644,7 @@ client.flows.get(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+**id:** `str` — Unique identifier for Agent.
@@ -7237,7 +10652,7 @@ client.flows.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7245,69 +10660,15 @@ client.flows.get(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**name:** `typing.Optional[str]` — Name of the version.
-
-
-
-
-
-
-
-
-client.flows.delete(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Delete the Flow with the given ID.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-**id:** `str` — Unique identifier for Flow.
+**description:** `typing.Optional[str]` — Description of the version.
@@ -7327,7 +10688,7 @@ client.flows.delete(
-client.flows.move(...)
+client.agents.get(...)
-
@@ -7339,7 +10700,10 @@ client.flows.delete(
-
-Move the Flow to a different path or change the name.
+Retrieve the Agent with the given ID.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -7359,9 +10723,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
+client.agents.get(
+ id="ag_1234567890",
)
```
@@ -7378,15 +10741,7 @@ client.flows.move(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Agent.
@@ -7394,7 +10749,7 @@ client.flows.move(
-
-**name:** `typing.Optional[str]` — Name of the Flow.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
@@ -7402,7 +10757,7 @@ client.flows.move(
-
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -7422,7 +10777,7 @@ client.flows.move(
-client.flows.list(...)
+client.agents.delete(...)
-
@@ -7434,7 +10789,7 @@ client.flows.move(
-
-Get a list of Flows.
+Delete the Agent with the given ID.
@@ -7454,14 +10809,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.flows.list(
- size=1,
+client.agents.delete(
+ id="ag_1234567890",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -7477,47 +10827,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
--
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
-
-
-
-
--
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
-
-
-
-
--
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
-
-
-
-
--
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10847,7 @@ for page in response.iter_pages():
-client.flows.upsert(...)
+client.agents.move(...)
-
@@ -7549,13 +10859,7 @@ for page in response.iter_pages():
-
-Create or update a Flow.
-
-Flows can also be identified by the `ID` or their `path`.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Flow - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Move the Agent to a different path or change the name.
@@ -7575,22 +10879,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
+client.agents.move(
+ id="ag_1234567890",
+ path="new directory/new name",
)
```
@@ -7607,15 +10898,7 @@ client.flows.upsert(
-
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10906,7 @@ client.flows.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10914,7 @@ client.flows.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10922,7 @@ client.flows.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10942,7 @@ client.flows.upsert(
-client.flows.list_versions(...)
+client.agents.list_versions(...)
-
@@ -7671,7 +10954,7 @@ client.flows.upsert(
-
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10974,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.list_versions(
+ id="ag_1234567890",
)
```
@@ -7709,7 +10992,7 @@ client.flows.list_versions(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +11020,7 @@ client.flows.list_versions(
-client.flows.delete_flow_version(...)
+client.agents.set_deployment(...)
-
@@ -7749,7 +11032,10 @@ client.flows.list_versions(
-
-Delete a version of the Flow.
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This Agent
+will be used for calls made to the Agent in this Environment.
@@ -7769,8 +11055,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete_flow_version(
+client.agents.set_deployment(
id="id",
+ environment_id="environment_id",
version_id="version_id",
)
@@ -7788,7 +11075,7 @@ client.flows.delete_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +11083,15 @@ client.flows.delete_flow_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +11111,7 @@ client.flows.delete_flow_version(
-client.flows.update_flow_version(...)
+client.agents.remove_deployment(...)
-
@@ -7828,7 +11123,10 @@ client.flows.delete_flow_version(
-
-Update the name or description of the Flow version.
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This Agent
+will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11146,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_flow_version(
+client.agents.remove_deployment(
id="id",
- version_id="version_id",
+ environment_id="environment_id",
)
```
@@ -7867,23 +11165,7 @@ client.flows.update_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
+**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11173,7 @@ client.flows.update_flow_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11193,7 @@ client.flows.update_flow_version(
-client.flows.set_deployment(...)
+client.agents.list_environments(...)
-
@@ -7923,10 +11205,7 @@ client.flows.update_flow_version(
-
-Deploy Flow to an Environment.
-
-Set the deployed version for the specified Environment. This Flow
-will be used for calls made to the Flow in this Environment.
+List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11225,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
+client.agents.list_environments(
+ id="ag_1234567890",
)
```
@@ -7966,23 +11243,7 @@ client.flows.set_deployment(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11263,7 @@ client.flows.set_deployment(
-client.flows.remove_deployment(...)
+client.agents.update_monitoring(...)
-
@@ -8014,10 +11275,10 @@ client.flows.set_deployment(
-
-Remove deployed Flow from the Environment.
+Activate and deactivate Evaluators for monitoring the Agent.
-Remove the deployed version for the specified Environment. This Flow
-will no longer be used for calls made to the Flow in this Environment.
+An activated Evaluator will automatically be run on all new Logs
+within the Agent for monitoring purposes.
@@ -8037,9 +11298,13 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
+client.agents.update_monitoring(
+ id="ag_1234567890",
+ activate=[
+ {"evaluator_version_id": "ev_1234567890"},
+ {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
+ ],
+ deactivate=[{"evaluator_version_id": "ev_0987654321"}],
)
```
@@ -8056,7 +11321,7 @@ client.flows.remove_deployment(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8064,7 +11329,19 @@ client.flows.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8084,7 +11361,7 @@ client.flows.remove_deployment(
-client.flows.list_environments(...)
+client.agents.serialize(...)
-
@@ -8096,7 +11373,13 @@ client.flows.remove_deployment(
-
-List all Environments and their deployed versions for the Flow.
+Serialize an Agent to the .agent file format.
+
+Useful for storing the Agent with your code in a version control system,
+or for editing with an AI tool.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -8116,8 +11399,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.serialize(
+ id="id",
)
```
@@ -8134,7 +11417,23 @@ client.flows.list_environments(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -8154,7 +11453,7 @@ client.flows.list_environments(
-client.flows.update_monitoring(...)
+client.agents.deserialize(...)
-
@@ -8166,10 +11465,10 @@ client.flows.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns a subset of the attributes required by an Agent: the subset
+that defines the Agent version (e.g. `model`, `temperature`, etc.).
@@ -8189,9 +11488,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.agents.deserialize(
+ agent="agent",
)
```
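+
+A round-trip sketch combining `serialize` and `deserialize` (this assumes
+`serialize` returns the raw `.agent` text that `deserialize` accepts):
+
+```python
+# Serialize the deployed version, e.g. to commit alongside your code...
+serialized = client.agents.serialize(
+    id="ag_1234567890",
+)
+# ...then recover the version-defining attributes from the file format.
+agent_kernel = client.agents.deserialize(
+    agent=serialized,
+)
+```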
@@ -8208,27 +11506,7 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
-
-
-
-
--
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
-
-
-
-
--
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**agent:** `str` — The raw `.agent` file content to deserialize.
@@ -10118,14 +13396,6 @@ for page in response.iter_pages():
-
-**version_status:** `typing.Optional[VersionStatus]` — If provided, only Logs belonging to Versions with the specified status will be returned.
-
-
-
-
-
--
-
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -10190,7 +13460,7 @@ for page in response.iter_pages():
-
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 0c431892..8485d75c 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,16 +1,45 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentCallResponse,
+ AgentCallResponseToolChoice,
+ AgentCallStreamResponse,
+ AgentCallStreamResponsePayload,
AgentConfigResponse,
+ AgentContinueCallResponse,
+ AgentContinueCallResponseToolChoice,
+ AgentContinueCallStreamResponse,
+ AgentContinueCallStreamResponsePayload,
+ AgentInlineTool,
+ AgentKernelRequest,
+ AgentKernelRequestReasoningEffort,
+ AgentKernelRequestStop,
+ AgentKernelRequestTemplate,
+ AgentKernelRequestToolsItem,
+ AgentLinkedFileRequest,
+ AgentLinkedFileResponse,
+ AgentLinkedFileResponseFile,
+ AgentLogResponse,
+ AgentLogResponseToolChoice,
+ AgentLogStreamResponse,
+ AgentResponse,
+ AgentResponseReasoningEffort,
+ AgentResponseStop,
+ AgentResponseTemplate,
+ AgentResponseToolsItem,
+ AnthropicRedactedThinkingContent,
+ AnthropicThinkingContent,
BaseModelsUserResponse,
BooleanEvaluatorStatsResponse,
ChatMessage,
ChatMessageContent,
ChatMessageContentItem,
+ ChatMessageThinkingItem,
ChatRole,
ChatToolType,
CodeEvaluatorRequest,
ConfigToolResponse,
+ CreateAgentLogResponse,
CreateDatapointRequest,
CreateDatapointRequestTargetValue,
CreateEvaluatorLogResponse,
@@ -55,10 +84,12 @@
EvaluatorReturnTypeEnum,
EvaluatorVersionId,
EvaluatorsRequest,
+ EventType,
ExternalEvaluatorRequest,
FeedbackType,
FileEnvironmentResponse,
FileEnvironmentResponseFile,
+ FileEnvironmentVariableRequest,
FileId,
FilePath,
FileRequest,
@@ -76,7 +107,9 @@
ImageUrl,
ImageUrlDetail,
InputResponse,
+ LinkedFileRequest,
LinkedToolResponse,
+ ListAgents,
ListDatasets,
ListEvaluators,
ListFlows,
@@ -85,6 +118,7 @@
LlmEvaluatorRequest,
LogResponse,
LogStatus,
+ LogStreamResponse,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -93,15 +127,18 @@
MonitoringEvaluatorVersionRequest,
NumericEvaluatorStatsResponse,
ObservabilityStatus,
+ OnAgentCallEnum,
+ OpenAiReasoningEffort,
OverallStats,
+ PaginatedDataAgentResponse,
PaginatedDataEvaluationLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
PaginatedDataPromptResponse,
PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
PaginatedDatapointResponse,
PaginatedDatasetResponse,
PaginatedEvaluationResponse,
@@ -110,6 +147,7 @@
PlatformAccessEnum,
PopulateTemplateResponse,
PopulateTemplateResponsePopulatedTemplate,
+ PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
ProjectSortBy,
@@ -118,15 +156,16 @@
PromptCallResponseToolChoice,
PromptCallStreamResponse,
PromptKernelRequest,
+ PromptKernelRequestReasoningEffort,
PromptKernelRequestStop,
PromptKernelRequestTemplate,
PromptLogResponse,
PromptLogResponseToolChoice,
PromptResponse,
+ PromptResponseReasoningEffort,
PromptResponseStop,
PromptResponseTemplate,
ProviderApiKeys,
- ReasoningEffort,
ResponseFormat,
ResponseFormatType,
RunStatsResponse,
@@ -139,6 +178,7 @@
TextEvaluatorStatsResponse,
TimeUnit,
ToolCall,
+ ToolCallResponse,
ToolChoice,
ToolFunction,
ToolKernelRequest,
@@ -162,7 +202,23 @@
VersionStatus,
)
from .errors import UnprocessableEntityError
-from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from .agents import (
+ AgentLogRequestToolChoice,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffort,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStop,
+ AgentRequestStopParams,
+ AgentRequestTemplate,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItem,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestToolChoice,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestToolChoice,
+ AgentsCallStreamRequestToolChoiceParams,
+)
from .client import AsyncHumanloop, Humanloop
from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
from .environment import HumanloopEnvironment
@@ -190,6 +246,8 @@
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffort,
+ PromptRequestReasoningEffortParams,
PromptRequestStop,
PromptRequestStopParams,
PromptRequestTemplate,
@@ -200,12 +258,41 @@
PromptsCallStreamRequestToolChoiceParams,
)
from .requests import (
+ AgentCallResponseParams,
+ AgentCallResponseToolChoiceParams,
+ AgentCallStreamResponseParams,
+ AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
+ AgentContinueCallResponseParams,
+ AgentContinueCallResponseToolChoiceParams,
+ AgentContinueCallStreamResponseParams,
+ AgentContinueCallStreamResponsePayloadParams,
+ AgentInlineToolParams,
+ AgentKernelRequestParams,
+ AgentKernelRequestReasoningEffortParams,
+ AgentKernelRequestStopParams,
+ AgentKernelRequestTemplateParams,
+ AgentKernelRequestToolsItemParams,
+ AgentLinkedFileRequestParams,
+ AgentLinkedFileResponseFileParams,
+ AgentLinkedFileResponseParams,
+ AgentLogResponseParams,
+ AgentLogResponseToolChoiceParams,
+ AgentLogStreamResponseParams,
+ AgentResponseParams,
+ AgentResponseReasoningEffortParams,
+ AgentResponseStopParams,
+ AgentResponseTemplateParams,
+ AgentResponseToolsItemParams,
+ AnthropicRedactedThinkingContentParams,
+ AnthropicThinkingContentParams,
BooleanEvaluatorStatsResponseParams,
ChatMessageContentItemParams,
ChatMessageContentParams,
ChatMessageParams,
+ ChatMessageThinkingItemParams,
CodeEvaluatorRequestParams,
+ CreateAgentLogResponseParams,
CreateDatapointRequestParams,
CreateDatapointRequestTargetValueParams,
CreateEvaluatorLogResponseParams,
@@ -245,6 +332,7 @@
ExternalEvaluatorRequestParams,
FileEnvironmentResponseFileParams,
FileEnvironmentResponseParams,
+ FileEnvironmentVariableRequestParams,
FileIdParams,
FilePathParams,
FileRequestParams,
@@ -258,7 +346,9 @@
ImageChatContentParams,
ImageUrlParams,
InputResponseParams,
+ LinkedFileRequestParams,
LinkedToolResponseParams,
+ ListAgentsParams,
ListDatasetsParams,
ListEvaluatorsParams,
ListFlowsParams,
@@ -266,24 +356,27 @@
ListToolsParams,
LlmEvaluatorRequestParams,
LogResponseParams,
+ LogStreamResponseParams,
MonitoringEvaluatorEnvironmentRequestParams,
MonitoringEvaluatorResponseParams,
MonitoringEvaluatorVersionRequestParams,
NumericEvaluatorStatsResponseParams,
OverallStatsParams,
+ PaginatedDataAgentResponseParams,
PaginatedDataEvaluationLogResponseParams,
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
PaginatedDataPromptResponseParams,
PaginatedDataToolResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
PaginatedDatapointResponseParams,
PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PopulateTemplateResponseParams,
PopulateTemplateResponsePopulatedTemplateParams,
+ PopulateTemplateResponseReasoningEffortParams,
PopulateTemplateResponseStopParams,
PopulateTemplateResponseTemplateParams,
PromptCallLogResponseParams,
@@ -291,11 +384,13 @@
PromptCallResponseToolChoiceParams,
PromptCallStreamResponseParams,
PromptKernelRequestParams,
+ PromptKernelRequestReasoningEffortParams,
PromptKernelRequestStopParams,
PromptKernelRequestTemplateParams,
PromptLogResponseParams,
PromptLogResponseToolChoiceParams,
PromptResponseParams,
+ PromptResponseReasoningEffortParams,
PromptResponseStopParams,
PromptResponseTemplateParams,
ProviderApiKeysParams,
@@ -307,6 +402,7 @@
TextChatContentParams,
TextEvaluatorStatsResponseParams,
ToolCallParams,
+ ToolCallResponseParams,
ToolChoiceParams,
ToolFunctionParams,
ToolKernelRequestParams,
@@ -329,8 +425,76 @@
__all__ = [
"AddEvaluatorsRequestEvaluatorsItem",
"AddEvaluatorsRequestEvaluatorsItemParams",
+ "AgentCallResponse",
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoice",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayload",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
+ "AgentContinueCallResponse",
+ "AgentContinueCallResponseParams",
+ "AgentContinueCallResponseToolChoice",
+ "AgentContinueCallResponseToolChoiceParams",
+ "AgentContinueCallStreamResponse",
+ "AgentContinueCallStreamResponseParams",
+ "AgentContinueCallStreamResponsePayload",
+ "AgentContinueCallStreamResponsePayloadParams",
+ "AgentInlineTool",
+ "AgentInlineToolParams",
+ "AgentKernelRequest",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItem",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentLogResponse",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoice",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponse",
+ "AgentLogStreamResponseParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentResponse",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffort",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStop",
+ "AgentResponseStopParams",
+ "AgentResponseTemplate",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItem",
+ "AgentResponseToolsItemParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContent",
+ "AnthropicThinkingContentParams",
"AsyncHumanloop",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
@@ -341,11 +505,15 @@
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItem",
+ "ChatMessageThinkingItemParams",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"CodeEvaluatorRequestParams",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequest",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValue",
@@ -438,6 +606,7 @@
"EvaluatorVersionId",
"EvaluatorVersionIdParams",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"ExternalEvaluatorRequestParams",
"FeedbackType",
@@ -445,6 +614,8 @@
"FileEnvironmentResponseFile",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequest",
+ "FileEnvironmentVariableRequestParams",
"FileId",
"FileIdParams",
"FilePath",
@@ -477,8 +648,12 @@
"ImageUrlParams",
"InputResponse",
"InputResponseParams",
+ "LinkedFileRequest",
+ "LinkedFileRequestParams",
"LinkedToolResponse",
"LinkedToolResponseParams",
+ "ListAgents",
+ "ListAgentsParams",
"ListDatasets",
"ListDatasetsParams",
"ListEvaluators",
@@ -495,6 +670,8 @@
"LogResponse",
"LogResponseParams",
"LogStatus",
+ "LogStreamResponse",
+ "LogStreamResponseParams",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -507,8 +684,12 @@
"NumericEvaluatorStatsResponse",
"NumericEvaluatorStatsResponseParams",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
"OverallStatsParams",
+ "PaginatedDataAgentResponse",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponse",
@@ -521,10 +702,10 @@
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponse",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponse",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponse",
@@ -538,6 +719,8 @@
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplate",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffort",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
@@ -553,6 +736,8 @@
"PromptCallStreamResponseParams",
"PromptKernelRequest",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffort",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStop",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
@@ -565,12 +750,16 @@
"PromptLogResponseToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
"PromptResponse",
"PromptResponseParams",
+ "PromptResponseReasoningEffort",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStop",
"PromptResponseStopParams",
"PromptResponseTemplate",
@@ -581,7 +770,6 @@
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
"ProviderApiKeysParams",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatParams",
"ResponseFormatType",
@@ -604,6 +792,8 @@
"TimeUnit",
"ToolCall",
"ToolCallParams",
+ "ToolCallResponse",
+ "ToolCallResponseParams",
"ToolChoice",
"ToolChoiceParams",
"ToolFunction",
@@ -643,6 +833,7 @@
"VersionStatsResponseParams",
"VersionStatus",
"__version__",
+ "agents",
"datasets",
"directories",
"evaluations",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
new file mode 100644
index 00000000..04260714
--- /dev/null
+++ b/src/humanloop/agents/__init__.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AgentLogRequestToolChoice,
+ AgentRequestReasoningEffort,
+ AgentRequestStop,
+ AgentRequestTemplate,
+ AgentRequestToolsItem,
+ AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestToolChoice,
+)
+from .requests import (
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStopParams,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestToolChoiceParams,
+)
+
+__all__ = [
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
new file mode 100644
index 00000000..64f3de62
--- /dev/null
+++ b/src/humanloop/agents/client.py
@@ -0,0 +1,3530 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .raw_client import RawAgentsClient
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from ..requests.agent_kernel_request import AgentKernelRequestParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.agent_log_response import AgentLogResponse
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..core.pagination import SyncPager
+from ..types.agent_response import AgentResponse
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from .raw_client import AsyncRawAgentsClient
+from ..core.pagination import AsyncPager
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> RawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ RawAgentsClient
+ """
+ return self._raw_client
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log with.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.log(
+ path="Banking/Teller Agent",
+ agent={
+ "provider": "anthropic",
+ "endpoint": "chat",
+ "model": "claude-3-7-sonnet-latest",
+ "reasoning_effort": 1024,
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ "max_iterations": 3,
+ "tools": [
+ {
+ "type": "file",
+ "link": {
+ "file_id": "pr_1234567890",
+ "version_id": "prv_1234567890",
+ },
+ "on_agent_call": "continue",
+ },
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
+ },
+ "on_agent_call": "stop",
+ },
+ ],
+ },
+ )
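+
+        As a sketch of the `incomplete` to `complete` flow described above
+        (the Agent ID below is a hypothetical placeholder, and the created
+        Log's ID is assumed to be exposed as `log.id`):
+
+        log = client.agents.log(
+            path="Banking/Teller Agent",
+            log_status="incomplete",
+        )
+        client.agents.update_log(
+            id="ag_1234567890",
+            log_id=log.id,
+            log_status="complete",
+        )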
+ """
+ response = self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentLogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message`, or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message`, or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_log(
+ id="ag_1234567890",
+ log_id="log_1234567890",
+ messages=[
+ {"role": "user", "content": "I need to withdraw $1000"},
+ {
+ "role": "assistant",
+ "content": "Of course! Would you like to use your savings or checking account?",
+ },
+ ],
+ output_message={
+ "role": "assistant",
+ "content": "I'm sorry, I can't help with that.",
+ },
+ log_status="complete",
+ )
+ """
+ response = self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentCallStreamResponse]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The Agent will run until it reaches its configured maximum number of
+        iterations or encounters a stop condition.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass
+        Agent details in the request body. A new version is created if these
+        do not match any existing version. This is helpful when you are
+        storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.call_stream()
+ for chunk in response:
+            print(chunk)
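+
+        As a sketch with a targeted Agent and an input message (both
+        illustrative), the stream can also be consumed directly:
+
+        for chunk in client.agents.call_stream(
+            path="Banking/Teller Agent",
+            messages=[{"role": "user", "content": "What is my balance?"}],
+        ):
+            print(chunk)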
+ """
+ with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+        If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+        The Agent will run until it reaches its configured maximum number of
+        iterations or encounters a stop condition.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass
+        Agent details in the request body. A new version is created if these
+        do not match any existing version. This is helpful when you are
+        storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.call(
+ path="Banking/Teller Agent",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'd like to deposit $1000 to my savings account from my checking account.",
+ }
+ ],
+ )
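+
+        If the call halts on a tool Humanloop cannot run, you can execute the
+        tool yourself and resume via `continue_call`. A sketch, assuming the
+        response exposes the incomplete Log's ID as `log_id`, and with the
+        tool call ID shown as a placeholder taken from the returned output
+        message:
+
+        response = client.agents.call(
+            path="Banking/Teller Agent",
+            messages=[
+                {"role": "user", "content": "What's my checking balance?"}
+            ],
+        )
+        tool_result = '{"type": "checking", "balance": 5200}'  # computed locally
+        client.agents.continue_call(
+            log_id=response.log_id,
+            messages=[
+                {
+                    "role": "tool",
+                    "content": tool_result,
+                    "tool_call_id": "tc_1234567890",
+                }
+            ],
+        )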
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def continue_call_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentContinueCallStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+        The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentContinueCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.continue_call_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ for chunk in response:
+            print(chunk)
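+
+        More typically, the continuation messages start with Tool messages
+        carrying results for the Agent's requested tool calls (the IDs below
+        are placeholders):
+
+        for chunk in client.agents.continue_call_stream(
+            log_id="log_1234567890",
+            messages=[
+                {
+                    "role": "tool",
+                    "content": '{"type": "checking", "balance": 5200}',
+                    "tool_call_id": "tc_1234567890",
+                }
+            ],
+        ):
+            print(chunk)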
+ """
+ with self._raw_client.continue_call_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def continue_call(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueCallResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+        The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.continue_call(
+ log_id="log_1234567890",
+ messages=[
+ {
+ "role": "tool",
+ "content": '{"type": "checking", "balance": 5200}',
+ "tool_call_id": "tc_1234567890",
+ }
+ ],
+ )
+ """
+ response = self._raw_client.continue_call(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[AgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[AgentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.list(
+ size=1,
+ )
+        for item in response:
+            print(item)
+        # alternatively, you can paginate page-by-page
+        for page in response.iter_pages():
+            print(page)
+ """
+ page = page if page is not None else 1
+ _response = self._raw_client._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _has_next = True
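+                # Pages are fetched lazily: _get_next re-issues this request
+                # for the following page only when the pager's iteration needs it.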
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ _items = _parsed_response.records
+ return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent; attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide `max_tokens=-1` to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but determinism is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.upsert(
+ path="Banking/Teller Agent",
+ provider="anthropic",
+ endpoint="chat",
+ model="claude-3-7-sonnet-latest",
+ reasoning_effort=1024,
+ template=[
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ max_iterations=3,
+ tools=[
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
+ },
+ "on_agent_call": "stop",
+ }
+ ],
+ version_name="teller-agent-v1",
+ version_description="Initial version",
+ )
+ """
+ response = self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
+ )
+ """
+ response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.patch_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
+ name="teller-agent-v2",
+ description="Updated version",
+ )
+ """
+ response = self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.get(
+ id="ag_1234567890",
+ )
+ """
+ response = self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete(
+ id="ag_1234567890",
+ )
+ """
+ response = self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.move(
+ id="ag_1234567890",
+ path="new directory/new name",
+ )
+ """
+ response = self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_versions(
+ id="ag_1234567890",
+ )
+ """
+ response = self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+ """
+ response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_environments(
+ id="ag_1234567890",
+ )
+ """
+ response = self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_monitoring(
+ id="ag_1234567890",
+ activate=[
+ {"evaluator_version_id": "ev_1234567890"},
+ {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
+ ],
+ deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ )
+ """
+ response = self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+        This subset is the part that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.deserialize(
+ agent="agent",
+ )
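+
+        As a sketch, you would typically pass the contents of a serialized
+        `.agent` file (the filename below is illustrative):
+
+        with open("teller.agent") as f:
+            kernel = client.agents.deserialize(agent=f.read())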
+ """
+ response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
+
+
+class AsyncAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawAgentsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log with.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.log(
+ path="Banking/Teller Agent",
+ agent={
+ "provider": "anthropic",
+ "endpoint": "chat",
+ "model": "claude-3-7-sonnet-latest",
+ "reasoning_effort": 1024,
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ "max_iterations": 3,
+ "tools": [
+ {
+ "type": "file",
+ "link": {
+ "file_id": "pr_1234567890",
+ "version_id": "prv_1234567890",
+ },
+ "on_agent_call": "continue",
+ },
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
+ },
+ "on_agent_call": "stop",
+ },
+ ],
+ },
+ )
+
+
+ asyncio.run(main())
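+
+ If you create the Log as `incomplete`, a minimal sketch (reusing the
+ illustrative IDs from these docs) of completing it later so that
+ monitoring Evaluators run:
+
+ await client.agents.log(
+     path="Banking/Teller Agent",
+     log_id="log_1234567890",
+     log_status="incomplete",
+ )
+ await client.agents.update_log(
+     id="ag_1234567890",
+     log_id="log_1234567890",
+     log_status="complete",
+ )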
+ """
+ response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentLogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message`, or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message`, or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_log(
+ id="ag_1234567890",
+ log_id="log_1234567890",
+ messages=[
+ {"role": "user", "content": "I need to withdraw $1000"},
+ {
+ "role": "assistant",
+ "content": "Of course! Would you like to use your savings or checking account?",
+ },
+ ],
+ output_message={
+ "role": "assistant",
+ "content": "I'm sorry, I can't help with that.",
+ },
+ log_status="complete",
+ )
+
+
+ asyncio.run(main())
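+
+ Passing `None` explicitly (rather than omitting the field) unsets an
+ existing value, per the parameter descriptions above; a minimal sketch:
+
+ await client.agents.update_log(
+     id="ag_1234567890",
+     log_id="log_1234567890",
+     output=None,
+ )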
+ """
+ response = await self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentCallStreamResponse]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the result of the required tool call to the /agents/continue endpoint.
+
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.call_stream()
+ async for chunk in response:
+ print(chunk)
+
+
+ asyncio.run(main())
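+
+ A fuller sketch, targeting an Agent by path and streaming the response
+ (the path and message are illustrative):
+
+ async def stream_example() -> None:
+     response = await client.agents.call_stream(
+         path="Banking/Teller Agent",
+         messages=[{"role": "user", "content": "What's my balance?"}],
+     )
+     async for chunk in response:
+         print(chunk)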
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the result of the required tool call to the /agents/continue endpoint.
+
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.call(
+ path="Banking/Teller Agent",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'd like to deposit $1000 to my savings account from my checking account.",
+ }
+ ],
+ )
+
+
+ asyncio.run(main())
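+
+ As described above, you can also define the Agent inline rather than
+ targeting an existing version; a new version is created if the details
+ are new. A sketch with an illustrative configuration:
+
+ await client.agents.call(
+     path="Banking/Teller Agent",
+     agent={
+         "provider": "anthropic",
+         "endpoint": "chat",
+         "model": "claude-3-7-sonnet-latest",
+         "template": [
+             {
+                 "role": "system",
+                 "content": "You are a helpful digital assistant.",
+             }
+         ],
+         "max_iterations": 3,
+     },
+     messages=[{"role": "user", "content": "What's my balance?"}],
+ )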
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def continue_call_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentContinueCallStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the result of the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Typically, these start with Tool messages containing the results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentContinueCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.continue_call_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ async for chunk in response:
+ print(chunk)
+
+
+ asyncio.run(main())
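+
+ In practice, the continuation messages usually begin with Tool results for
+ the previous Assistant message's tool calls; a sketch with illustrative IDs:
+
+ response = await client.agents.continue_call_stream(
+     log_id="log_1234567890",
+     messages=[
+         {
+             "role": "tool",
+             "content": '{"type": "checking", "balance": 5200}',
+             "tool_call_id": "tc_1234567890",
+         }
+     ],
+ )
+ async for chunk in response:
+     print(chunk)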
+ """
+ async with self._raw_client.continue_call_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def continue_call(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueCallResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the result of the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Typically, these start with Tool messages containing the results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.continue_call(
+ log_id="log_1234567890",
+ messages=[
+ {
+ "role": "tool",
+ "content": '{"type": "checking", "balance": 5200}',
+ "tool_call_id": "tc_1234567890",
+ }
+ ],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.continue_call(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[AgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[AgentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.list(
+ size=1,
+ )
+ async for item in response:
+ print(item)
+ # alternatively, you can paginate page-by-page
+ async for page in response.iter_pages():
+ print(page)
+
+
+ asyncio.run(main())
+ """
+ page = page if page is not None else 1
+ _response = await self._raw_client._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ _items = _parsed_response.records
+ return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+ The tools available to the Agent.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.upsert(
+ path="Banking/Teller Agent",
+ provider="anthropic",
+ endpoint="chat",
+ model="claude-3-7-sonnet-latest",
+ reasoning_effort=1024,
+ template=[
+ {
+ "role": "system",
+ "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
+ }
+ ],
+ max_iterations=3,
+ tools=[
+ {
+ "type": "inline",
+ "json_schema": {
+ "name": "stop",
+ "description": "Call this tool when you have finished your task.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "output": {
+ "type": "string",
+ "description": "The final output to return to the user.",
+ }
+ },
+ "additionalProperties": False,
+ "required": ["output"],
+ },
+ "strict": True,
+ },
+ "on_agent_call": "stop",
+ }
+ ],
+ version_name="teller-agent-v1",
+ version_description="Initial version",
+ )
+
+
+ asyncio.run(main())
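+
+ Version names must be unique within an Agent, so re-upserting the same
+ `version_name` with different parameters raises a 409 Conflict. A sketch of
+ handling this with the SDK's generic `ApiError`:
+
+ from humanloop.core.api_error import ApiError
+
+ try:
+     await client.agents.upsert(
+         path="Banking/Teller Agent",
+         model="claude-3-7-sonnet-latest",
+         version_name="teller-agent-v1",
+     )
+ except ApiError as e:
+     if e.status_code == 409:
+         ...  # version name already exists with different parameters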
+ """
+ response = await self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.patch_agent_version(
+ id="ag_1234567890",
+ version_id="agv_1234567890",
+ name="teller-agent-v2",
+ description="Updated version",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.get(
+ id="ag_1234567890",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete(
+ id="ag_1234567890",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.move(
+ id="ag_1234567890",
+ path="new directory/new name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_versions(
+ id="ag_1234567890",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_environments(
+ id="ag_1234567890",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_monitoring(
+ id="ag_1234567890",
+ activate=[
+ {"evaluator_version_id": "ev_1234567890"},
+ {
+ "evaluator_id": "ev_2345678901",
+ "environment_id": "env_1234567890",
+ },
+ ],
+ deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent:
+ the subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.deserialize(
+ agent="agent",
+ )
+
+
+ asyncio.run(main())
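+
+ A sketch of round-tripping an Agent definition stored in version control
+ (the file path is illustrative):
+
+ with open("agents/teller.agent") as f:
+     kernel = await client.agents.deserialize(agent=f.read())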
+ """
+ response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
new file mode 100644
index 00000000..b13491a6
--- /dev/null
+++ b/src/humanloop/agents/raw_client.py
@@ -0,0 +1,3718 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from ..requests.agent_kernel_request import AgentKernelRequestParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..core.http_response import HttpResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.agent_log_response import AgentLogResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+import httpx_sse
+import contextlib
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from ..core.http_response import AsyncHttpResponse
+
+# Sentinel default for optional parameters: lets the client distinguish "not provided" from an explicit None
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawAgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the Log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to the provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ Unique identifier for the Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[CreateAgentLogResponse]
+ Successful Response
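+
+ Examples
+ --------
+ An illustrative sketch; it assumes the raw client is reached through a
+ `with_raw_response` accessor on the main `Humanloop` client (the exact
+ accessor name may differ in your SDK version):
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.log(
+     path="sales/qualifier",
+     output="The lead is qualified.",
+     log_status="complete",
+ )
+ print(response.data)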
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentLogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message`, or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message`, or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentLogResponse]
+ Successful Response
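+
+ Examples
+ --------
+ A sketch of completing a previously `incomplete` Log, given `client` as in
+ the `log` example above (IDs are illustrative):
+
+ client.agents.with_raw_response.update_log(
+     id="<agent-id>",
+     log_id="<log-id>",
+     output="Final answer.",
+     log_status="complete",
+ )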
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentLogResponse,
+ construct_type(
+ type_=AgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if they do not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ Unique identifier for the Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+
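+ Examples
+ --------
+ A sketch of consuming the stream, given `client` as in the `log` example
+ above (the `with_raw_response` accessor is an assumption):
+
+ with client.agents.with_raw_response.call_stream(
+     path="sales/qualifier",
+     messages=[{"role": "user", "content": "Qualify this lead."}],
+ ) as response:
+     for chunk in response.data:
+         print(chunk)
+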
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+     _event_source = httpx_sse.EventSource(_response)
+     for _sse in _event_source.iter_sse():
+         if _sse.data is None:
+             return
+         try:
+             # `_sse.data` is a str attribute, not a callable; parse each
+             # frame into the typed response (assumes `json` is imported at
+             # module level with the other generated imports).
+             yield typing.cast(
+                 AgentCallStreamResponse,
+                 construct_type(
+                     type_=AgentCallStreamResponse,  # type: ignore
+                     object_=json.loads(_sse.data),
+                 ),
+             )
+         except Exception:
+             pass
+     return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentCallResponse]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if they do not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ Unique identifier for the Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
+
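+ Examples
+ --------
+ A sketch of a blocking call, given `client` as in the `log` example above
+ (the `with_raw_response` accessor is an assumption):
+
+ response = client.agents.with_raw_response.call(
+     path="sales/qualifier",
+     messages=[{"role": "user", "content": "Qualify this lead."}],
+ )
+ print(response.data)
+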
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def continue_call_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]
+
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+     _event_source = httpx_sse.EventSource(_response)
+     for _sse in _event_source.iter_sse():
+         if _sse.data is None:
+             return
+         try:
+             # `_sse.data` is a str attribute, not a callable; parse each
+             # frame into the typed response (assumes `json` is imported at
+             # module level with the other generated imports).
+             yield typing.cast(
+                 AgentContinueCallStreamResponse,
+                 construct_type(
+                     type_=AgentContinueCallStreamResponse,  # type: ignore
+                     object_=json.loads(_sse.data),
+                 ),
+             )
+         except Exception:
+             pass
+     return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def continue_call(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentContinueCallResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentContinueCallResponse]
+
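+ Examples
+ --------
+ A sketch of resuming an incomplete call with a tool result, given `client`
+ as in the `log` example above; the message fields follow the
+ ChatMessageParams TypedDict and the values are illustrative:
+
+ response = client.agents.with_raw_response.continue_call(
+     log_id="<incomplete-log-id>",
+     messages=[
+         {
+             "role": "tool",
+             "tool_call_id": "<tool-call-id>",
+             "content": '{"qualified": true}',
+         }
+     ],
+ )
+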
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueCallResponse,
+ construct_type(
+ type_=AgentContinueCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by their `id` or `path`. The parameters (e.g. the template, temperature, model) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent; attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide `max_tokens=-1` to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but determinism is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
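+
+ Examples
+ --------
+ A sketch of upserting a chat-style Agent, given `client` as in the `log`
+ example above (the template shape is an assumption based on the
+ `template` description):
+
+ response = client.agents.with_raw_response.upsert(
+     path="sales/qualifier",
+     model="gpt-4o",
+     template=[
+         {"role": "system", "content": "Qualify leads for {{company}}."}
+     ],
+     max_iterations=5,
+     version_name="v1",
+ )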
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
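+
+ Examples
+ --------
+ A sketch of archiving an Agent by moving it to a new path, given `client`
+ as in the `log` example above:
+
+ client.agents.with_raw_response.move(
+     id="<agent-id>",
+     path="archive/qualifier",
+ )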
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ListAgents]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the Agent
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
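+
+ Examples
+ --------
+ A sketch, given `client` as in the `log` example above (IDs are
+ illustrative):
+
+ client.agents.with_raw_response.set_deployment(
+     id="<agent-id>",
+     environment_id="<environment-id>",
+     version_id="<version-id>",
+ )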
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version of the Agent
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
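+
+ Examples
+ --------
+ A sketch of activating a monitoring Evaluator, given `client` as in the
+ `log` example above; the activate-item shape is an assumption inferred
+ from the request type name:
+
+ client.agents.with_raw_response.update_monitoring(
+     id="<agent-id>",
+     activate=[{"evaluator_version_id": "<evaluator-version-id>"}],
+ )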
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[None]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+ This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The serialized Agent in the `.agent` file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentKernelRequest]
+ Successful Response
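+
+ Examples
+ --------
+ A sketch of deserializing an `.agent` file from disk, given `client` as in
+ the `log` example above (the file path is illustrative):
+
+ with open("qualifier.agent") as f:
+     response = client.agents.with_raw_response.deserialize(agent=f.read())
+ print(response.data)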
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
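+
+ Examples
+ --------
+ A minimal sketch (run inside an event loop), assuming `raw_agents` is an
+ AsyncRawAgentsClient built with your AsyncClientWrapper and that the parsed
+ body is exposed as `.data`; the path is a hypothetical placeholder:
+
+ import datetime as dt
+
+ response = await raw_agents.log(
+     path="example/my-agent",
+     output="Final answer",
+     log_status="incomplete",  # update to "complete" later to trigger Evaluators
+     start_time=dt.datetime.now(dt.timezone.utc),
+ )
+ log = response.data  # CreateAgentLogResponse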
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentLogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentLogResponse]
+ Successful Response
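+
+ Examples
+ --------
+ A minimal sketch of completing a previously `incomplete` Log; both IDs are
+ hypothetical placeholders:
+
+ response = await raw_agents.update_log(
+     id="ag_example",
+     log_id="log_example",
+     output="Final answer",
+     log_status="complete",  # monitoring Evaluators only run on complete Logs
+ )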
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentLogResponse,
+ construct_type(
+ type_=AgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. A new version is created if they do not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
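+ Examples
+ --------
+ A minimal sketch; this method is an async context manager yielding an
+ AsyncHttpResponse whose `data` is an async iterator of stream events
+ (the path is a hypothetical placeholder):
+
+ async with raw_agents.call_stream(
+     path="example/my-agent",
+     messages=[{"role": "user", "content": "Hello"}],
+ ) as response:
+     async for event in response.data:
+         print(event)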
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ # Parse each SSE event's JSON payload into the typed stream
+ # response; malformed events are skipped.
+ try:
+ yield typing.cast(
+ AgentCallStreamResponse,
+ construct_type(
+ type_=AgentCallStreamResponse, # type: ignore
+ object_=_sse.json(),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentCallResponse]:
+ """
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. A new version is created if they do not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentKernelRequestParams]
+ Details of your Agent. A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
+
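+ Examples
+ --------
+ A minimal sketch (the path is a hypothetical placeholder):
+
+ response = await raw_agents.call(
+     path="example/my-agent",
+     messages=[{"role": "user", "content": "Hello"}],
+ )
+ agent_log = response.data  # completed AgentCallResponse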
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def continue_call_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Typically, these start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]
+
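+ Examples
+ --------
+ A minimal sketch of resuming a halted call with a tool result; the IDs are
+ hypothetical placeholders and the Tool-message shape assumes OpenAI-style
+ fields on ChatMessageParams:
+
+ async with raw_agents.continue_call_stream(
+     log_id="log_example",
+     messages=[{"role": "tool", "tool_call_id": "call_example", "content": "42"}],
+ ) as response:
+     async for event in response.data:
+         print(event)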
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ # Parse each SSE event's JSON payload into the typed stream
+ # response; malformed events are skipped.
+ try:
+ yield typing.cast(
+ AgentContinueCallStreamResponse,
+ construct_type(
+ type_=AgentContinueCallStreamResponse, # type: ignore
+ object_=_sse.json(),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def continue_call(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentContinueCallResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+ requested by the Agent. The Agent will resume processing from where it left off.
+
+ The messages in the request will be appended to the original messages in the Log. You do not
+ have to provide the previous conversation history.
+
+ The original Log must be in an incomplete state to be continued.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Typically, these start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentContinueCallResponse]
+
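+ Examples
+ --------
+ A minimal sketch; the IDs are hypothetical placeholders and the Tool-message
+ shape assumes OpenAI-style fields on ChatMessageParams:
+
+ response = await raw_agents.continue_call(
+     log_id="log_example",
+     messages=[{"role": "tool", "tool_call_id": "call_example", "content": "42"}],
+ )
+ agent_log = response.data  # AgentContinueCallResponse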
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueCallResponse,
+ construct_type(
+ type_=AgentContinueCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by their `id` or `path`. The parameters (e.g. the template, temperature, and model) and
+ the tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent; attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
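+
+ Examples
+ --------
+ A minimal sketch that creates an Agent, or a new version of it if the details
+ differ from all existing versions (the path and version name are hypothetical
+ placeholders):
+
+ response = await raw_agents.upsert(
+     path="example/my-agent",
+     model="gpt-4",
+     template=[{"role": "system", "content": "You are {{persona}}."}],
+     max_iterations=5,
+     version_name="v1",
+ )
+ agent = response.data  # AgentResponse for the new or matching version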
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ListAgents]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
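+
+ Examples
+ --------
+ A minimal sketch; all three identifiers are hypothetical placeholders:
+
+ response = await raw_agents.set_deployment(
+     id="ag_example",
+     environment_id="env_example",
+     version_id="agv_example",
+ )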
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
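
A sketch of toggling monitoring Evaluators through the assumed high-level wrapper; the `evaluator_version_id` item field follows the public API reference, but the exact TypedDict shape is an assumption here.

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Run this Evaluator version on all new Agent Logs
    # (placeholder IDs, assumed item shape).
    agent = await client.agents.update_monitoring(
        id="ag_...",
        activate=[{"evaluator_version_id": "evv_..."}],
    )
    print(agent.version_id)


asyncio.run(main())
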
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[None]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+        This subset comprises the attributes that define the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
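
A sketch of the deserialize half of the round trip: read an .agent file kept under version control and recover the version-defining attributes. The high-level `client.agents.deserialize` wrapper and the file name are assumptions here.

import asyncio
from pathlib import Path

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    # Recover the Agent kernel (model, temperature, ...) from the file format.
    agent_text = Path("support_agent.agent").read_text()
    kernel = await client.agents.deserialize(agent=agent_text)
    print(kernel)


asyncio.run(main())
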
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..78a8f9ec
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .agent_request_stop import AgentRequestStopParams
+from .agent_request_template import AgentRequestTemplateParams
+from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+__all__ = [
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStopParams",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..584112aa
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentLogRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
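
For reference, this union accepts either a mode literal or a forced tool selection; the dict shape below assumes `ToolChoiceParams` follows the usual OpenAI-style layout.

from humanloop.agents.requests import AgentLogRequestToolChoiceParams

choice_auto: AgentLogRequestToolChoiceParams = "auto"
# Force a specific tool (assumed OpenAI-style shape for ToolChoiceParams):
choice_forced: AgentLogRequestToolChoiceParams = {
    "type": "function",
    "function": {"name": "get_weather"},
}
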
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..98a991cd
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py
new file mode 100644
index 00000000..3970451c
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
new file mode 100644
index 00000000..c251ce8e
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.chat_message import ChatMessageParams
+
+AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
new file mode 100644
index 00000000..20cde136
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+from ...requests.agent_inline_tool import AgentInlineToolParams
+
+AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..1e468fa0
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..bd068b6f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallStreamRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
new file mode 100644
index 00000000..73d98669
--- /dev/null
+++ b/src/humanloop/agents/types/__init__.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_tool_choice import AgentLogRequestToolChoice
+from .agent_request_reasoning_effort import AgentRequestReasoningEffort
+from .agent_request_stop import AgentRequestStop
+from .agent_request_template import AgentRequestTemplate
+from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
+
+__all__ = [
+ "AgentLogRequestToolChoice",
+ "AgentRequestReasoningEffort",
+ "AgentRequestStop",
+ "AgentRequestTemplate",
+ "AgentRequestToolsItem",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestToolChoice",
+]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..bfb576c2
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentLogRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..b4267202
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py
new file mode 100644
index 00000000..325a6b2e
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
new file mode 100644
index 00000000..f6474824
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.chat_message import ChatMessage
+
+AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
new file mode 100644
index 00000000..e6c54b88
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
+from ...types.agent_inline_tool import AgentInlineTool
+
+AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..6dee5a04
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..83d264f0
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallStreamRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index bf72be6a..a11298b8 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -11,6 +11,7 @@
from .datasets.client import DatasetsClient
from .evaluators.client import EvaluatorsClient
from .flows.client import FlowsClient
+from .agents.client import AgentsClient
from .directories.client import DirectoriesClient
from .files.client import FilesClient
from .evaluations.client import EvaluationsClient
@@ -21,6 +22,7 @@
from .datasets.client import AsyncDatasetsClient
from .evaluators.client import AsyncEvaluatorsClient
from .flows.client import AsyncFlowsClient
+from .agents.client import AsyncAgentsClient
from .directories.client import AsyncDirectoriesClient
from .files.client import AsyncFilesClient
from .evaluations.client import AsyncEvaluationsClient
@@ -96,6 +98,7 @@ def __init__(
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AgentsClient(client_wrapper=self._client_wrapper)
self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
self.files = FilesClient(client_wrapper=self._client_wrapper)
self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
@@ -171,6 +174,7 @@ def __init__(
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
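
The user-visible effect of these wiring changes is a new `agents` namespace on both the sync and async clients; a minimal sketch, assuming the sync subclient mirrors the async methods shown above:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
# The new subclient sits alongside prompts, tools, flows, etc.
for env in client.agents.list_environments(id="ag_..."):
    print(env)
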
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 2daa7769..74cd6c97 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -10,7 +10,12 @@
from humanloop.core.client_wrapper import SyncClientWrapper
from humanloop.evals import run_eval
-from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.evals.types import (
+ DatasetEvalConfig,
+ EvaluatorEvalConfig,
+ EvaluatorCheck,
+ FileEvalConfig,
+)
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
from humanloop.overload import overload_call, overload_log
@@ -42,10 +47,10 @@ def __init__(
def run(
self,
- file: File,
+ file: FileEvalConfig,
name: Optional[str],
- dataset: Dataset,
- evaluators: Optional[Sequence[Evaluator]] = None,
+ dataset: DatasetEvalConfig,
+ evaluators: Optional[Sequence[EvaluatorEvalConfig]] = None,
workers: int = 4,
) -> List[EvaluatorCheck]:
"""Evaluate your function for a given `Dataset` and set of `Evaluators`.
@@ -144,9 +149,7 @@ def __init__(
)
if opentelemetry_tracer is None:
- self._opentelemetry_tracer = self._tracer_provider.get_tracer(
- "humanloop.sdk"
- )
+ self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk")
else:
self._opentelemetry_tracer = opentelemetry_tracer
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index f25dc2ca..71036800 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.35",
+ "User-Agent": "humanloop/0.8.36b1",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.35",
+ "X-Fern-SDK-Version": "0.8.36b1",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index b43b1b7d..f35a699c 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -15,7 +15,7 @@
from humanloop.evals.run import HumanloopRuntimeError
from humanloop.types.chat_message import ChatMessage
from humanloop.decorators.helpers import bind_args
-from humanloop.evals.types import File
+from humanloop.evals.types import FileEvalConfig
from humanloop.otel.constants import (
HUMANLOOP_FILE_TYPE_KEY,
HUMANLOOP_LOG_KEY,
@@ -121,7 +121,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
# Return the output of the decorated function
return func_output # type: ignore [return-value]
- wrapper.file = File( # type: ignore
+ wrapper.file = FileEvalConfig( # type: ignore
path=decorator_path,
type=file_type, # type: ignore [arg-type, typeddict-item]
version=FlowDict(**flow_kernel), # type: ignore
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 5e99cab0..fa4a62ae 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -5,7 +5,7 @@
from typing import Callable, TypeVar
from humanloop.context import DecoratorContext, set_decorator_context
-from humanloop.evals.types import File
+from humanloop.evals.types import FileEvalConfig
logger = logging.getLogger("humanloop.sdk")
@@ -30,7 +30,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
output = func(*args, **kwargs)
return output
- wrapper.file = File( # type: ignore [attr-defined]
+ wrapper.file = FileEvalConfig( # type: ignore [attr-defined]
path=path,
type="prompt",
version={ # type: ignore [typeddict-item]
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 1fddc3d6..43a73182 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -14,7 +14,7 @@
from humanloop.context import get_evaluation_context, get_trace_id
from humanloop.decorators.helpers import bind_args
-from humanloop.evals import File
+from humanloop.evals import FileEvalConfig
from humanloop.evals.run import HumanloopRuntimeError
from humanloop.otel.constants import (
HUMANLOOP_FILE_KEY,
@@ -112,7 +112,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
# Return the output of the decorated function
return func_output
- wrapper.file = File( # type: ignore
+ wrapper.file = FileEvalConfig( # type: ignore
path=path,
type=file_type, # type: ignore [arg-type, typeddict-item]
version=tool_kernel,
diff --git a/src/humanloop/evals/__init__.py b/src/humanloop/evals/__init__.py
index 61f97716..02430ca2 100644
--- a/src/humanloop/evals/__init__.py
+++ b/src/humanloop/evals/__init__.py
@@ -1,4 +1,4 @@
from .run import run_eval
-from .types import File
+from .types import FileEvalConfig
-__all__ = ["run_eval", "File"]
+__all__ = ["run_eval", "FileEvalConfig"]
diff --git a/src/humanloop/evals/run.py b/src/humanloop/evals/run.py
index 25c2228b..4d641836 100644
--- a/src/humanloop/evals/run.py
+++ b/src/humanloop/evals/run.py
@@ -35,6 +35,7 @@
)
from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
+from humanloop.agents.client import AgentsClient
from humanloop.core.api_error import ApiError
from humanloop.context import (
EvaluationContext,
@@ -42,7 +43,12 @@
set_evaluation_context,
)
from humanloop.error import HumanloopRuntimeError
-from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.evals.types import (
+ DatasetEvalConfig,
+ EvaluatorEvalConfig,
+ EvaluatorCheck,
+ FileEvalConfig,
+)
# We use TypedDicts for requests, which is consistent with the rest of the SDK
from humanloop.evaluators.client import EvaluatorsClient
@@ -65,10 +71,11 @@
from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
from humanloop.types import PromptKernelRequest as Prompt
from humanloop.types import ToolKernelRequest as Tool
+from humanloop.types.agent_response import AgentResponse
+from humanloop.types.agent_kernel_request import AgentKernelRequest as Agent
from humanloop.types.datapoint_response import DatapointResponse
from humanloop.types.dataset_response import DatasetResponse
from humanloop.types.evaluation_run_response import EvaluationRunResponse
-from humanloop.types.flow_log_response import FlowLogResponse
from humanloop.types.log_response import LogResponse
from humanloop.types.run_stats_response import RunStatsResponse
from pydantic import ValidationError
@@ -89,7 +96,7 @@
EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
-FileType = Literal["flow", "prompt", "tool", "evaluator"]
+FileType = Literal["flow", "prompt", "agent"]
# ANSI escape codes for logging colors
@@ -100,15 +107,37 @@
RESET = "\033[0m"
-CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)
+CLIENT_TYPE = TypeVar(
+ "CLIENT_TYPE",
+ PromptsClient,
+ ToolsClient,
+ FlowsClient,
+ EvaluatorsClient,
+ AgentsClient,
+)
+
+
+def print_error(message: str) -> None:
+ """Print a formatted error message to stdout."""
+ sys.stdout.write(f"{RED}{message}{RESET}")
+
+
+def print_warning(message: str) -> None:
+ """Print a formatted warning message to stdout."""
+ sys.stdout.write(f"{YELLOW}{message}{RESET}\n")
+
+
+def print_info(message: str) -> None:
+ """Print a formatted info message to stdout."""
+ sys.stdout.write(f"{CYAN}{message}{RESET}\n")
def run_eval(
client: "BaseHumanloop",
- file: File,
+ file: FileEvalConfig,
name: Optional[str],
- dataset: Dataset,
- evaluators: Optional[Sequence[Evaluator]] = None,
+ dataset: DatasetEvalConfig,
+ evaluators: Optional[Sequence[EvaluatorEvalConfig]] = None,
workers: int = 4,
) -> List[EvaluatorCheck]:
"""
@@ -124,29 +153,13 @@ def run_eval(
"""
evaluators_worker_pool = ThreadPoolExecutor(max_workers=workers)
- file_ = _file_or_file_inside_hl_utility(file)
- type_ = _get_file_type(file_)
- function_ = _get_file_callable(file_, type_)
- if hasattr(function_, "file"):
- decorator_type = function_.file["type"] # type: ignore [attr-defined, union-attr]
- if decorator_type != type_:
- raise HumanloopRuntimeError(
- "The type of the decorated function does not match the type of the file. Expected `%s`, got `%s`."
- % (type_.capitalize(), decorator_type.capitalize())
- )
-
- try:
- hl_file = _upsert_file(file=file_, type=type_, client=client)
- except ValidationError as e:
- sys.stdout.write(f"{RED}Error in your `file` argument:\n\n{e}{RESET}")
- return []
- except Exception as e:
- sys.stdout.write(f"{RED}Error in your `file` argument:\n\n{e}{RESET}")
- return []
+ hl_file, function_ = _get_hl_file(client=client, file_config=file)
+ # cast is safe, we can only fetch Files allowed by FileType
+ type_ = typing.cast(FileType, hl_file.type)
try:
hl_dataset = _upsert_dataset(dataset=dataset, client=client)
except Exception as e:
- sys.stdout.write(f"{RED}Error in your `dataset` argument:\n\n{e}{RESET}")
+ print_error(f"Error in your `dataset` argument:\n\n{e}")
return []
try:
local_evaluators = _upsert_local_evaluators(
@@ -156,7 +169,7 @@ def run_eval(
type=type_,
)
except Exception as e:
- sys.stdout.write(f"{RED}Error in your `evaluators` argument:\n\n{e}{RESET}")
+ print_error(f"Error in your `evaluators` argument:\n\n{e}")
return []
_assert_dataset_evaluators_fit(hl_dataset, local_evaluators)
@@ -170,6 +183,7 @@ def run_eval(
)
def _cancel_evaluation():
+ """Mark current Evaluation run as cancelled."""
client.evaluations.update_evaluation_run(
id=evaluation.id,
run_id=run.id,
@@ -178,6 +192,7 @@ def _cancel_evaluation():
evaluators_worker_pool.shutdown(wait=False)
def handle_exit_signal(signum, frame):
+ """Handle user exit signal by cancelling the Run and shutting down threads."""
sys.stderr.write(
f"\n{RED}Received signal {signum}, cancelling Evaluation and shutting down threads...{RESET}\n"
)
@@ -188,9 +203,9 @@ def handle_exit_signal(signum, frame):
signal.signal(signal.SIGTERM, handle_exit_signal)
# Header of the CLI Report
- sys.stdout.write(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n\n")
- sys.stdout.write(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}\n")
- sys.stdout.write(f"{CYAN}Run ID: {run.id}{RESET}\n")
+ print_info(f"\nNavigate to your Evaluation:\n{evaluation.url}\n")
+ print_info(f"{type_.capitalize()} Version ID: {hl_file.version_id}")
+ print_info(f"Run ID: {run.id}")
    # This will apply the local callable to each datapoint
# and log the results to Humanloop
@@ -198,11 +213,11 @@ def handle_exit_signal(signum, frame):
# Generate locally if a file `callable` is provided
if function_ is None:
# TODO: trigger run when updated API is available
- sys.stdout.write(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}\n")
+ print_info(f"\nRunning '{hl_file.name}' {type_.capitalize()} over the '{hl_dataset.name}' Dataset")
else:
# Running the evaluation locally
- sys.stdout.write(
- f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers...{RESET}\n\n"
+ print_info(
+ f"\nRunning '{hl_file.name}' {type_.capitalize()} over the '{hl_dataset.name}' Dataset using {workers} workers...\n"
)
_PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints))
@@ -366,9 +381,132 @@ class _LocalEvaluator:
function: Callable
-def _callable_is_hl_utility(file: File) -> bool:
+EvaluatedFile = Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse, AgentResponse]
+HumanloopSubclient = Union[PromptsClient, FlowsClient, ToolsClient, EvaluatorsClient, AgentsClient]
+
+
+def _get_subclient(client: "BaseHumanloop", file_config: FileEvalConfig) -> HumanloopSubclient:
+ """Get the appropriate subclient based on file type."""
+ type_ = file_config.get("type")
+ if type_ == "prompt":
+ return client.prompts
+ elif type_ == "flow":
+ return client.flows
+ elif type_ == "tool":
+ return client.tools
+ elif type_ == "evaluator":
+ return client.evaluators
+ elif type_ == "agent":
+ return client.agents
+ else:
+ raise HumanloopRuntimeError(f"Unsupported File type: {type_}")
+
+
+def _safe_get_default_file_version(client: "BaseHumanloop", file_config: FileEvalConfig) -> EvaluatedFile:
+ """Get default version of a File from remote workspace.
+
+ Uses either the File path or id from the config.
+
+    Raises an error if the File is not of the expected type, or if neither a path nor an id is provided.
+ """
+ path = file_config.get("path")
+ type = file_config.get("type")
+ file_id = file_config.get("id")
+
+ if path is None and file_id is None:
+ raise HumanloopRuntimeError("You must provide a path or id in your `file`.")
+
+ if path is not None:
+ hl_file = client.files.retrieve_by_path(path=path)
+ if hl_file.type != type:
+ raise HumanloopRuntimeError(
+ f"File in Humanloop workspace at {path} is not of type {type}, but {hl_file.type}."
+ )
+ # cast is safe, we can only fetch Files that can be evaluated
+ return typing.cast(EvaluatedFile, hl_file)
+ elif file_id is not None:
+ subclient = _get_subclient(client=client, file_config=file_config)
+ return subclient.get(id=file_id)
+ else:
+ raise HumanloopRuntimeError("You must provide either the path or the id in your `file` config.")
+
+
+def _resolve_file(client: "BaseHumanloop", file_config: FileEvalConfig) -> tuple[EvaluatedFile, Optional[Callable]]:
+ """Resolve the File to be evaluated. Will return a FileResponse and an optional callable.
+
+    If the callable is None, the File will be evaluated on Humanloop. Otherwise, it will be evaluated locally.
+ """
+ file_id = file_config.get("id")
+ path = file_config.get("path")
+ version_id = file_config.get("version_id")
+ environment = file_config.get("environment")
+ callable = _get_file_callable(file_config=file_config)
+ version = file_config.get("version")
+
+ if callable and path is None and file_id is None:
+ raise HumanloopRuntimeError(
+ "You are trying to create a new version of the File by passing the `version` argument. "
+            "You must pass either the `file.path` or `file.id` argument and provide a proper `file.version` for upserting the File."
+ )
+ try:
+ hl_file = _safe_get_default_file_version(client=client, file_config=file_config)
+ except ApiError:
+ if not version or not path or file_id:
+ raise HumanloopRuntimeError(
+                "File does not exist on Humanloop. Please provide a `file.path` and a `file.version` to create it.",
+ )
+ return _upsert_file(file_config=file_config, client=client), callable or None
+
+ if (version_id or environment) and (callable or version):
+ raise HumanloopRuntimeError(
+ "You are trying to create a local Evaluation while requesting a specific File version by version ID or environment."
+ )
+
+ if version:
+ # User responsibility to provide adequate file.version for upserting the file
+ print_info(
+            "Upserting a new File version based on `file.version`. Will use the provided callable for generating Logs."
+ )
+ try:
+ return (_upsert_file(file_config=file_config, client=client), callable or None)
+ except Exception as e:
+ raise HumanloopRuntimeError(f"Error upserting the File. Please ensure `file.version` is valid: {e}") from e
+
+ if version_id is None and environment is None:
+ # Return default version of the File
+ return hl_file, callable
+
+ if file_id is None and (version_id or environment):
+ raise HumanloopRuntimeError(
+            "You must provide the `file.id` when addressing a File by version ID or environment."
+ )
+
+ # Use version_id or environment to retrieve specific version of the File
+ subclient = _get_subclient(client=client, file_config=file_config)
+ # Let backend handle case where both or none of version_id and environment are provided
+ return subclient.get(
+            # The check above ensures file_id is not None
+ id=file_id, # type: ignore [arg-type]
+ version_id=version_id,
+ environment=environment,
+ ), None
+
+
+def _get_hl_file(client: "BaseHumanloop", file_config: FileEvalConfig) -> tuple[EvaluatedFile, Optional[Callable]]:
+ """Check if the config object is valid, and resolve the File to be evaluated.
+
+    The callable will be None if the evaluation is to run on the Humanloop runtime.
+ Otherwise, the evaluation will happen locally.
+ """
+ file_ = _file_or_file_inside_hl_decorator(file_config)
+ file_ = _check_file_type(file_)
+
+ return _resolve_file(client=client, file_config=file_)
+
+
+def _callable_is_hl_utility(file_config: FileEvalConfig) -> bool:
"""Check if a File is a decorated function."""
- return hasattr(file.get("callable", {}), "file")
+ return hasattr(file_config.get("callable", {}), "file")
def _wait_for_evaluation_to_complete(
@@ -404,7 +542,7 @@ def _get_checks(
client: "BaseHumanloop",
evaluation: EvaluationResponse,
stats: EvaluationStats,
- evaluators: list[Evaluator],
+ evaluators: list[EvaluatorEvalConfig],
run: EvaluationRunResponse,
):
checks: List[EvaluatorCheck] = []
@@ -445,29 +583,37 @@ def _get_checks(
return checks
-def _file_or_file_inside_hl_utility(file: File) -> File:
- if _callable_is_hl_utility(file):
- inner_file: File = file["callable"].file # type: ignore [misc, attr-defined]
- if "path" in file and inner_file["path"] != file["path"]:
+def _file_or_file_inside_hl_decorator(file_config: FileEvalConfig) -> FileEvalConfig:
+ if _callable_is_hl_utility(file_config):
+ inner_file: FileEvalConfig = file_config["callable"].file # type: ignore [misc, attr-defined]
+ function_ = file_config["callable"]
+ type_ = file_config["type"]
+ decorator_type = function_.file["type"] # type: ignore [attr-defined, union-attr]
+ if decorator_type != type_:
+ raise HumanloopRuntimeError(
+ "The type of the decorated function does not match the type of the file. Expected `%s`, got `%s`."
+ % (type_.capitalize(), decorator_type.capitalize())
+ )
+ if "path" in file_config and inner_file["path"] != file_config["path"]:
raise HumanloopRuntimeError(
"`path` attribute specified in the `file` does not match the path of the decorated function. "
- f"Expected `{inner_file['path']}`, got `{file['path']}`."
+ f"Expected `{inner_file['path']}`, got `{file_config['path']}`."
)
- if "id" in file:
+ if "id" in file_config:
raise HumanloopRuntimeError(
"Do not specify an `id` attribute in `file` argument when using a decorated function."
)
- if "version" in file:
+ if "version" in file_config:
if inner_file["type"] != "prompt":
raise HumanloopRuntimeError(
f"Do not specify a `version` attribute in `file` argument when using a {inner_file['type'].capitalize()} decorated function."
)
- if "type" in file and inner_file["type"] != file["type"]:
+ if "type" in file_config and inner_file["type"] != file_config["type"]:
raise HumanloopRuntimeError(
"Attribute `type` of `file` argument does not match the file type of the decorated function. "
- f"Expected `{inner_file['type']}`, got `{file['type']}`."
+ f"Expected `{inner_file['type']}`, got `{file_config['type']}`."
)
- if "id" in file:
+ if "id" in file_config:
raise HumanloopRuntimeError(
"Do not specify an `id` attribute in `file` argument when using a decorated function."
)
@@ -482,9 +628,9 @@ def _file_or_file_inside_hl_utility(file: File) -> File:
f"{RESET}"
)
# TODO: document this
- file_["version"] = file["version"]
+ file_["version"] = file_config["version"]
else:
- file_ = copy.deepcopy(file)
+ file_ = copy.deepcopy(file_config)
# Raise error if neither path nor id is provided
if not file_.get("path") and not file_.get("id"):
@@ -493,69 +639,69 @@ def _file_or_file_inside_hl_utility(file: File) -> File:
return file_
-def _get_file_type(file: File) -> FileType:
- # Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow`
+def _check_file_type(file_config: FileEvalConfig) -> FileEvalConfig:
+ """Check that the file type is provided, or set it to `flow` if not provided."""
try:
- type_ = typing.cast(FileType, file.pop("type")) # type: ignore [arg-type, misc]
- sys.stdout.write(
- f"{CYAN}Evaluating your {type_} function corresponding to `{file.get('path') or file.get('id')}` on Humanloop{RESET}\n\n"
+ type_ = typing.cast(FileType, file_config.pop("type")) # type: ignore [arg-type, misc]
+ print_info(
+ f"Evaluating your {type_} function corresponding to `{file_config.get('path') or file_config.get('id')}` on Humanloop\n\n"
)
- return type_ or "flow"
+ file_config["type"] = type_ or "flow"
except KeyError as _:
type_ = "flow"
- sys.stdout.write(f"{YELLOW}No `file` type specified, defaulting to flow.{RESET}\n")
- return type_
+ print_warning("No `file` type specified, defaulting to flow.")
+ file_config["type"] = type_
+ return file_config
-def _get_file_callable(file: File, type_: FileType) -> Optional[Callable]:
- # Get the `callable` from the `file` to Evaluate
- function_ = typing.cast(Optional[Callable], file.pop("callable", None))
+def _get_file_callable(file_config: FileEvalConfig) -> Optional[Callable]:
+ """Get the callable of the File to be evaluated, or throw if None was provided for Flows."""
+ type_ = file_config.get("type")
+ function_ = typing.cast(Optional[Callable], file_config.pop("callable", None))
if function_ is None:
if type_ == "flow":
- raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
+ raise HumanloopRuntimeError("You must provide a `callable` for your Flow `file` to run a local eval.")
else:
- sys.stdout.write(
- f"{CYAN}No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.\n\n{RESET}"
+ print_info(
+ f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.\n\n"
)
+ elif type_ == "agent":
+ raise ValueError("Agent evaluation is only possible on the Humanloop runtime, do not provide a `callable`.")
return function_
-def _upsert_file(
- file: File, type: FileType, client: "BaseHumanloop"
-) -> Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]:
+def _upsert_file(client: "BaseHumanloop", file_config: FileEvalConfig) -> EvaluatedFile:
# Get or create the file on Humanloop
- version = file.pop("version", {})
- file_dict = {**file, **version}
+ version = file_config.pop("version", {})
+ file_dict = {**file_config, **version}
+ del file_dict["type"]
+ type_ = file_config.get("type")
+ subclient = _get_subclient(client=client, file_config=file_config)
- if type == "flow":
+ if type_ == "flow":
# Be more lenient with Flow versions as they are arbitrary json
try:
Flow.model_validate(version)
except ValidationError:
flow_version = {"attributes": version}
- file_dict = {**file, **flow_version}
- hl_file = client.flows.upsert(**file_dict) # type: ignore [arg-type, assignment]
-
- elif type == "prompt":
+ file_dict = {**file_config, **flow_version}
+ elif type_ == "prompt":
# Will throw error if version is invalid
Prompt.model_validate(version)
- hl_file = client.prompts.upsert(**file_dict) # type: ignore [arg-type, assignment]
-
- elif type == "tool":
+ elif type_ == "tool":
# Will throw error if version is invalid
Tool.model_validate(version)
- hl_file = client.tools.upsert(**file_dict) # type: ignore [arg-type, assignment]
-
- elif type == "evaluator":
- hl_file = client.evaluators.upsert(**file_dict) # type: ignore [arg-type, assignment]
-
+ elif type_ == "agent":
+ # Will throw error if version is invalid
+ Agent.model_validate(version)
else:
- raise NotImplementedError(f"Unsupported File type: {type}")
+ raise NotImplementedError(f"Unsupported File type: {type_}")
- return hl_file
+ # mypy complains about the polymorphic subclient
+ return subclient.upsert(**file_dict) # type: ignore [arg-type]
-def _upsert_dataset(dataset: Dataset, client: "BaseHumanloop"):
+def _upsert_dataset(dataset: DatasetEvalConfig, client: "BaseHumanloop"):
# Upsert the Dataset
if "action" not in dataset:
dataset["action"] = "set"
@@ -574,7 +720,7 @@ def _upsert_dataset(dataset: Dataset, client: "BaseHumanloop"):
def _upsert_local_evaluators(
- evaluators: list[Evaluator],
+ evaluators: list[EvaluatorEvalConfig],
function: Optional[Callable],
type: FileType,
client: "BaseHumanloop",
@@ -639,8 +785,8 @@ def _assert_dataset_evaluators_fit(
def _get_new_run(
client: "BaseHumanloop",
evaluation_name: Optional[str],
- evaluators: list[Evaluator],
- hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse],
+ evaluators: list[EvaluatorEvalConfig],
+ hl_file: EvaluatedFile,
hl_dataset: DatasetResponse,
function: Optional[Callable],
):
@@ -691,7 +837,7 @@ def _call_function(
try:
output = json.dumps(output)
except Exception:
- # throw error if it fails to serialize
+ # throw error if output fails to serialize
raise ValueError(f"Your {type}'s `callable` must return a string or a JSON serializable object.")
return output
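
Tying the refactor together, a hedged end-to-end sketch of evaluating an Agent with the renamed config types: Agents are evaluated on the Humanloop runtime only, so no `callable` is passed (paths, names, and the workspace contents are placeholders).

from humanloop import Humanloop
from humanloop.evals import run_eval

client = Humanloop(api_key="YOUR_API_KEY")
# Logs are generated remotely because `file.type` is "agent".
checks = run_eval(
    client,
    file={"path": "demo/support-agent", "type": "agent"},
    name="support-agent-eval",
    dataset={"path": "demo/support-dataset"},
    evaluators=[{"path": "demo/exact-match"}],
)
for check in checks:
    print(check)
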
diff --git a/src/humanloop/evals/types.py b/src/humanloop/evals/types.py
index 86c3dc64..1e40c49a 100644
--- a/src/humanloop/evals/types.py
+++ b/src/humanloop/evals/types.py
@@ -23,9 +23,7 @@
UpdateDatesetAction as UpdateDatasetAction,
) # TODO: fix original type typo
-EvaluatorDict = Union[
- CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator
-]
+EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
FileType = Literal["flow", "prompt", "tool", "evaluator"]
@@ -37,12 +35,16 @@ class Identifiers(TypedDict):
"""The ID of the File on Humanloop."""
path: NotRequired[str]
"""The path of the File on Humanloop."""
+ version_id: NotRequired[str]
+ """The ID of the version of the File on Humanloop."""
+ environment: NotRequired[str]
+ """The environment of the File on Humanloop."""
-class File(Identifiers):
+class FileEvalConfig(Identifiers):
    """A File on Humanloop (Flow, Prompt, Agent, Tool, Evaluator)."""
- type: Literal["flow", "prompt"]
+ type: Literal["flow", "prompt", "agent"]
"""The type of File this callable relates to on Humanloop."""
version: NotRequired[Version]
"""The contents uniquely define the version of the File on Humanloop."""
@@ -54,7 +56,7 @@ class File(Identifiers):
"""
-class Dataset(Identifiers):
+class DatasetEvalConfig(Identifiers):
datapoints: NotRequired[Sequence[DatapointDict]]
"""The datapoints to map your function over to produce the outputs required by the evaluation."""
action: NotRequired[UpdateDatasetAction]
@@ -62,7 +64,7 @@ class Dataset(Identifiers):
`set` replaces the existing Datapoints and `add` appends to the existing Datapoints."""
-class Evaluator(Identifiers):
+class EvaluatorEvalConfig(Identifiers):
"""The Evaluator to provide judgments for this Evaluation."""
args_type: NotRequired[EvaluatorArgumentsType]
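
For illustration, the new `version_id` and `environment` fields on `Identifiers` let a config address an existing File version directly; a sketch with placeholder IDs (note the eval runner requires `id` alongside `version_id`):

from humanloop.evals.types import DatasetEvalConfig, FileEvalConfig

file_config: FileEvalConfig = {
    "type": "prompt",
    "id": "pr_...",
    "version_id": "prv_...",
}
dataset_config: DatasetEvalConfig = {"path": "demo/support-dataset"}
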
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index c07358d0..693b46cb 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -7,8 +7,8 @@
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -45,7 +45,7 @@ def list_files(
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -80,7 +80,7 @@ def list_files(
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -175,7 +175,7 @@ async def list_files(
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -210,7 +210,7 @@ async def list_files(
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 19f52cf2..01b48e03 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -7,8 +7,8 @@
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -39,7 +39,9 @@ def list_files(
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> HttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -74,7 +76,7 @@ def list_files(
Returns
-------
- HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -95,9 +97,9 @@ def list_files(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -200,7 +202,9 @@ async def list_files(
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> AsyncHttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -235,7 +239,7 @@ async def list_files(
Returns
-------
- AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -256,9 +260,9 @@ async def list_files(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index c1618edb..8c070ab3 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,13 @@
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
+from ...requests.agent_response import AgentResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 48415fc9..46ea271a 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,8 @@
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
+from ...types.agent_response import AgentResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index a11776fc..bcb9491c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -214,10 +214,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1128,10 +1128,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 17007c1b..8733ed37 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -42,7 +42,6 @@ def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
- version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -71,9 +70,6 @@ def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
- version_status : typing.Optional[VersionStatus]
- If provided, only Logs belonging to Versions with the specified status will be returned.
-
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -99,7 +95,7 @@ def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -135,7 +131,6 @@ def list(
"page": page,
"size": size,
"version_id": version_id,
- "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -163,7 +158,6 @@ def list(
page=page + 1,
size=size,
version_id=version_id,
- version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
@@ -281,7 +275,6 @@ async def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
- version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -310,9 +303,6 @@ async def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
- version_status : typing.Optional[VersionStatus]
- If provided, only Logs belonging to Versions with the specified status will be returned.
-
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -338,7 +328,7 @@ async def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -382,7 +372,6 @@ async def main() -> None:
"page": page,
"size": size,
"version_id": version_id,
- "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -410,7 +399,6 @@ async def main() -> None:
page=page + 1,
size=size,
version_id=version_id,
- version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
diff --git a/src/humanloop/otel/exporter/__init__.py b/src/humanloop/otel/exporter/__init__.py
index 561d4b1f..6596d152 100644
--- a/src/humanloop/otel/exporter/__init__.py
+++ b/src/humanloop/otel/exporter/__init__.py
@@ -74,7 +74,7 @@ def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
for span in spans:
# only process spans that are relevant to Humanloop
- if not is_humanloop_span(span) or not is_llm_provider_call(span):
+ if not is_humanloop_span(span) and not is_llm_provider_call(span):
continue
file_type = span.attributes.get(HUMANLOOP_FILE_TYPE_KEY) # type: ignore [union-attr]
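
This one-word fix corrects an inverted filter: with `or`, a span was skipped unless it passed both checks, dropping spans that were relevant on only one count. A minimal sketch of the corrected predicate, with the two checks stubbed as booleans:

def should_skip(is_hl_span: bool, is_provider_call: bool) -> bool:
    # Skip only spans that are neither Humanloop spans nor LLM provider
    # calls: `not A and not B` equals `not (A or B)` by De Morgan's laws.
    return not is_hl_span and not is_provider_call

assert should_skip(False, False)      # irrelevant span: skipped
assert not should_skip(True, False)   # Humanloop span: processed
assert not should_skip(False, True)   # provider call: processed
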
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index c1147ff2..ae141d57 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -3,6 +3,7 @@
from .types import (
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
+ PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
PromptsCallRequestToolChoice,
@@ -11,6 +12,7 @@
from .requests import (
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
PromptsCallRequestToolChoiceParams,
@@ -22,6 +24,8 @@
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e2fff4c3..d5de327b 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -33,7 +33,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.populate_template_response import PopulateTemplateResponse
from ..types.list_prompts import ListPrompts
@@ -44,6 +44,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawPromptsClient
from ..core.pagination import AsyncPager
@@ -256,7 +257,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -962,7 +963,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -1037,8 +1038,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1599,6 +1600,92 @@ def update_monitoring(
)
return response.data
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.deserialize(
+ prompt="prompt",
+ )
+ """
+ response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
+
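
A hedged usage sketch for the two new methods. As generated, `serialize` is typed to return `None`, so the `.prompt` text is not surfaced by this wrapper; the `deserialize` argument below is a placeholder, not a documented example of the `.prompt` format.

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Fetch the deployed version in .prompt form ("pr_..." is illustrative).
client.prompts.serialize(id="pr_...")

# Turn .prompt text back into the version-defining subset of attributes.
kernel = client.prompts.deserialize(prompt="...")
print(kernel.model, kernel.temperature)
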
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1810,7 +1897,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2552,7 +2639,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2714,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3371,105 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.deserialize(
+ prompt="prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..2b907d91 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -32,7 +32,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.prompt_response import PromptResponse
from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +44,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -915,7 +916,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +991,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1052,9 @@ def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -1744,6 +1747,126 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[None]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncRawPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -2609,7 +2732,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2807,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -2745,7 +2868,9 @@ async def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -3439,3 +3564,123 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[None]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index c5119552..3971e252 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -2,6 +2,7 @@
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
@@ -10,6 +11,7 @@
__all__ = [
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
"PromptsCallRequestToolChoiceParams",
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..080a107e
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
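
A sketch of the two shapes the new union accepts, per the updated `reasoning_effort` docstring; the effort literal and model names are assumptions for illustration:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# OpenAI reasoning model: enum-style effort level.
client.prompts.upsert(
    path="sketch/openai-prompt",
    model="o3-mini",
    reasoning_effort="medium",
)

# Anthropic reasoning model: integer maximum token budget.
client.prompts.upsert(
    path="sketch/anthropic-prompt",
    model="claude-3-7-sonnet-latest",
    reasoning_effort=1024,
)
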
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 644cf6b5..1b849e7d 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -2,6 +2,7 @@
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
@@ -10,6 +11,7 @@
__all__ = [
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
+ "PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
"PromptsCallRequestToolChoice",
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..33f35288
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index bd9458ba..fb1580df 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,11 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponseParams
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_call_stream_response import AgentCallStreamResponseParams
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
+from .agent_continue_call_response import AgentContinueCallResponseParams
+from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
+from .agent_continue_call_stream_response import AgentContinueCallStreamResponseParams
+from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
+from .agent_inline_tool import AgentInlineToolParams
+from .agent_kernel_request import AgentKernelRequestParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_linked_file_response import AgentLinkedFileResponseParams
+from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+from .agent_log_response import AgentLogResponseParams
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+from .agent_response import AgentResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .agent_response_tools_item import AgentResponseToolsItemParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
from .chat_message import ChatMessageParams
from .chat_message_content import ChatMessageContentParams
from .chat_message_content_item import ChatMessageContentItemParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
from .code_evaluator_request import CodeEvaluatorRequestParams
+from .create_agent_log_response import CreateAgentLogResponseParams
from .create_datapoint_request import CreateDatapointRequestParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
from .create_evaluator_log_response import CreateEvaluatorLogResponseParams
@@ -51,6 +80,7 @@
from .external_evaluator_request import ExternalEvaluatorRequestParams
from .file_environment_response import FileEnvironmentResponseParams
from .file_environment_response_file import FileEnvironmentResponseFileParams
+from .file_environment_variable_request import FileEnvironmentVariableRequestParams
from .file_id import FileIdParams
from .file_path import FilePathParams
from .file_request import FileRequestParams
@@ -64,7 +94,9 @@
from .image_chat_content import ImageChatContentParams
from .image_url import ImageUrlParams
from .input_response import InputResponseParams
+from .linked_file_request import LinkedFileRequestParams
from .linked_tool_response import LinkedToolResponseParams
+from .list_agents import ListAgentsParams
from .list_datasets import ListDatasetsParams
from .list_evaluators import ListEvaluatorsParams
from .list_flows import ListFlowsParams
@@ -72,28 +104,31 @@
from .list_tools import ListToolsParams
from .llm_evaluator_request import LlmEvaluatorRequestParams
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .overall_stats import OverallStatsParams
+from .paginated_data_agent_response import PaginatedDataAgentResponseParams
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams
from .paginated_data_flow_response import PaginatedDataFlowResponseParams
from .paginated_data_log_response import PaginatedDataLogResponseParams
from .paginated_data_prompt_response import PaginatedDataPromptResponseParams
from .paginated_data_tool_response import PaginatedDataToolResponseParams
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
from .paginated_datapoint_response import PaginatedDatapointResponseParams
from .paginated_dataset_response import PaginatedDatasetResponseParams
from .paginated_evaluation_response import PaginatedEvaluationResponseParams
from .populate_template_response import PopulateTemplateResponseParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .populate_template_response_stop import PopulateTemplateResponseStopParams
from .populate_template_response_template import PopulateTemplateResponseTemplateParams
from .prompt_call_log_response import PromptCallLogResponseParams
@@ -101,11 +136,13 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_call_stream_response import PromptCallStreamResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .prompt_log_response import PromptLogResponseParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
from .prompt_response_template import PromptResponseTemplateParams
from .provider_api_keys import ProviderApiKeysParams
@@ -117,6 +154,7 @@
from .text_chat_content import TextChatContentParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
from .tool_call import ToolCallParams
+from .tool_call_response import ToolCallResponseParams
from .tool_choice import ToolChoiceParams
from .tool_function import ToolFunctionParams
from .tool_kernel_request import ToolKernelRequestParams
@@ -135,12 +173,41 @@
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
__all__ = [
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
+ "AgentContinueCallResponseParams",
+ "AgentContinueCallResponseToolChoiceParams",
+ "AgentContinueCallStreamResponseParams",
+ "AgentContinueCallStreamResponsePayloadParams",
+ "AgentInlineToolParams",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponseParams",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStopParams",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItemParams",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContentParams",
"BooleanEvaluatorStatsResponseParams",
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItemParams",
"CodeEvaluatorRequestParams",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValueParams",
"CreateEvaluatorLogResponseParams",
@@ -180,6 +247,7 @@
"ExternalEvaluatorRequestParams",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequestParams",
"FileIdParams",
"FilePathParams",
"FileRequestParams",
@@ -193,7 +261,9 @@
"ImageChatContentParams",
"ImageUrlParams",
"InputResponseParams",
+ "LinkedFileRequestParams",
"LinkedToolResponseParams",
+ "ListAgentsParams",
"ListDatasetsParams",
"ListEvaluatorsParams",
"ListFlowsParams",
@@ -201,24 +271,27 @@
"ListToolsParams",
"LlmEvaluatorRequestParams",
"LogResponseParams",
+ "LogStreamResponseParams",
"MonitoringEvaluatorEnvironmentRequestParams",
"MonitoringEvaluatorResponseParams",
"MonitoringEvaluatorVersionRequestParams",
"NumericEvaluatorStatsResponseParams",
"OverallStatsParams",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponseParams",
"PaginatedDataFlowResponseParams",
"PaginatedDataLogResponseParams",
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponseParams",
"PaginatedEvaluationResponseParams",
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplateParams",
"PromptCallLogResponseParams",
@@ -226,11 +299,13 @@
"PromptCallResponseToolChoiceParams",
"PromptCallStreamResponseParams",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplateParams",
"PromptLogResponseParams",
"PromptLogResponseToolChoiceParams",
"PromptResponseParams",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStopParams",
"PromptResponseTemplateParams",
"ProviderApiKeysParams",
@@ -242,6 +317,7 @@
"TextChatContentParams",
"TextEvaluatorStatsResponseParams",
"ToolCallParams",
+ "ToolCallResponseParams",
"ToolChoiceParams",
"ToolFunctionParams",
"ToolKernelRequestParams",
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
new file mode 100644
index 00000000..ffc925ec
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': <function_name>}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to the provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..6cc9f9c4
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
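
The four accepted shapes, spelled out; the forced-call dict follows the `{'type': 'function', ...}` form from the tool_choice docstrings, and the function name is illustrative:

from humanloop.requests import AgentCallResponseToolChoiceParams

choice_none: AgentCallResponseToolChoiceParams = "none"
choice_auto: AgentCallResponseToolChoiceParams = "auto"
choice_required: AgentCallResponseToolChoiceParams = "required"
choice_forced: AgentCallResponseToolChoiceParams = {
    "type": "function",
    "function": {"name": "get_weather"},  # illustrative function name
}
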
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
new file mode 100644
index 00000000..9555925d
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..0e08a6f3
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
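
The stream payload is an undiscriminated union. A hedged sketch of telling the members apart at runtime; the key checks are heuristics, and the enclosing stream event's `type` field is the more reliable signal.

from humanloop.requests import AgentCallStreamResponsePayloadParams

def describe(payload: AgentCallStreamResponsePayloadParams) -> str:
    # TypedDicts are plain dicts at runtime, so membership checks work.
    if "function" in payload:        # ToolCallParams carries the call spec
        return "tool_call"
    if "evaluator_logs" in payload:  # assumed unique to full LogResponseParams
        return "log"
    return "log_stream_chunk"        # incremental LogStreamResponseParams
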
diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py
new file mode 100644
index 00000000..90938dea
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_call_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': <function_name>}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to the provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
new file mode 100644
index 00000000..4722dd2e
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py
new file mode 100644
index 00000000..3eb2b498
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_call_stream_response.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
new file mode 100644
index 00000000..87e1562b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
new file mode 100644
index 00000000..8300667b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..24b044cc
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
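
Usage note: a minimal sketch of the two forms this union accepts, assuming the ToolChoiceParams dict follows the `{'type': 'function', 'function': {'name': ...}}` shape described in the tool_choice docstring above; the function name is illustrative.

    from humanloop.requests.agent_continue_response_tool_choice import (
        AgentContinueResponseToolChoiceParams,
    )

    # Literal mode: let the model decide whether to call a tool.
    choice_auto: AgentContinueResponseToolChoiceParams = "auto"

    # Forced mode: require the named function (hypothetical name).
    choice_forced: AgentContinueResponseToolChoiceParams = {
        "type": "function",
        "function": {"name": "get_weather"},
    }
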
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
new file mode 100644
index 00000000..1038e000
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..ddd74c10
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
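
Usage note: each streamed event wraps this payload union alongside the required log_id, message, type and created_at fields. A consumer sketch, assuming events arrive as parsed dicts; using "agent_id" (a field of the streamed Agent log shape later in this diff) as a discriminator is an assumption, not part of the generated models.

    from humanloop.requests.agent_continue_stream_response import (
        AgentContinueStreamResponseParams,
    )

    def handle_event(event: AgentContinueStreamResponseParams) -> None:
        # log_id, message, type and created_at are required on every event.
        print(event["log_id"], event["type"], event["created_at"])
        payload = event.get("payload")  # NotRequired: may be absent
        if payload is not None and "agent_id" in payload:
            # Looks like a streamed Agent log chunk rather than a final
            # LogResponse or a ToolCall (discriminator is an assumption).
            print("chunk for agent", payload["agent_id"])
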
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..31f9401a
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .tool_function import ToolFunctionParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+ type: typing.Literal["inline"]
+ json_schema: ToolFunctionParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..0ca76571
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentKernelRequestStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]]
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..ea32bc11
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
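
Usage note: the same field accepts either side of this union, per the reasoning_effort docstring above. The string value assumes OpenAiReasoningEffort admits effort levels such as "medium", as in OpenAI's API; the integer form is an Anthropic max-token budget.

    from humanloop.requests.agent_kernel_request_reasoning_effort import (
        AgentKernelRequestReasoningEffortParams,
    )

    # OpenAI-style effort level (assumes the enum admits "medium").
    openai_effort: AgentKernelRequestReasoningEffortParams = "medium"

    # Anthropic-style maximum thinking-token budget.
    anthropic_budget: AgentKernelRequestReasoningEffortParams = 4096
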
diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py
new file mode 100644
index 00000000..eae95d35
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
new file mode 100644
index 00000000..7261667d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..27b63984
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_inline_tool import AgentInlineToolParams
+
+AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
new file mode 100644
index 00000000..18fc2274
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
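
Usage note: the two tool flavours an Agent accepts, combined via the AgentKernelRequestToolsItemParams union above. The File ID is hypothetical, and the inline json_schema assumes ToolFunctionParams follows the usual name/parameters JSON-schema shape.

    import typing

    from humanloop.requests.agent_inline_tool import AgentInlineToolParams
    from humanloop.requests.agent_kernel_request_tools_item import (
        AgentKernelRequestToolsItemParams,
    )
    from humanloop.requests.agent_linked_file_request import AgentLinkedFileRequestParams

    linked: AgentLinkedFileRequestParams = {
        "type": "file",
        "link": {"file_id": "tl_1234"},  # hypothetical File ID
    }
    inline: AgentInlineToolParams = {
        "type": "inline",
        "json_schema": {
            "name": "get_weather",  # illustrative inline tool
            "parameters": {"type": "object", "properties": {}},
        },
    }
    tools: typing.Sequence[AgentKernelRequestToolsItemParams] = [linked, inline]
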
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
new file mode 100644
index 00000000..8a690a77
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+
+
+class AgentLinkedFileResponseParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
+ file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"]
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
new file mode 100644
index 00000000..bb328de2
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponseParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
+ from .evaluator_response import EvaluatorResponseParams
+ from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
+AgentLinkedFileResponseFileParams = typing.Union[
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
+]
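
Aside: several of these generated modules import their cross-referenced models only under typing.TYPE_CHECKING and spell them as string forward references. This breaks the runtime import cycles between mutually-referencing response models while keeping static type checkers working. A generic sketch of the pattern (module and type names hypothetical):

    from __future__ import annotations

    import typing

    if typing.TYPE_CHECKING:
        # Imported only during type checking, so the other module can import
        # this one at runtime without triggering a circular import.
        from other_module import OtherParams  # hypothetical module

    ThisOrOther = typing.Union["OtherParams", int]  # string defers resolution
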
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
new file mode 100644
index 00000000..0cb24b8a
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+import typing
+
+if typing.TYPE_CHECKING:
+ from .evaluator_log_response import EvaluatorLogResponseParams
+ from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..e239a69c
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentLogResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
new file mode 100644
index 00000000..710d55cf
--- /dev/null
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .chat_message import ChatMessageParams
+
+
+class AgentLogStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Agent-specific log output returned when streaming an Agent call.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ agent_id: str
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str
+ """
+ ID of the specific version of the Agent.
+ """
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
new file mode 100644
index 00000000..047904a7
--- /dev/null
+++ b/src/humanloop/requests/agent_response.py
@@ -0,0 +1,237 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_response_tools_item import AgentResponseToolsItemParams
+ from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing_extensions.NotRequired[str]
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentResponseStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Sequence["AgentResponseToolsItemParams"]
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing_extensions.NotRequired[str]
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing_extensions.NotRequired[str]
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Description of the Agent.
+ """
+
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing_extensions.NotRequired[str]
+ """
+ Long description of the file.
+ """
+
+ name: str
+ """
+ Name of the Agent.
+ """
+
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing_extensions.NotRequired[typing.Literal["agent"]]
+ environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int
+ """
+ The number of logs that have been generated for this Agent Version
+ """
+
+ total_logs_count: int
+ """
+ The number of logs that have been generated across all Agent Versions
+ """
+
+ inputs: typing.Sequence[InputResponseParams]
+ """
+ Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..de1b969f
--- /dev/null
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py
new file mode 100644
index 00000000..a395ee73
--- /dev/null
+++ b/src/humanloop/requests/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
new file mode 100644
index 00000000..94be65f1
--- /dev/null
+++ b/src/humanloop/requests/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
new file mode 100644
index 00000000..5181579b
--- /dev/null
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineToolParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponseParams
+AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams]
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..3b328f7f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["redacted_thinking"]
+ data: str
+ """
+ Thinking block redacted by Anthropic for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
new file mode 100644
index 00000000..34f6f99f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["thinking"]
+ thinking: str
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index cab8466d..6011653a 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -6,6 +6,7 @@
from ..types.chat_role import ChatRole
import typing
from .tool_call import ToolCallParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
class ChatMessageParams(typing_extensions.TypedDict):
@@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict):
"""
A list of tool calls requested by the assistant.
"""
+
+ thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]]
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
new file mode 100644
index 00000000..0691f4d8
--- /dev/null
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
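
Usage note: with the new thinking field on ChatMessageParams, an assistant message can carry the two Anthropic block shapes defined above. The content, signature and redacted data values are placeholders.

    from humanloop.requests.chat_message import ChatMessageParams

    assistant_message: ChatMessageParams = {
        "role": "assistant",
        "content": "The answer is 42.",
        "thinking": [
            {
                "type": "thinking",
                "thinking": "Work through the question step by step...",
                "signature": "sig_abc123",  # placeholder signature
            },
            # Redacted block: pass it back to Anthropic as-is.
            {"type": "redacted_thinking", "data": "opaque-blob"},
        ],
    }
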
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..b1715517
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1d59ed4b..1cffd2b2 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict):
Description of the Dataset.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index f101bf15..db9370b9 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,13 @@
from .evaluator_response import EvaluatorResponseParams
from .dataset_response import DatasetResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ EvaluatorResponseParams,
+ DatasetResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 908eeb2d..1ff836fb 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict):
Description of the Evaluator.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 4ac6b0c3..04c0b51d 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -6,7 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
FileEnvironmentResponseFileParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py
new file mode 100644
index 00000000..bb70bda4
--- /dev/null
+++ b/src/humanloop/requests/file_environment_variable_request.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict):
+ name: str
+ """
+ Name of the environment variable.
+ """
+
+ value: str
+ """
+ Value of the environment variable.
+ """
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 18a26d10..eebc9fd7 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict):
Description of the Flow.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
new file mode 100644
index 00000000..2bbba19c
--- /dev/null
+++ b/src/humanloop/requests/linked_file_request.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+
+
+class LinkedFileRequestParams(typing_extensions.TypedDict):
+ file_id: str
+ environment_id: typing_extensions.NotRequired[str]
+ version_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
new file mode 100644
index 00000000..4a72f1db
--- /dev/null
+++ b/src/humanloop/requests/list_agents.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class ListAgentsParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ """
+ The list of Agents.
+ """
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index 15a4cff6..cb3ce212 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -9,6 +9,11 @@
from .tool_log_response import ToolLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
LogResponseParams = typing.Union[
- "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams"
+ "PromptLogResponseParams",
+ "ToolLogResponseParams",
+ "EvaluatorLogResponseParams",
+ "FlowLogResponseParams",
+ "AgentLogResponseParams",
]
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
new file mode 100644
index 00000000..e142e7fb
--- /dev/null
+++ b/src/humanloop/requests/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponseParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+
+LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
new file mode 100644
index 00000000..c8d67533
--- /dev/null
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class PaginatedDataAgentResponseParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ page: int
+ size: int
+ total: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 65%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index cf8bc4bf..0e7adb64 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,16 +2,16 @@
import typing_extensions
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams(
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams(
typing_extensions.TypedDict
):
records: typing.Sequence[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams
]
page: int
size: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 58%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 1ba74108..b43a5521 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,9 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = (
- typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
- ]
-)
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
+]
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 190341b0..491cacd3 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -9,7 +9,7 @@
from .populate_template_response_stop import PopulateTemplateResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..6b1dd46a
--- /dev/null
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 61355166..1e4f56de 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -9,11 +9,17 @@
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
class PromptKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..0c3d194b
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index 912866c5..b6ff03df 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -10,7 +10,7 @@
from .prompt_response_stop import PromptResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..4d019051
--- /dev/null
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 879ea25c..569d0d76 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -5,7 +5,8 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
RunVersionResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..1c92b28f
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,145 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponseParams
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index bac9dbbb..1aa0daea 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -7,6 +7,7 @@
import typing
from ..types.log_status import LogStatus
from .tool_response import ToolResponseParams
+from .chat_message import ChatMessageParams
import typing
if typing.TYPE_CHECKING:
@@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
"""
Tool used to generate the Log.
"""
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the Tool.
+ """
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 8a16af00..9659cb49 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionDeploymentResponseFileParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 50ecf7bc..9c317679 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionIdResponseVersionParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index 16d75bd7..ea6b14a2 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -3,10 +3,11 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawToolsClient
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
+from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..types.project_sort_by import ProjectSortBy
@@ -29,6 +30,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawToolsClient
from ..core.pagination import AsyncPager
@@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient:
"""
return self._raw_client
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
def log(
self,
*,
@@ -59,6 +189,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -78,7 +209,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -106,6 +236,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -163,9 +296,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -206,6 +336,7 @@ def log(
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -225,7 +356,6 @@ def log(
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -966,6 +1096,112 @@ def update_monitoring(
)
return response.data
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.get_environment_variables(
+ id="id",
+ )
+ """
+ response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ """
+ response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ """
+ response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
+
class AsyncToolsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient:
"""
return self._raw_client
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
async def log(
self,
*,
@@ -989,6 +1360,7 @@ async def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1008,7 +1380,6 @@ async def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -1036,6 +1407,9 @@ async def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1093,9 +1467,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1144,6 +1515,7 @@ async def main() -> None:
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -1163,7 +1535,6 @@ async def main() -> None:
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -2010,3 +2381,133 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.get_environment_variables(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
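
Taken together, `ToolsClient` and `AsyncToolsClient` gain the `call` method plus three environment-variable helpers. A sketch of the environment-variable round trip (hedged: the Tool ID and variable values are placeholders, and this assumes `FileEnvironmentVariableRequest` exposes `name` as an attribute):

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    tool_id = "tool_placeholder"  # hypothetical Tool ID

    # Attach a secret the Tool's source code can read at call time.
    client.tools.add_environment_variable(
        id=tool_id,
        request=[{"name": "API_TOKEN", "value": "secret-value"}],
    )
    # List what is currently set, then clean up.
    for variable in client.tools.get_environment_variables(id=tool_id):
        print(variable.name)
    client.tools.delete_environment_variable(id=tool_id, name="API_TOKEN")
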
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 4a1f29e9..b412b771 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -2,18 +2,19 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..requests.tool_function import ToolFunctionParams
@@ -27,6 +28,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -38,6 +41,159 @@ class RawToolsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def log(
self,
*,
@@ -45,6 +201,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -64,7 +221,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateToolLogResponse]:
"""
@@ -92,6 +248,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -149,9 +308,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -170,6 +326,9 @@ def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -189,9 +348,6 @@ def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -1038,75 +1194,387 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
-class AsyncRawToolsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- async def log(
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_environment_variable(
self,
+ id: str,
*,
- version_id: typing.Optional[str] = None,
- environment: typing.Optional[str] = None,
- path: typing.Optional[str] = OMIT,
- id: typing.Optional[str] = OMIT,
- start_time: typing.Optional[dt.datetime] = OMIT,
- end_time: typing.Optional[dt.datetime] = OMIT,
- output: typing.Optional[str] = OMIT,
- created_at: typing.Optional[dt.datetime] = OMIT,
- error: typing.Optional[str] = OMIT,
- provider_latency: typing.Optional[float] = OMIT,
- stdout: typing.Optional[str] = OMIT,
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- source: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
- source_datapoint_id: typing.Optional[str] = OMIT,
- trace_parent_id: typing.Optional[str] = OMIT,
- user: typing.Optional[str] = OMIT,
- tool_log_request_environment: typing.Optional[str] = OMIT,
- save: typing.Optional[bool] = OMIT,
- log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
"""
- Log to a Tool.
-
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Tool. Otherwise the default deployed version will be chosen.
-
- Instead of targeting an existing version explicitly, you can instead pass in
- Tool details in the request body. In this case, we will check if the details correspond
- to an existing version of the Tool, if not we will create a new version. This is helpful
- in the case where you are storing or deriving your Tool details in code.
+ Add an environment variable to a Tool.
Parameters
----------
- version_id : typing.Optional[str]
- A specific Version ID of the Tool to log to.
-
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
-
- path : typing.Optional[str]
- Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
- id : typing.Optional[str]
- ID for an existing Tool.
-
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ id : str
+ Unique identifier for Tool.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- created_at : typing.Optional[dt.datetime]
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ """
+ Log to a Tool.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool; if not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
User defined timestamp for when the log was created.
error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1175,6 +1640,9 @@ async def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -1194,9 +1662,6 @@ async def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -2044,3 +2509,159 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
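Reviewer note: the three endpoints above round out environment-variable management on the async Tools client. Below is a minimal usage sketch, assuming the generated `AsyncHumanloop` client exposes these methods under `client.tools` and that `FileEnvironmentVariableRequestParams` is a TypedDict with `name`/`value` keys (neither assumption is confirmed by this hunk; `"tl_..."` is a placeholder id):

```python
import asyncio

from humanloop import AsyncHumanloop  # assumed package-level async client


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    # POST tools/{id}/environment-variables: body is a sequence of params.
    await client.tools.add_environment_variable(
        id="tl_...",
        request=[{"name": "SERPAPI_KEY", "value": "secret"}],  # assumed key names
    )

    # GET tools/{id}/environment-variables
    env_vars = await client.tools.get_environment_variables(id="tl_...")
    print(env_vars)

    # DELETE tools/{id}/environment-variables/{name}
    await client.tools.delete_environment_variable(id="tl_...", name="SERPAPI_KEY")


asyncio.run(main())
```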
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 156f4e9a..7c1d30f5 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,15 +1,44 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponse
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .agent_call_stream_response import AgentCallStreamResponse
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
+from .agent_continue_call_response import AgentContinueCallResponse
+from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
+from .agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
+from .agent_inline_tool import AgentInlineTool
+from .agent_kernel_request import AgentKernelRequest
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile
+from .agent_log_response import AgentLogResponse
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+from .agent_log_stream_response import AgentLogStreamResponse
+from .agent_response import AgentResponse
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
+from .agent_response_tools_item import AgentResponseToolsItem
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
from .base_models_user_response import BaseModelsUserResponse
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
from .chat_message import ChatMessage
from .chat_message_content import ChatMessageContent
from .chat_message_content_item import ChatMessageContentItem
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .chat_tool_type import ChatToolType
from .code_evaluator_request import CodeEvaluatorRequest
from .config_tool_response import ConfigToolResponse
+from .create_agent_log_response import CreateAgentLogResponse
from .create_datapoint_request import CreateDatapointRequest
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
from .create_evaluator_log_response import CreateEvaluatorLogResponse
@@ -56,10 +85,12 @@
from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .evaluator_version_id import EvaluatorVersionId
from .evaluators_request import EvaluatorsRequest
+from .event_type import EventType
from .external_evaluator_request import ExternalEvaluatorRequest
from .feedback_type import FeedbackType
from .file_environment_response import FileEnvironmentResponse
from .file_environment_response_file import FileEnvironmentResponseFile
+from .file_environment_variable_request import FileEnvironmentVariableRequest
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
@@ -77,7 +108,9 @@
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .input_response import InputResponse
+from .linked_file_request import LinkedFileRequest
from .linked_tool_response import LinkedToolResponse
+from .list_agents import ListAgents
from .list_datasets import ListDatasets
from .list_evaluators import ListEvaluators
from .list_flows import ListFlows
@@ -86,6 +119,7 @@
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
from .log_status import LogStatus
+from .log_stream_response import LogStreamResponse
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -94,18 +128,21 @@
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .observability_status import ObservabilityStatus
+from .on_agent_call_enum import OnAgentCallEnum
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
from .overall_stats import OverallStats
+from .paginated_data_agent_response import PaginatedDataAgentResponse
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from .paginated_data_flow_response import PaginatedDataFlowResponse
from .paginated_data_log_response import PaginatedDataLogResponse
from .paginated_data_prompt_response import PaginatedDataPromptResponse
from .paginated_data_tool_response import PaginatedDataToolResponse
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from .paginated_datapoint_response import PaginatedDatapointResponse
from .paginated_dataset_response import PaginatedDatasetResponse
@@ -115,6 +152,7 @@
from .platform_access_enum import PlatformAccessEnum
from .populate_template_response import PopulateTemplateResponse
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
from .project_sort_by import ProjectSortBy
@@ -123,15 +161,16 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .prompt_call_stream_response import PromptCallStreamResponse
from .prompt_kernel_request import PromptKernelRequest
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .prompt_log_response import PromptLogResponse
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
from .prompt_response import PromptResponse
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
from .prompt_response_template import PromptResponseTemplate
from .provider_api_keys import ProviderApiKeys
-from .reasoning_effort import ReasoningEffort
from .response_format import ResponseFormat
from .response_format_type import ResponseFormatType
from .run_stats_response import RunStatsResponse
@@ -144,6 +183,7 @@
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
from .time_unit import TimeUnit
from .tool_call import ToolCall
+from .tool_call_response import ToolCallResponse
from .tool_choice import ToolChoice
from .tool_function import ToolFunction
from .tool_kernel_request import ToolKernelRequest
@@ -167,16 +207,45 @@
from .version_status import VersionStatus
__all__ = [
+ "AgentCallResponse",
+ "AgentCallResponseToolChoice",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponsePayload",
"AgentConfigResponse",
+ "AgentContinueCallResponse",
+ "AgentContinueCallResponseToolChoice",
+ "AgentContinueCallStreamResponse",
+ "AgentContinueCallStreamResponsePayload",
+ "AgentInlineTool",
+ "AgentKernelRequest",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestToolsItem",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLogResponse",
+ "AgentLogResponseToolChoice",
+ "AgentLogStreamResponse",
+ "AgentResponse",
+ "AgentResponseReasoningEffort",
+ "AgentResponseStop",
+ "AgentResponseTemplate",
+ "AgentResponseToolsItem",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicThinkingContent",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
"ChatMessage",
"ChatMessageContent",
"ChatMessageContentItem",
+ "ChatMessageThinkingItem",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
"CreateDatapointRequest",
"CreateDatapointRequestTargetValue",
"CreateEvaluatorLogResponse",
@@ -221,10 +290,12 @@
"EvaluatorReturnTypeEnum",
"EvaluatorVersionId",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"FeedbackType",
"FileEnvironmentResponse",
"FileEnvironmentResponseFile",
+ "FileEnvironmentVariableRequest",
"FileId",
"FilePath",
"FileRequest",
@@ -242,7 +313,9 @@
"ImageUrl",
"ImageUrlDetail",
"InputResponse",
+ "LinkedFileRequest",
"LinkedToolResponse",
+ "ListAgents",
"ListDatasets",
"ListEvaluators",
"ListFlows",
@@ -251,6 +324,7 @@
"LlmEvaluatorRequest",
"LogResponse",
"LogStatus",
+ "LogStreamResponse",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -259,15 +333,18 @@
"MonitoringEvaluatorVersionRequest",
"NumericEvaluatorStatsResponse",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
+ "PaginatedDataAgentResponse",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluatorResponse",
"PaginatedDataFlowResponse",
"PaginatedDataLogResponse",
"PaginatedDataPromptResponse",
"PaginatedDataToolResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
"PaginatedDatapointResponse",
"PaginatedDatasetResponse",
"PaginatedEvaluationResponse",
@@ -276,6 +353,7 @@
"PlatformAccessEnum",
"PopulateTemplateResponse",
"PopulateTemplateResponsePopulatedTemplate",
+ "PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
"ProjectSortBy",
@@ -284,15 +362,16 @@
"PromptCallResponseToolChoice",
"PromptCallStreamResponse",
"PromptKernelRequest",
+ "PromptKernelRequestReasoningEffort",
"PromptKernelRequestStop",
"PromptKernelRequestTemplate",
"PromptLogResponse",
"PromptLogResponseToolChoice",
"PromptResponse",
+ "PromptResponseReasoningEffort",
"PromptResponseStop",
"PromptResponseTemplate",
"ProviderApiKeys",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatType",
"RunStatsResponse",
@@ -305,6 +384,7 @@
"TextEvaluatorStatsResponse",
"TimeUnit",
"ToolCall",
+ "ToolCallResponse",
"ToolChoice",
"ToolFunction",
"ToolKernelRequest",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
new file mode 100644
index 00000000..ba3bbfec
--- /dev/null
+++ b/src/humanloop/types/agent_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentCallResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
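The `log_status` and `previous_agent_message` docstrings above describe a suspend-and-resume protocol around tool calls. A hedged consumer-side sketch follows: only the `AgentCallResponse` fields come from this diff, while the client surface (`client.agents.call`, and whichever method wraps POST /agents/continue) and the `ChatMessage.tool_calls` attribute are assumptions:

```python
from humanloop.types import AgentCallResponse, ChatMessage


def run_tool(tool_call) -> ChatMessage:
    """Application-level tool executor (placeholder)."""
    raise NotImplementedError


def resume_if_suspended(client, response: AgentCallResponse) -> AgentCallResponse:
    # "incomplete" means the Agent turn paused on tool calls (per log_status docs).
    if response.log_status != "incomplete":
        return response
    last = response.previous_agent_message  # the Agent's tool-call message
    assert last is not None
    results = [run_tool(tc) for tc in (last.tool_calls or [])]  # tool_calls assumed
    # The method name for POST /agents/continue is hypothetical:
    return client.agents.continue_(log_id=response.id, messages=results)
```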
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..95eca73e
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
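For reference, the accepted shapes of this union mirror the four `tool_choice` modes documented in `AgentCallResponse` above. The `ToolChoice` model's fields are not shown in this diff, so its constructor is left commented:

```python
from humanloop.types import AgentCallResponseToolChoice

none_choice: AgentCallResponseToolChoice = "none"
auto_choice: AgentCallResponseToolChoice = "auto"
required_choice: AgentCallResponseToolChoice = "required"
# Forcing a named function goes through the ToolChoice model (fields assumed):
# forced: AgentCallResponseToolChoice = ToolChoice(type="function", function=...)
```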
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..673d3738
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+ """
+ Response model for calling Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
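Because the streaming payload is a plain three-way union of concrete models, consumers can narrow it with `isinstance`. A sketch, assuming the client yields `AgentCallStreamResponse` events as an async iterator (the iteration surface is not part of this diff):

```python
import typing

from humanloop.types import (
    AgentCallStreamResponse,
    LogResponse,
    LogStreamResponse,
    ToolCall,
)


async def consume(stream: typing.AsyncIterator[AgentCallStreamResponse]) -> None:
    async for event in stream:
        payload = event.payload
        if payload is None:
            continue
        if isinstance(payload, ToolCall):
            ...  # the Agent requested a tool invocation
        elif isinstance(payload, LogStreamResponse):
            ...  # streamed Log delta (model defined elsewhere in this diff)
        else:  # LogResponse
            ...  # the full Log for the finished turn
```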
diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py
new file mode 100644
index 00000000..c98af953
--- /dev/null
+++ b/src/humanloop/types/agent_continue_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueCallResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py
new file mode 100644
index 00000000..5b90e98d
--- /dev/null
+++ b/src/humanloop/types/agent_continue_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py
new file mode 100644
index 00000000..cdd34dce
--- /dev/null
+++ b/src/humanloop/types/agent_continue_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueCallStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py
new file mode 100644
index 00000000..8e23829b
--- /dev/null
+++ b/src/humanloop/types/agent_continue_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..20f3fb75
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
new file mode 100644
index 00000000..ff7a0fac
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..0e5f8a58
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+ type: typing.Literal["inline"] = "inline"
+ json_schema: ToolFunction
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, the model will make a best effort to sample deterministically, but this is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
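A construction sketch for the new kernel model. `AgentInlineTool`'s fields come from this diff; the `ToolFunction` kwargs and the model id are illustrative assumptions:

```python
from humanloop.types import AgentInlineTool, AgentKernelRequest, ToolFunction

kernel = AgentKernelRequest(
    model="claude-3-7-sonnet-latest",  # example model id, not prescriptive
    template="You are a helpful agent. Question: {{question}}",
    # Union[OpenAiReasoningEffort, int]: an int is read as the Anthropic-style
    # reasoning-token budget, per the reasoning_effort docstring above.
    reasoning_effort=1024,
    tools=[
        AgentInlineTool(
            json_schema=ToolFunction(  # ToolFunction fields assumed
                name="get_weather",
                parameters={"type": "object", "properties": {}},
            )
        )
    ],
    max_iterations=5,
)
```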
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..82c2fecf
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_inline_tool import AgentInlineTool
+
+AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
new file mode 100644
index 00000000..9efd4b6a
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentLinkedFileRequest(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
new file mode 100644
index 00000000..d85d682e
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+ file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+
+update_forward_refs(AgentLinkedFileResponse)
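
The imports at the bottom of the module, followed by the `update_forward_refs` call, are how these generated files break import cycles: AgentLinkedFileResponse refers to file types that themselves refer back to it. A minimal sketch of the same pattern with hypothetical names:

    from __future__ import annotations
    import typing
    import pydantic


    class Node(pydantic.BaseModel):
        # Forward reference kept as a string until the target class exists.
        child: typing.Optional["Leaf"] = None


    class Leaf(pydantic.BaseModel):
        value: str


    # Resolve the string annotation now that both classes are defined; the
    # SDK's update_forward_refs helper wraps this same mechanism.
    Node.update_forward_refs()
    print(Node(child=Leaf(value="x")).child)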
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
+ from .evaluator_response import EvaluatorResponse
+ from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..f5b5e8e8
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+    Response model for an Agent Log.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+    The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
+from .flow_log_response import FlowLogResponse # noqa: E402
+from .prompt_log_response import PromptLogResponse # noqa: E402
+from .tool_log_response import ToolLogResponse # noqa: E402
+from .log_response import LogResponse # noqa: E402
+
+update_forward_refs(AgentLogResponse)
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..5cb07628
--- /dev/null
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentLogResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
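
A small sketch of the shapes this union admits; the string literals select a mode, while the structured ToolChoice arm forces a specific named function:

    from humanloop.types.agent_log_response_tool_choice import AgentLogResponseToolChoice

    def describe(choice: AgentLogResponseToolChoice) -> str:
        # The three literals are plain strings; anything else is a
        # structured ToolChoice object naming the function to call.
        if isinstance(choice, str):
            return f"mode: {choice}"
        return "forced call of a named function"

    print(describe("auto"))      # mode: auto
    print(describe("required"))  # mode: required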
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
new file mode 100644
index 00000000..91547189
--- /dev/null
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+import datetime as dt
+from .chat_message import ChatMessage
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentLogStreamResponse(UncheckedBaseModel):
+ """
+    Agent-specific log output returned when streaming an Agent call.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ ID of the specific version of the Agent.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
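
A minimal sketch of materializing a streamed chunk; the field values are placeholders, and `extra="allow"` means unrecognized keys in a payload survive deserialization rather than raising:

    from humanloop.types.agent_log_stream_response import AgentLogStreamResponse

    chunk = AgentLogStreamResponse(
        id="log_123",
        agent_id="ag_456",
        version_id="agv_789",
        output="partial output so far",
    )
    print(chunk.output)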
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
new file mode 100644
index 00000000..e58aaeba
--- /dev/null
+++ b/src/humanloop/types/agent_response.py
@@ -0,0 +1,260 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStop
+from .response_format import ResponseFormat
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+import typing_extensions
+from ..core.serialization import FieldMetadata
+from .environment_response import EnvironmentResponse
+import datetime as dt
+from .user_response import UserResponse
+from .version_status import VersionStatus
+from .input_response import InputResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentResponse(UncheckedBaseModel):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str = pydantic.Field()
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None)
+ """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.List["AgentResponseToolsItem"] = pydantic.Field()
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the Agent.
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Long description of the file.
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the Agent.
+ """
+
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+    The JSON schema for the File.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing.Optional[typing.Literal["agent"]] = None
+ environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus = pydantic.Field()
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int = pydantic.Field()
+ """
+    The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int = pydantic.Field()
+ """
+    The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.List[InputResponse] = pydantic.Field()
+ """
+ Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+
+update_forward_refs(AgentResponse)
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..59254f38
--- /dev/null
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
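
A sketch of the two accepted shapes, per the docstring on AgentResponse.reasoning_effort: OpenAI reasoning models take an effort level, Anthropic reasoning models take a thinking-token budget:

    from humanloop.types.agent_response_reasoning_effort import AgentResponseReasoningEffort

    openai_effort: AgentResponseReasoningEffort = "medium"  # OpenAiReasoningEffort literal
    anthropic_budget: AgentResponseReasoningEffort = 2048   # max reasoning-token budget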
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+    Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c09f2768..c72bc90d 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -6,6 +6,7 @@
import pydantic
from .chat_role import ChatRole
from .tool_call import ToolCall
+from .chat_message_thinking_item import ChatMessageThinkingItem
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel):
A list of tool calls requested by the assistant.
"""
+ thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
+ """
+    Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
new file mode 100644
index 00000000..0a507724
--- /dev/null
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContent
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+
+ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
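
A minimal sketch of the assistant message shape these types enable, assuming ChatRole accepts the "assistant" literal and content accepts a plain string; the thinking text and signature are placeholders:

    from humanloop.types.anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
    from humanloop.types.anthropic_thinking_content import AnthropicThinkingContent
    from humanloop.types.chat_message import ChatMessage

    message = ChatMessage(
        role="assistant",
        content="The answer is 42.",
        thinking=[
            AnthropicThinkingContent(thinking="Work through the problem...", signature="sig_abc"),
            AnthropicRedactedThinkingContent(data="opaque-redacted-payload"),
        ],
    )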
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
new file mode 100644
index 00000000..9dc66629
--- /dev/null
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateAgentLogResponse(UncheckedBaseModel):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+    Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index af79f597..2c614521 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel):
Description of the Dataset.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 5828a678..51f879b8 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 0bfeebf7..9d0d5fc4 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,8 @@
from .evaluator_response import EvaluatorResponse
from .dataset_response import DatasetResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
- PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse
+ PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 9ba9fe4d..4332aa12 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 413081c6..0c7de27e 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 6c931db0..84d117e2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index f113fff5..bcda94a4 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 1203ce2c..74d59e4c 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index d91e1ee9..e09b2a73 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e457d580..71ca76c0 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -189,6 +191,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 175f456d..712ca698 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -5,6 +5,8 @@
import pydantic
import typing
from .evaluator_response_spec import EvaluatorResponseSpec
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel):
Description of the Evaluator.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -124,6 +133,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
new file mode 100644
index 00000000..128eed92
--- /dev/null
+++ b/src/humanloop/types/event_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EventType = typing.Union[
+ typing.Literal[
+ "agent_turn_start",
+ "agent_turn_suspend",
+ "agent_turn_continue",
+ "agent_turn_end",
+ "agent_start",
+ "agent_update",
+ "agent_end",
+ "tool_start",
+ "tool_update",
+ "tool_end",
+ "error",
+ "agent_generation_error",
+ ],
+ typing.Any,
+]
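
The typing.Any arm makes this union open: values outside the listed literals still deserialize, so event kinds added server-side do not break older clients. A sketch of defensive handling:

    from humanloop.types.event_type import EventType

    KNOWN_EVENTS = {
        "agent_turn_start", "agent_turn_suspend", "agent_turn_continue",
        "agent_turn_end", "agent_start", "agent_update", "agent_end",
        "tool_start", "tool_update", "tool_end", "error",
        "agent_generation_error",
    }

    def handle(event: EventType) -> None:
        if event in KNOWN_EVENTS:
            print(f"handling {event}")
        else:
            # Forward-compatible fallback for unrecognized event kinds.
            print(f"ignoring unknown event: {event!r}")

    handle("tool_start")
    handle("some_future_event")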
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 70ed322f..7f34b7b3 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 2a105c9d..0254c2b8 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
FileEnvironmentResponseFile = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
new file mode 100644
index 00000000..8108245b
--- /dev/null
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class FileEnvironmentVariableRequest(UncheckedBaseModel):
+ name: str = pydantic.Field()
+ """
+ Name of the environment variable.
+ """
+
+ value: str = pydantic.Field()
+ """
+ Value of the environment variable.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py
index 7a870b84..f235825b 100644
--- a/src/humanloop/types/file_type.py
+++ b/src/humanloop/types/file_type.py
@@ -2,4 +2,4 @@
import typing
-FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any]
+FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any]
diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py
index c32b9755..753d9ba2 100644
--- a/src/humanloop/types/files_tool_type.py
+++ b/src/humanloop/types/files_tool_type.py
@@ -3,5 +3,5 @@
import typing
FilesToolType = typing.Union[
- typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any
+ typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index ba1e1cf6..58a87fac 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -173,6 +175,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 4017b3b7..7768778e 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -4,6 +4,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel):
Description of the Flow.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -111,6 +120,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
new file mode 100644
index 00000000..ee45ffdf
--- /dev/null
+++ b/src/humanloop/types/linked_file_request.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class LinkedFileRequest(UncheckedBaseModel):
+ file_id: str
+ environment_id: typing.Optional[str] = None
+ version_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
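
A sketch of the three ways a linked File can be pinned; the IDs are placeholders, and leaving both optional fields unset presumably resolves to the default deployed Version:

    from humanloop.types.linked_file_request import LinkedFileRequest

    by_default = LinkedFileRequest(file_id="pr_123")
    by_environment = LinkedFileRequest(file_id="pr_123", environment_id="env_prod")
    by_version = LinkedFileRequest(file_id="pr_123", version_id="prv_456")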
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
new file mode 100644
index 00000000..36481f41
--- /dev/null
+++ b/src/humanloop/types/list_agents.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ListAgents(UncheckedBaseModel):
+ records: typing.List[AgentResponse] = pydantic.Field()
+ """
+ The list of Agents.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 61edbec5..7b736e14 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 686dab26..41ec4008 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index 94cda05e..f773d3f9 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 4080a6a1..84ddc89c 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index 0ba81dd3..cd7a0a26 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -9,4 +9,7 @@
from .tool_log_response import ToolLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
-LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"]
+ from .agent_log_response import AgentLogResponse
+LogResponse = typing.Union[
+ "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
+]
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
new file mode 100644
index 00000000..69ffacf4
--- /dev/null
+++ b/src/humanloop/types/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponse
+from .agent_log_stream_response import AgentLogStreamResponse
+
+LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
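
Both arms of the union are concrete models, so isinstance is enough to tell streamed Prompt and Agent payloads apart; a minimal sketch:

    from humanloop.types.agent_log_stream_response import AgentLogStreamResponse
    from humanloop.types.log_stream_response import LogStreamResponse

    def source_of(chunk: LogStreamResponse) -> str:
        if isinstance(chunk, AgentLogStreamResponse):
            return f"agent {chunk.agent_id}"
        return "prompt call"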
diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py
index 8473d2ae..3f2c99fb 100644
--- a/src/humanloop/types/model_providers.py
+++ b/src/humanloop/types/model_providers.py
@@ -4,7 +4,7 @@
ModelProviders = typing.Union[
typing.Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek"
+ "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate"
],
typing.Any,
]
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index e70dc4fb..1809af57 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -39,6 +39,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py
new file mode 100644
index 00000000..3730256e
--- /dev/null
+++ b/src/humanloop/types/on_agent_call_enum.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any]
diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py
new file mode 100644
index 00000000..d8c48547
--- /dev/null
+++ b/src/humanloop/types/open_ai_reasoning_effort.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
new file mode 100644
index 00000000..0febbadd
--- /dev/null
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedDataAgentResponse(UncheckedBaseModel):
+ records: typing.List[AgentResponse]
+ page: int
+ size: int
+ total: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index 9e3c568e..c508f8a6 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 275f0528..2e82c736 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 990d58be..6cfcf9ae 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 57bae587..f41ca9ba 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index ff71e584..d9e1d914 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index 0e52b361..e2962e87 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 76%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index bd7082b3..87d5b603 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -9,16 +11,18 @@
from .version_deployment_response import VersionDeploymentResponse
from .version_id_response import VersionIdResponse
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+ UncheckedBaseModel
+):
records: typing.List[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
]
page: int
size: int
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+ typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 78e177e8..16232e0b 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index d587d175..efcd1d0c 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -16,9 +18,11 @@
from .model_providers import ModelProviders
from .populate_template_response_stop import PopulateTemplateResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
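
The new `schema_` field is aliased so the Python attribute avoids clashing with pydantic's reserved `schema` name while the wire key stays `"schema"`. A minimal, self-contained sketch of the same aliasing in plain pydantic v2; the generated models route this through `FieldMetadata`, so the `Example` model below is illustrative only:

import typing

import pydantic


class Example(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(populate_by_name=True)

    # Attribute is `schema_`; the serialized key is "schema".
    schema_: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None, alias="schema")


example = Example.model_validate({"schema": {"type": "object"}})
assert example.schema_ == {"type": "object"}
assert example.model_dump(by_alias=True)["schema"] == {"type": "object"}
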
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..8dd9f7f6
--- /dev/null
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 4e1ae69c..ec74437f 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..80ba5ed5 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -9,12 +9,18 @@
from .model_providers import ModelProviders
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .tool_function import ToolFunction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+    Contains the Prompt-related fields common to both.
+ """
+
model: str = pydantic.Field()
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..dda61bb4
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
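
`reasoning_effort` is now a per-request union instead of the old shared literal type. A hedged sketch of what the widened field accepts, assuming `model` is the only required field on `PromptKernelRequest` and that `OpenAiReasoningEffort` still covers the "high"/"medium"/"low" literals:

from humanloop.types.prompt_kernel_request import PromptKernelRequest

# OpenAI reasoning models take a qualitative effort level...
openai_kernel = PromptKernelRequest(model="o3-mini", reasoning_effort="high")

# ...while Anthropic reasoning models take an integer maximum token budget.
anthropic_kernel = PromptKernelRequest(model="claude-3-7-sonnet-latest", reasoning_effort=4096)
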
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 2a1bad11..a9e26318 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 07f4755d..5d6ff870 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -10,9 +10,11 @@
from .model_providers import ModelProviders
from .prompt_response_stop import PromptResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -224,6 +233,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..e136637f
--- /dev/null
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py
deleted file mode 100644
index da0a0354..00000000
--- a/src/humanloop/types/reasoning_effort.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
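
With `AgentResponse` added to `RunVersionResponse`, code that branches on the version type needs one more case. An illustrative narrowing helper; the function name is hypothetical, and it assumes each member of the union exposes the `version_id` field shown elsewhere in this diff:

from humanloop.types.agent_response import AgentResponse
from humanloop.types.run_version_response import RunVersionResponse


def describe_version(version: RunVersionResponse) -> str:
    # AgentResponse is the newly added member of the union.
    if isinstance(version, AgentResponse):
        return f"agent version {version.version_id}"
    return f"{type(version).__name__} {version.version_id}"
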
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..55bf2712
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,168 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ToolCallResponse(UncheckedBaseModel):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponse = pydantic.Field()
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
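
The new model follows the same configuration as the other generated responses: instances are frozen and unknown keys are preserved. A small sketch of checking that behavior, assuming pydantic v2 is active (`IS_PYDANTIC_V2`):

from humanloop.core.pydantic_utilities import IS_PYDANTIC_V2
from humanloop.types.tool_call_response import ToolCallResponse

if IS_PYDANTIC_V2:
    # frozen=True makes instances immutable; extra="allow" keeps
    # unrecognized provider fields instead of dropping them.
    assert ToolCallResponse.model_config["frozen"] is True
    assert ToolCallResponse.model_config["extra"] == "allow"
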
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 1b6081c3..251223af 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -13,6 +15,7 @@
import datetime as dt
import pydantic
from .log_status import LogStatus
+from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.pydantic_utilities import update_forward_refs
@@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel):
Tool used to generate the Log.
"""
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the Tool.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -162,6 +170,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 0b835918..70537215 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -152,6 +152,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index e2e82d9f..0db57d69 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionDeploymentResponseFile = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 877851a9..e3f5dc27 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..1b74199f 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -3,13 +3,18 @@
from __future__ import annotations
import typing
from .dataset_response import DatasetResponse
-import typing
if typing.TYPE_CHECKING:
from .prompt_response import PromptResponse
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse",
+ "ToolResponse",
+ DatasetResponse,
+ "EvaluatorResponse",
+ "FlowResponse",
+ "AgentResponse",
]
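
These unions quote their members and import them only under `typing.TYPE_CHECKING`, which breaks the circular imports between the response models at runtime. A self-contained sketch of the pattern; the module and type names are illustrative, not SDK names:

from __future__ import annotations

import typing

if typing.TYPE_CHECKING:
    # Only type checkers execute this import, so there is no runtime cycle.
    from other_module import OtherResponse

# Quoted members stay as ForwardRefs until a type checker resolves them.
MyUnion = typing.Union["OtherResponse", int]
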
diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py
index 399361c8..a6a7783c 100644
--- a/src/humanloop/types/version_reference_response.py
+++ b/src/humanloop/types/version_reference_response.py
@@ -2,7 +2,6 @@
from __future__ import annotations
import typing
-import typing
if typing.TYPE_CHECKING:
from .version_deployment_response import VersionDeploymentResponse
diff --git a/tests/custom/test_client.py b/tests/custom/test_client.py
index 73f811f5..3e7c8334 100644
--- a/tests/custom/test_client.py
+++ b/tests/custom/test_client.py
@@ -4,4 +4,4 @@
# Get started with writing tests with pytest at https://docs.pytest.org
@pytest.mark.skip(reason="Unimplemented")
def test_client() -> None:
- assert True == True
+ assert True is True
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 4f2e1fdb..72611b5d 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,8 +1,19 @@
from contextlib import contextmanager, redirect_stdout
-from typing import ContextManager
+from dataclasses import dataclass
+import os
+from typing import Any, ContextManager, Generator
import io
from typing import TextIO
+import uuid
import pytest
+import dotenv
+from humanloop.client import Humanloop
+
+
+@dataclass
+class TestIdentifiers:
+ file_id: str
+ file_path: str
@pytest.fixture()
@@ -14,3 +25,128 @@ def _context_manager():
yield f
return _context_manager # type: ignore [return-value]
+
+
+@pytest.fixture(scope="session")
+def openai_key() -> str:
+ dotenv.load_dotenv()
+ if not os.getenv("OPENAI_API_KEY"):
+ pytest.fail("OPENAI_API_KEY is not set for integration tests")
+ return os.getenv("OPENAI_API_KEY") # type: ignore [return-value]
+
+
+@pytest.fixture(scope="session")
+def humanloop_test_client() -> Humanloop:
+ dotenv.load_dotenv()
+ if not os.getenv("HUMANLOOP_API_KEY"):
+ pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
+ return Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY")) # type: ignore [return-value]
+
+
+@pytest.fixture(scope="function")
+def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]:
+ path = f"SDK_INTEGRATION_TEST_{uuid.uuid4()}"
+ try:
+        response = humanloop_test_client.directories.create(path=path)
+    except Exception as e:
+        pytest.fail(f"Failed to create directory {path}: {e}")
+    # Yield outside the try block so a teardown error is not misreported
+    # as a setup failure.
+    yield response.path
+    humanloop_test_client.directories.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def test_prompt_config() -> dict[str, Any]:
+ return {
+ "provider": "openai",
+ "model": "gpt-4o-mini",
+ "temperature": 0.5,
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant. You must answer the user's question truthfully and at the level of a 5th grader.",
+ },
+ {
+ "role": "user",
+ "content": "{{question}}",
+ },
+ ],
+ }
+
+
+@pytest.fixture(scope="function")
+def eval_dataset(humanloop_test_client: Humanloop, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
+ dataset_path = f"{sdk_test_dir}/eval_dataset"
+ try:
+ response = humanloop_test_client.datasets.upsert(
+ path=dataset_path,
+ datapoints=[
+ {
+ "inputs": {
+ "question": "What is the capital of the France?",
+ },
+ },
+ {
+ "inputs": {
+ "question": "What is the capital of the Germany?",
+ },
+ },
+ {
+ "inputs": {
+ "question": "What is 2+2?",
+ },
+ },
+            ],
+        )
+    except Exception as e:
+        pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
+    yield TestIdentifiers(file_id=response.id, file_path=response.path)
+    humanloop_test_client.datasets.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def eval_prompt(
+ humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
+) -> Generator[TestIdentifiers, None, None]:
+ prompt_path = f"{sdk_test_dir}/eval_prompt"
+ try:
+ response = humanloop_test_client.prompts.upsert(
+ path=prompt_path,
+ **test_prompt_config,
+ )
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+    yield TestIdentifiers(file_id=response.id, file_path=response.path)
+    humanloop_test_client.prompts.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def output_not_null_evaluator(
+ humanloop_test_client: Humanloop, sdk_test_dir: str
+) -> Generator[TestIdentifiers, None, None]:
+ evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
+ try:
+ response = humanloop_test_client.evaluators.upsert(
+ path=evaluator_path,
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "boolean",
+ "code": """
+def output_not_null(log: dict) -> bool:
+ return log["output"] is not None
+ """,
+ "evaluator_type": "python",
+ },
+ )
+    except Exception as e:
+        pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
+    yield TestIdentifiers(file_id=response.id, file_path=response.path)
+    humanloop_test_client.evaluators.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def id_for_staging_environment(humanloop_test_client: Humanloop, eval_prompt: TestIdentifiers) -> str:
+ response = humanloop_test_client.prompts.list_environments(id=eval_prompt.file_id)
+ for environment in response:
+ if environment.name == "staging":
+ return environment.id
+ pytest.fail("Staging environment not found")
diff --git a/tests/integration/test_decorators.py b/tests/integration/test_decorators.py
new file mode 100644
index 00000000..218453a6
--- /dev/null
+++ b/tests/integration/test_decorators.py
@@ -0,0 +1,154 @@
+import time
+from typing import Any
+
+from openai import OpenAI
+from humanloop.client import Humanloop
+from humanloop.types.chat_message import ChatMessage
+
+
+def test_prompt_decorator(
+ humanloop_test_client: Humanloop,
+ sdk_test_dir: str,
+ test_prompt_config: dict[str, Any],
+ openai_key: str,
+):
+    prompt_path = f"{sdk_test_dir}/test_prompt"
+    # Upsert before the try block so `prompt_response` is always bound when
+    # the finally clause deletes it.
+    prompt_response = humanloop_test_client.prompts.upsert(
+        path=prompt_path,
+        **test_prompt_config,
+    )
+    try:
+
+ prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ assert len(prompt_versions_response.records) == 1
+
+ @humanloop_test_client.prompt(path=prompt_path)
+ def my_prompt(question: str) -> str:
+ openai_client = OpenAI(api_key=openai_key)
+
+ response = openai_client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=[{"role": "user", "content": question}],
+ )
+
+ assert response.choices[0].message.content is not None
+ return response.choices[0].message.content
+
+ assert "paris" in my_prompt("What is the capital of the France?").lower()
+
+ time.sleep(5)
+ prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ assert len(prompt_versions_response.records) == 2
+
+ logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+
+ assert logs_response.items is not None and len(logs_response.items) == 1
+ finally:
+ humanloop_test_client.prompts.delete(id=prompt_response.id)
+
+
+def test_call_prompt_in_flow_decorator(
+ humanloop_test_client: Humanloop,
+ sdk_test_dir: str,
+ openai_key: str,
+):
+ try:
+
+ @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow")
+ def my_flow(question: str) -> str:
+ response = humanloop_test_client.prompts.call(
+ path=f"{sdk_test_dir}/test_prompt",
+ prompt={
+ "provider": "openai",
+ "model": "gpt-4o-mini",
+ "temperature": 0,
+ },
+ messages=[{"role": "user", "content": question}],
+ provider_api_keys={"openai": openai_key},
+ )
+
+ assert response.logs[0].output is not None
+ return response.logs[0].output
+
+ assert "paris" in my_flow("What is the capital of the France?").lower()
+ time.sleep(5)
+ prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ assert prompt_response is not None
+ prompt_logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+ assert prompt_logs_response.items is not None and len(prompt_logs_response.items) == 1
+ prompt_log = prompt_logs_response.items[0]
+
+ flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ assert flow_response is not None
+ flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
+ flow_log = flow_logs_response.items[0]
+ assert prompt_log.trace_parent_id == flow_log.id
+ finally:
+ flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ if flow_response is not None:
+ humanloop_test_client.flows.delete(id=flow_response.id)
+ prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ if prompt_response is not None:
+ humanloop_test_client.prompts.delete(id=prompt_response.id)
+
+
+def test_flow_decorator_logs_exceptions(
+ humanloop_test_client: Humanloop,
+ sdk_test_dir: str,
+):
+ try:
+
+ @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_error")
+ def my_flow(question: str) -> str:
+ raise ValueError("This is a test exception")
+
+ my_flow("test")
+
+ time.sleep(5)
+
+ flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ assert flow_response is not None
+ flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
+ flow_log = flow_logs_response.items[0]
+ assert flow_log.error is not None
+ assert flow_log.output is None
+
+ finally:
+ flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ if flow_response is not None:
+ humanloop_test_client.flows.delete(id=flow_response.id)
+
+
+def test_flow_decorator_populates_output_message(
+ humanloop_test_client: Humanloop,
+ sdk_test_dir: str,
+):
+ try:
+
+ @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_output_message")
+ def my_flow(question: str) -> dict[str, Any]:
+ return {"role": "user", "content": question}
+
+ assert "france" in my_flow("What is the capital of the France?")["content"].lower()
+
+ time.sleep(5)
+
+ flow_response = humanloop_test_client.files.retrieve_by_path(
+ path=f"{sdk_test_dir}/test_flow_log_output_message"
+ )
+ assert flow_response is not None
+ flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
+ flow_log = flow_logs_response.items[0]
+ assert flow_log.output_message is not None
+ assert flow_log.output is None
+ assert flow_log.error is None
+
+ finally:
+ flow_response = humanloop_test_client.files.retrieve_by_path(
+ path=f"{sdk_test_dir}/test_flow_log_output_message"
+ )
+ if flow_response is not None:
+ humanloop_test_client.flows.delete(id=flow_response.id)
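
Distilled from the tests above: calls made inside a `@flow`-decorated function are nested under the Flow's Log, which is exactly what the `trace_parent_id` assertion checks. A condensed sketch with illustrative paths and a placeholder API key, not a new test:

from humanloop.client import Humanloop

hl = Humanloop(api_key="...")  # placeholder key


@hl.flow(path="demo/my_flow")
def my_flow(question: str) -> str:
    response = hl.prompts.call(
        path="demo/my_prompt",
        prompt={"provider": "openai", "model": "gpt-4o-mini"},
        messages=[{"role": "user", "content": question}],
    )
    # The resulting Prompt Log's trace_parent_id points at the Flow Log.
    return response.logs[0].output or ""
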
diff --git a/tests/integration/test_evals.py b/tests/integration/test_evals.py
new file mode 100644
index 00000000..49bbb6dc
--- /dev/null
+++ b/tests/integration/test_evals.py
@@ -0,0 +1,402 @@
+import time
+from typing import Any
+
+import pytest
+from humanloop.client import Humanloop
+from humanloop.error import HumanloopRuntimeError
+from tests.integration.conftest import TestIdentifiers
+
+
+def test_eval_run_works_on_online_files(
+ humanloop_test_client: Humanloop,
+ output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ eval_prompt: TestIdentifiers,
+) -> None:
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": eval_prompt.file_path,
+ "type": "prompt",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ time.sleep(5)
+ response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ assert response.items and len(response.items) == 1
+ evaluation_id = response.items[0].id
+ run_evaluation_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined]
+ assert run_evaluation_response.runs[0].status == "completed"
+
+
+def test_eval_run_version_id(
+ humanloop_test_client: Humanloop,
+ output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ eval_prompt: TestIdentifiers,
+ test_prompt_config: dict[str, Any],
+) -> None:
+ # GIVEN a prompt where a non-default version is created
+ new_test_prompt_config = test_prompt_config.copy()
+ new_test_prompt_config["temperature"] = 1
+ new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ path=eval_prompt.file_path,
+ **new_test_prompt_config,
+ )
+ # WHEN creating an evaluation using version_id
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "id": new_prompt_version_response.id,
+ "version_id": new_prompt_version_response.version_id,
+ "type": "prompt",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN we evaluate the version created in the test
+ evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+ evaluation_id = evaluations_response.items[0].id
+ runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ assert runs_response.runs[0].status == "completed"
+ assert (
+ runs_response.runs[0].version
+ and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
+ )
+ list_versions_response = humanloop_test_client.prompts.list_versions(id=new_prompt_version_response.id)
+ assert list_versions_response.records and len(list_versions_response.records) == 2
+ # THEN the version used in evaluation is not the default version
+ response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ assert response.version_id != new_prompt_version_response.version_id
+
+
+def test_eval_run_environment(
+ humanloop_test_client: Humanloop,
+ output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ eval_prompt: TestIdentifiers,
+ test_prompt_config: dict[str, Any],
+ id_for_staging_environment: str,
+) -> None:
+ # GIVEN a prompt deployed to staging environment
+ new_test_prompt_config = test_prompt_config.copy()
+ new_test_prompt_config["temperature"] = 1
+ new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ path=eval_prompt.file_path,
+ **new_test_prompt_config,
+ )
+ humanloop_test_client.prompts.set_deployment(
+ id=new_prompt_version_response.id,
+ environment_id=id_for_staging_environment,
+ version_id=new_prompt_version_response.version_id,
+ )
+ # WHEN creating an evaluation using environment
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "id": new_prompt_version_response.id,
+ "type": "prompt",
+ "environment": "staging",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN evaluation is done with the version deployed to staging environment
+ evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+ evaluation_id = evaluations_response.items[0].id
+ runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ assert runs_response.runs[0].status == "completed"
+ assert (
+ runs_response.runs[0].version
+ and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
+ )
+ default_prompt_version_response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ assert default_prompt_version_response.version_id != new_prompt_version_response.version_id
+
+
+@pytest.mark.parametrize("version_lookup", ["version_id", "environment"])
+def test_eval_run_version_lookup_fails_with_path(
+ humanloop_test_client: Humanloop,
+ eval_prompt: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+ version_lookup: str,
+):
+ # GIVEN an eval run where we try to evaluate a non-default version
+ with pytest.raises(HumanloopRuntimeError) as e:
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": eval_prompt.file_path,
+ "type": "prompt",
+ # WHEN the File id is not passed in file
+ version_lookup: "will_not_work",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN an error is raised
+ assert "You must provide the `file.id` when addressing a file by version ID or environment" in str(e.value)
+
+
+def test_eval_run_with_version_upsert(
+ humanloop_test_client: Humanloop,
+ eval_prompt: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+ test_prompt_config: dict[str, Any],
+):
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": eval_prompt.file_path,
+ "type": "prompt",
+ "version": {
+ **test_prompt_config,
+ "temperature": 1,
+ },
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN the version is upserted and evaluation finishes successfully
+ evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+ evaluation_id = evaluations_response.items[0].id
+ runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ assert runs_response.runs[0].status == "completed"
+ # THEN a version was upserted based on file.version
+ list_prompt_versions_response = humanloop_test_client.prompts.list_versions(id=eval_prompt.file_id)
+ assert list_prompt_versions_response.records and len(list_prompt_versions_response.records) == 2
+
+
+def test_flow_eval_does_not_work_without_callable(
+ humanloop_test_client: Humanloop,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+):
+ with pytest.raises(HumanloopRuntimeError) as e:
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": "Test Flow",
+ "type": "flow",
+ "version": {
+ "attributes": {
+ "foo": "bar",
+ }
+ },
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN an error is raised
+ assert "You must provide a `callable` for your Flow `file` to run a local eval." in str(e.value)
+
+
+def test_flow_eval_works_with_callable(
+ humanloop_test_client: Humanloop,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+ sdk_test_dir: str,
+):
+ flow_path = f"{sdk_test_dir}/Test Flow"
+ # GIVEN a flow with a callable
+ flow_response = humanloop_test_client.flows.upsert(
+ path=flow_path,
+ attributes={
+ "foo": "bar",
+ },
+ )
+    try:
+ # WHEN we run an evaluation with the flow
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "id": flow.id,
+ "type": "flow",
+ "callable": lambda question: "bar",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN the evaluation finishes successfully
+        evaluations_response = humanloop_test_client.evaluations.list(file_id=flow_response.id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+ evaluation_id = evaluations_response.items[0].id
+ runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ assert runs_response.runs[0].status == "completed"
+ finally:
+ humanloop_test_client.flows.delete(id=flow_response.id)
+
+
+def test_cannot_evaluate_agent_with_callable(
+ humanloop_test_client: Humanloop,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+):
+ with pytest.raises(ValueError) as e:
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": "Test Agent",
+ "type": "agent",
+ "callable": lambda question: "bar",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ assert str(e.value) == "Agent evaluation is only possible on the Humanloop runtime, do not provide a `callable`."
+
+
+def test_flow_eval_resolves_to_default_with_callable(
+ humanloop_test_client: Humanloop,
+ output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: TestIdentifiers,
+ sdk_test_dir: str,
+) -> None:
+ # GIVEN a flow with some attributes
+ flow_path = f"{sdk_test_dir}/Test Flow"
+ flow_response = humanloop_test_client.flows.upsert(
+ path=flow_path,
+ attributes={
+ "foo": "bar",
+ },
+ )
+ try:
+ # WHEN running an evaluation with the flow's callable but no version
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "id": flow_response.id,
+ "type": "flow",
+ "callable": lambda question: "It's complicated don't worry about it",
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ # THEN the evaluation finishes successfully
+ evaluations_response = humanloop_test_client.evaluations.list(file_id=flow_response.id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+        evaluation_id = evaluations_response.items[0].id
+        runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)  # type: ignore [attr-defined]
+ assert runs_response.runs[0].status == "completed"
+ finally:
+ # Clean up test resources
+ humanloop_test_client.flows.delete(id=flow_response.id)
+
+
+@pytest.mark.skip(reason="Skip until agents are in prod")
+def test_agent_eval_works_upserting(
+ humanloop_test_client: Humanloop,
+ eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: TestIdentifiers,
+ sdk_test_dir: str,
+):
+ humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ name="test_eval_run",
+ file={
+ "path": f"{sdk_test_dir}/Test Agent",
+ "type": "agent",
+ "version": {
+ "model": "gpt-4o",
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant, offering very short answers.",
+ },
+ {
+ "role": "user",
+ "content": "{{question}}",
+ },
+ ],
+ "provider": "openai",
+ "temperature": 0,
+ "max_iterations": 5,
+ },
+ },
+ dataset={
+ "path": eval_dataset.file_path,
+ },
+ evaluators=[
+ {
+ "path": output_not_null_evaluator.file_path,
+ }
+ ],
+ )
+ files_response = humanloop_test_client.files.list_files(page=1, size=100)
+ eval_agent = None
+ for file in files_response.records:
+ if file.path == f"{sdk_test_dir}/Test Agent":
+ eval_agent = file
+ break
+ assert eval_agent and eval_agent.type == "agent"
+ # THEN the evaluation finishes successfully
+ evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_agent.id)
+ assert evaluations_response.items and len(evaluations_response.items) == 1
+ evaluation_id = evaluations_response.items[0].id
+ runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
+ assert runs_response.runs[0].status == "completed"
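
The suite above exercises four ways of addressing the evaluated File. A hedged summary with placeholder paths and IDs; the commented alternatives assume the same constraints the tests assert (for example, `file.id` is required when addressing by `version_id` or `environment`):

from humanloop.client import Humanloop

hl = Humanloop(api_key="...")  # placeholder key

hl.evaluations.run(
    name="example_eval",
    file={"path": "demo/prompt", "type": "prompt"},  # default version, by path
    # file={"id": "pr_...", "version_id": "prv_...", "type": "prompt"},  # pinned version; id required
    # file={"id": "pr_...", "environment": "staging", "type": "prompt"},  # deployed version; id required
    # file={"path": "demo/prompt", "type": "prompt", "version": {...}},  # upsert a version inline
    dataset={"path": "demo/dataset"},
    evaluators=[{"path": "demo/evaluator"}],
)
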
diff --git a/tests/utils/assets/models/circle.py b/tests/utils/assets/models/circle.py
index 3395545e..759fe3eb 100644
--- a/tests/utils/assets/models/circle.py
+++ b/tests/utils/assets/models/circle.py
@@ -2,7 +2,6 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/assets/models/object_with_optional_field.py b/tests/utils/assets/models/object_with_optional_field.py
index d6ab74e8..dc3e3eb7 100644
--- a/tests/utils/assets/models/object_with_optional_field.py
+++ b/tests/utils/assets/models/object_with_optional_field.py
@@ -4,7 +4,6 @@
import typing_extensions
import typing
-import typing_extensions
from humanloop.core.serialization import FieldMetadata
import datetime as dt
import uuid
diff --git a/tests/utils/assets/models/shape.py b/tests/utils/assets/models/shape.py
index 0160cdbd..540ccabd 100644
--- a/tests/utils/assets/models/shape.py
+++ b/tests/utils/assets/models/shape.py
@@ -4,7 +4,6 @@
from __future__ import annotations
import typing_extensions
-import typing_extensions
import typing
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/assets/models/square.py b/tests/utils/assets/models/square.py
index c7d6cfaf..da4a2111 100644
--- a/tests/utils/assets/models/square.py
+++ b/tests/utils/assets/models/square.py
@@ -2,7 +2,6 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/test_query_encoding.py b/tests/utils/test_query_encoding.py
index 128c967e..a52b87c1 100644
--- a/tests/utils/test_query_encoding.py
+++ b/tests/utils/test_query_encoding.py
@@ -34,4 +34,4 @@ def test_query_encoding_deep_object_arrays() -> None:
def test_encode_query_with_none() -> None:
encoded = encode_query(None)
- assert encoded == None
+ assert encoded is None