diff --git a/.fernignore b/.fernignore
index 112f779b..d52ed17e 100644
--- a/.fernignore
+++ b/.fernignore
@@ -13,6 +13,8 @@ mypy.ini
README.md
src/humanloop/decorators
src/humanloop/otel
+src/humanloop/sync
+src/humanloop/cli/
## Tests
diff --git a/poetry.lock b/poetry.lock
index afa2f8a6..3c068936 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -17,6 +18,7 @@ version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
{file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
@@ -41,6 +43,7 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -63,18 +66,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "certifi"
@@ -82,6 +86,7 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -93,6 +98,7 @@ version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
{file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
@@ -188,12 +194,28 @@ files = [
{file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
]
+[[package]]
+name = "click"
+version = "8.1.8"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
[[package]]
name = "cohere"
version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -216,10 +238,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -227,6 +251,7 @@ version = "8.4.2"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"},
{file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"},
@@ -245,6 +270,7 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -254,7 +280,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
@@ -262,6 +288,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -273,6 +300,8 @@ version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -287,6 +316,7 @@ version = "1.10.0"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
{file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
@@ -333,6 +363,7 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -341,7 +372,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fsspec"
@@ -349,6 +380,7 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -388,6 +420,7 @@ version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
{file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
@@ -438,6 +471,7 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -459,6 +493,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -471,7 +506,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -483,6 +518,7 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -494,6 +530,7 @@ version = "0.31.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main", "dev"]
files = [
{file = "huggingface_hub-0.31.1-py3-none-any.whl", hash = "sha256:43f73124819b48b42d140cbc0d7a2e6bd15b2853b1b9d728d4d55ad1750cac5b"},
{file = "huggingface_hub-0.31.1.tar.gz", hash = "sha256:492bb5f545337aa9e2f59b75ef4c5f535a371e8958a6ce90af056387e67f1180"},
@@ -530,6 +567,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -544,6 +582,7 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -553,12 +592,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -567,6 +606,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -578,6 +618,7 @@ version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
{file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
@@ -663,6 +704,7 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -684,6 +726,7 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -698,6 +741,7 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -796,6 +840,7 @@ version = "1.0.1"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
{file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
@@ -842,6 +887,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -853,6 +899,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -898,6 +945,7 @@ version = "1.77.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "openai-1.77.0-py3-none-any.whl", hash = "sha256:07706e91eb71631234996989a8ea991d5ee56f0744ef694c961e0824d4f39218"},
{file = "openai-1.77.0.tar.gz", hash = "sha256:897969f927f0068b8091b4b041d1f8175bcf124f7ea31bab418bf720971223bc"},
@@ -924,6 +972,7 @@ version = "1.32.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"},
{file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"},
@@ -939,6 +988,7 @@ version = "0.53b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"},
{file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"},
@@ -956,6 +1006,7 @@ version = "0.40.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_anthropic-0.40.3-py3-none-any.whl", hash = "sha256:152a7968d86ade48ffb4df526129def598e8e3eeb1c4fb11a5e6a3bbc94c0fd4"},
{file = "opentelemetry_instrumentation_anthropic-0.40.3.tar.gz", hash = "sha256:5e40a9d3342d800180d29e028f3d1aa5db3e0ec482362a7eef054ee500fb8d4f"},
@@ -973,6 +1024,7 @@ version = "0.40.3"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_bedrock-0.40.3-py3-none-any.whl", hash = "sha256:cc8ea0358f57876ad12bbcbc9b1ace4b97bf9d8d5d7703a7a55135811b0b433a"},
{file = "opentelemetry_instrumentation_bedrock-0.40.3.tar.gz", hash = "sha256:bcb5060060d0ec25bd8c08332eadd23d57ceea1e042fed5e7a7664e5a2fcb817"},
@@ -992,6 +1044,7 @@ version = "0.40.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_cohere-0.40.3-py3-none-any.whl", hash = "sha256:d39d058ae5cffe02908c1c242b71dc449d07df71c60d7a37159a898e38c6a15c"},
{file = "opentelemetry_instrumentation_cohere-0.40.3.tar.gz", hash = "sha256:23f6f237f7cdef661549b3f7d02dc8b1c69ce0cbf3e0050c05ce3f443458fe35"},
@@ -1009,6 +1062,7 @@ version = "0.40.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_groq-0.40.3-py3-none-any.whl", hash = "sha256:3c3c324ab0b49323f268dc54b60fe06aaee04b5bac0f901a70251d4931b611bc"},
{file = "opentelemetry_instrumentation_groq-0.40.3.tar.gz", hash = "sha256:b246b258d28ac5af429688b9948c37ced88cba4a7f8f99f629f9b42fcbe36e47"},
@@ -1026,6 +1080,7 @@ version = "0.40.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_openai-0.40.3-py3-none-any.whl", hash = "sha256:77e55609fef78d1a81a61aeac667b6423d19f3f3936c4a219963fba0559dae44"},
{file = "opentelemetry_instrumentation_openai-0.40.3.tar.gz", hash = "sha256:8e7f260f3c3e25f445281238552c80cbd724c2d61fee4ad9360a86a2e0015114"},
@@ -1044,6 +1099,7 @@ version = "0.40.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_replicate-0.40.3-py3-none-any.whl", hash = "sha256:24a3ae27137521a4f4cfa523c36be7cedf2ad57eed9fef5f1bfb2a3e2c8aee9e"},
{file = "opentelemetry_instrumentation_replicate-0.40.3.tar.gz", hash = "sha256:b62dcee6b8afe6dc30ad98b18014fdd1b6851fc5bd6d0c4dc6c40bff16ce407d"},
@@ -1061,6 +1117,7 @@ version = "1.32.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"},
{file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"},
@@ -1075,6 +1132,7 @@ version = "1.32.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"},
{file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"},
@@ -1091,6 +1149,7 @@ version = "0.53b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"},
{file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"},
@@ -1106,6 +1165,7 @@ version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
{file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
@@ -1117,6 +1177,7 @@ version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
{file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
@@ -1128,6 +1189,7 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1139,6 +1201,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1225,6 +1288,7 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
+groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1236,6 +1300,7 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1246,9 +1311,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1256,6 +1321,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1271,6 +1337,7 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1291,6 +1358,7 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1345,6 +1413,7 @@ version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -1358,7 +1427,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1366,6 +1435,7 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -1477,6 +1547,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1499,6 +1570,7 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1513,13 +1585,14 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "pytest-retry"
-version = "1.7.0"
+version = "1.6.3"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
- {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
- {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
+ {file = "pytest_retry-1.6.3-py3-none-any.whl", hash = "sha256:e96f7df77ee70b0838d1085f9c3b8b5b7d74bf8947a0baf32e2b8c71b27683c8"},
+ {file = "pytest_retry-1.6.3.tar.gz", hash = "sha256:36ccfa11c8c8f9ddad5e20375182146d040c20c4a791745139c5a99ddf1b557d"},
]
[package.dependencies]
@@ -1534,6 +1607,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1548,6 +1622,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1562,6 +1637,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1573,6 +1649,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1635,6 +1712,7 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1651,6 +1729,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1754,6 +1833,7 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1771,6 +1851,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1792,6 +1873,7 @@ version = "0.24.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"},
{file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"},
@@ -1915,6 +1997,7 @@ version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
@@ -1942,6 +2025,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1953,6 +2037,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -1964,6 +2049,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -2011,6 +2097,7 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2043,6 +2130,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2084,6 +2173,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2105,6 +2195,7 @@ version = "4.23.0.20241208"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
@@ -2119,6 +2210,7 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2130,6 +2222,7 @@ version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
{file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
@@ -2141,6 +2234,7 @@ version = "2.32.0.20250328"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
@@ -2155,6 +2249,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2166,6 +2261,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2180,6 +2276,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2191,13 +2288,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2208,6 +2306,7 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2296,20 +2395,21 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d"
+content-hash = "2c46b60972d2abc4e2b2a6b03d82ab32d2af74e9c5932a84d0ea6758fac32f72"
diff --git a/pyproject.toml b/pyproject.toml
index 73f2c3d4..8dc69a73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,13 +1,20 @@
+# This section is used by PyPI and follows PEP 621 for package metadata
[project]
name = "humanloop"
+description = "The Humanloop Python Library"
+authors = []
+# This section is used by Poetry for development and building
+# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
-version = "0.8.36"
-description = ""
+version = "0.8.36b1"
+description = "Humanloop Python SDK"
readme = "README.md"
authors = []
-keywords = []
+packages = [
+ { include = "humanloop", from = "src" },
+]
classifiers = [
"Intended Audience :: Developers",
@@ -26,9 +33,6 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed"
]
-packages = [
- { include = "humanloop", from = "src"}
-]
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -53,8 +57,9 @@ protobuf = ">=5.29.3"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
+click = "^8.0.0"
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
@@ -86,7 +91,10 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.poetry.scripts]
+humanloop = "humanloop.cli.__main__:cli"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
+
diff --git a/reference.md b/reference.md
index beb8e609..342215cf 100644
--- a/reference.md
+++ b/reference.md
@@ -1,5 +1,7 @@
# Reference
+
## Prompts
+
client.prompts.log(...)
-
@@ -21,6 +23,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
in the case where you are storing or deriving your Prompt details in code.
+
@@ -71,6 +74,7 @@ client.prompts.log(
)
```
+
@@ -85,7 +89,7 @@ client.prompts.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -93,7 +97,7 @@ client.prompts.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -101,7 +105,7 @@ client.prompts.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
@@ -109,7 +113,7 @@ client.prompts.log(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -117,7 +121,7 @@ client.prompts.log(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -125,7 +129,7 @@ client.prompts.log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
-
+
@@ -133,7 +137,7 @@ client.prompts.log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
-
+
@@ -141,7 +145,7 @@ client.prompts.log(
**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
-
+
@@ -149,7 +153,7 @@ client.prompts.log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
-
+
@@ -157,7 +161,7 @@ client.prompts.log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the prompt.
-
+
@@ -165,7 +169,7 @@ client.prompts.log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the output.
-
+
@@ -173,7 +177,7 @@ client.prompts.log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
-
+
@@ -181,34 +185,36 @@ client.prompts.log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
The Prompt configuration to use. Two formats are supported:
+
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-A new Prompt version will be created if the provided details do not match any existing version.
-
+ A new Prompt version will be created if the provided details do not match any existing version.
+
@@ -216,7 +222,7 @@ A new Prompt version will be created if the provided details do not match any ex
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -224,7 +230,7 @@ A new Prompt version will be created if the provided details do not match any ex
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -232,15 +238,15 @@ A new Prompt version will be created if the provided details do not match any ex
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -248,7 +254,7 @@ A new Prompt version will be created if the provided details do not match any ex
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -256,7 +262,7 @@ A new Prompt version will be created if the provided details do not match any ex
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -264,7 +270,7 @@ A new Prompt version will be created if the provided details do not match any ex
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -272,7 +278,7 @@ A new Prompt version will be created if the provided details do not match any ex
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -280,7 +286,7 @@ A new Prompt version will be created if the provided details do not match any ex
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -288,7 +294,7 @@ A new Prompt version will be created if the provided details do not match any ex
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -296,7 +302,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -304,7 +310,7 @@ A new Prompt version will be created if the provided details do not match any ex
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -312,7 +318,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -320,7 +326,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -328,7 +334,7 @@ A new Prompt version will be created if the provided details do not match any ex
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -336,7 +342,7 @@ A new Prompt version will be created if the provided details do not match any ex
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -344,7 +350,7 @@ A new Prompt version will be created if the provided details do not match any ex
**prompt_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -352,7 +358,7 @@ A new Prompt version will be created if the provided details do not match any ex
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -360,7 +366,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -368,13 +374,12 @@ A new Prompt version will be created if the provided details do not match any ex
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -394,6 +399,7 @@ A new Prompt version will be created if the provided details do not match any ex
Update a Log.
Update the details of a Log with the given ID.
+
@@ -419,6 +425,7 @@ client.prompts.update_log(
)
```
+
@@ -433,7 +440,7 @@ client.prompts.update_log(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -441,7 +448,7 @@ client.prompts.update_log(
**log_id:** `str` — Unique identifier for the Log.
-
+
@@ -449,7 +456,7 @@ client.prompts.update_log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
-
+
@@ -457,7 +464,7 @@ client.prompts.update_log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
-
+
@@ -465,7 +472,7 @@ client.prompts.update_log(
**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
-
+
@@ -473,7 +480,7 @@ client.prompts.update_log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
-
+
@@ -481,7 +488,7 @@ client.prompts.update_log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the prompt.
-
+
@@ -489,7 +496,7 @@ client.prompts.update_log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the output.
-
+
@@ -497,7 +504,7 @@ client.prompts.update_log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
-
+
@@ -505,21 +512,22 @@ client.prompts.update_log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
@@ -527,15 +535,15 @@ Controls how the model uses tools. The following options are supported:
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -543,7 +551,7 @@ Controls how the model uses tools. The following options are supported:
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -551,7 +559,7 @@ Controls how the model uses tools. The following options are supported:
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -559,7 +567,7 @@ Controls how the model uses tools. The following options are supported:
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -567,7 +575,7 @@ Controls how the model uses tools. The following options are supported:
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -575,7 +583,7 @@ Controls how the model uses tools. The following options are supported:
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -583,7 +591,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -591,7 +599,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -599,7 +607,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -607,7 +615,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -615,7 +623,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -623,7 +631,7 @@ Controls how the model uses tools. The following options are supported:
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -631,13 +639,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -666,6 +673,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
in the case where you are storing or deriving your Prompt details in code.
+
@@ -690,6 +698,7 @@ for chunk in response.data:
yield chunk
```
+
@@ -704,7 +713,7 @@ for chunk in response.data:
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -712,7 +721,7 @@ for chunk in response.data:
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -720,7 +729,7 @@ for chunk in response.data:
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -728,7 +737,7 @@ for chunk in response.data:
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -736,34 +745,36 @@ for chunk in response.data:
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
The Prompt configuration to use. Two formats are supported:
+
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-A new Prompt version will be created if the provided details do not match any existing version.
-
+ A new Prompt version will be created if the provided details do not match any existing version.
+
@@ -771,7 +782,7 @@ A new Prompt version will be created if the provided details do not match any ex
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -779,7 +790,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -787,7 +798,7 @@ A new Prompt version will be created if the provided details do not match any ex
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -795,7 +806,7 @@ A new Prompt version will be created if the provided details do not match any ex
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -803,7 +814,7 @@ A new Prompt version will be created if the provided details do not match any ex
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -811,7 +822,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -819,7 +830,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -827,7 +838,7 @@ A new Prompt version will be created if the provided details do not match any ex
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -835,7 +846,7 @@ A new Prompt version will be created if the provided details do not match any ex
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -843,7 +854,7 @@ A new Prompt version will be created if the provided details do not match any ex
**prompts_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -851,7 +862,7 @@ A new Prompt version will be created if the provided details do not match any ex
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -859,7 +870,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -867,7 +878,7 @@ A new Prompt version will be created if the provided details do not match any ex
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -875,7 +886,7 @@ A new Prompt version will be created if the provided details do not match any ex
**num_samples:** `typing.Optional[int]` — The number of generations.
-
+
@@ -883,7 +894,7 @@ A new Prompt version will be created if the provided details do not match any ex
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -891,7 +902,7 @@ A new Prompt version will be created if the provided details do not match any ex
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response
-
+
@@ -899,7 +910,7 @@ A new Prompt version will be created if the provided details do not match any ex
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
-
+
@@ -907,13 +918,12 @@ A new Prompt version will be created if the provided details do not match any ex
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -942,6 +952,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
in the case where you are storing or deriving your Prompt details in code.
+
@@ -969,6 +980,7 @@ client.prompts.call(
)
```
+
@@ -983,7 +995,7 @@ client.prompts.call(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -991,7 +1003,7 @@ client.prompts.call(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -999,7 +1011,7 @@ client.prompts.call(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -1007,7 +1019,7 @@ client.prompts.call(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -1015,34 +1027,36 @@ client.prompts.call(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
The Prompt configuration to use. Two formats are supported:
+
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-A new Prompt version will be created if the provided details do not match any existing version.
-
+ A new Prompt version will be created if the provided details do not match any existing version.
+
@@ -1050,7 +1064,7 @@ A new Prompt version will be created if the provided details do not match any ex
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -1058,7 +1072,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -1066,7 +1080,7 @@ A new Prompt version will be created if the provided details do not match any ex
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -1074,7 +1088,7 @@ A new Prompt version will be created if the provided details do not match any ex
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -1082,7 +1096,7 @@ A new Prompt version will be created if the provided details do not match any ex
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -1090,7 +1104,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -1098,7 +1112,7 @@ A new Prompt version will be created if the provided details do not match any ex
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -1106,7 +1120,7 @@ A new Prompt version will be created if the provided details do not match any ex
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -1114,7 +1128,7 @@ A new Prompt version will be created if the provided details do not match any ex
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -1122,7 +1136,7 @@ A new Prompt version will be created if the provided details do not match any ex
**prompts_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -1130,7 +1144,7 @@ A new Prompt version will be created if the provided details do not match any ex
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -1138,7 +1152,7 @@ A new Prompt version will be created if the provided details do not match any ex
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -1146,7 +1160,7 @@ A new Prompt version will be created if the provided details do not match any ex
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -1154,7 +1168,7 @@ A new Prompt version will be created if the provided details do not match any ex
**num_samples:** `typing.Optional[int]` — The number of generations.
-
+
@@ -1162,7 +1176,7 @@ A new Prompt version will be created if the provided details do not match any ex
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -1170,7 +1184,7 @@ A new Prompt version will be created if the provided details do not match any ex
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response
-
+
@@ -1178,7 +1192,7 @@ A new Prompt version will be created if the provided details do not match any ex
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
-
+
@@ -1186,13 +1200,12 @@ A new Prompt version will be created if the provided details do not match any ex
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1210,6 +1223,7 @@ A new Prompt version will be created if the provided details do not match any ex
Get a list of all Prompts.
+
@@ -1239,6 +1253,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -1253,7 +1268,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -1261,7 +1276,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Prompts to fetch.
-
+
@@ -1269,7 +1284,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Prompt name.
-
+
@@ -1277,7 +1292,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
-
+
@@ -1285,7 +1300,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Prompts by
-
+
@@ -1293,7 +1308,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -1301,13 +1316,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1331,6 +1345,7 @@ Prompts are identified by the `ID` or their `path`. The parameters (i.e. the pro
You can provide `version_name` and `version_description` to identify and describe your versions.
Version names must be unique within a Prompt - attempting to create a version with a name
that already exists will result in a 409 Conflict error.
+
@@ -1368,6 +1383,7 @@ client.prompts.upsert(
)
```
+
@@ -1382,7 +1398,7 @@ client.prompts.upsert(
**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
-
+
@@ -1390,7 +1406,7 @@ client.prompts.upsert(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -1398,7 +1414,7 @@ client.prompts.upsert(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -1406,22 +1422,22 @@ client.prompts.upsert(
**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
-
+
-
-**template:** `typing.Optional[PromptRequestTemplateParams]`
+**template:** `typing.Optional[PromptRequestTemplateParams]`
-The template contains the main structure and instructions for the model, including input variables for dynamic values.
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
-For completion models, provide a prompt template as a string.
+For completion models, provide a prompt template as a string.
Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
-
+
@@ -1429,7 +1445,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
-
+
@@ -1437,7 +1453,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
-
+
@@ -1445,7 +1461,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
-
+
@@ -1453,7 +1469,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values means the model will be more creative.
-
+
@@ -1461,7 +1477,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-
+
@@ -1469,7 +1485,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**stop:** `typing.Optional[PromptRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
-
+
@@ -1477,7 +1493,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
-
+
@@ -1485,7 +1501,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
-
+
@@ -1493,7 +1509,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
-
+
@@ -1501,7 +1517,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
-
+
@@ -1509,7 +1525,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
-
+
@@ -1517,7 +1533,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
-
+
@@ -1525,7 +1541,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**tools:** `typing.Optional[typing.Sequence[ToolFunctionParams]]` — The tool specification that the model can choose to call if Tool calling is supported.
-
+
@@ -1533,7 +1549,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**linked_tools:** `typing.Optional[typing.Sequence[str]]` — The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
-
+
@@ -1541,7 +1557,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
-
+
@@ -1549,7 +1565,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**version_name:** `typing.Optional[str]` — Unique name for the Prompt version. Version names must be unique for a given Prompt.
-
+
@@ -1557,7 +1573,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
-
+
@@ -1565,7 +1581,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**description:** `typing.Optional[str]` — Description of the Prompt.
-
+
@@ -1573,7 +1589,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this prompt.
-
+
@@ -1581,7 +1597,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**readme:** `typing.Optional[str]` — Long description of the Prompt.
-
+
@@ -1589,13 +1605,12 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1616,6 +1631,7 @@ Retrieve the Prompt with the given ID.
By default, the deployed version of the Prompt is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Prompt.
+
@@ -1640,6 +1656,7 @@ client.prompts.get(
)
```
+
@@ -1654,7 +1671,7 @@ client.prompts.get(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1662,7 +1679,7 @@ client.prompts.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
-
+
@@ -1670,7 +1687,7 @@ client.prompts.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -1678,13 +1695,12 @@ client.prompts.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1702,6 +1718,7 @@ client.prompts.get(
Delete the Prompt with the given ID.
+
@@ -1726,6 +1743,7 @@ client.prompts.delete(
)
```
+
@@ -1740,7 +1758,7 @@ client.prompts.delete(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1748,13 +1766,12 @@ client.prompts.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1772,6 +1789,7 @@ client.prompts.delete(
Move the Prompt to a different path or change the name.
+
@@ -1797,6 +1815,7 @@ client.prompts.move(
)
```
+
@@ -1811,7 +1830,7 @@ client.prompts.move(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1819,7 +1838,7 @@ client.prompts.move(
**path:** `typing.Optional[str]` — Path of the Prompt including the Prompt name, which is used as a unique identifier.
-
+
@@ -1827,7 +1846,7 @@ client.prompts.move(
**name:** `typing.Optional[str]` — Name of the Prompt.
-
+
@@ -1835,13 +1854,12 @@ client.prompts.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1862,6 +1880,7 @@ Retrieve the Prompt with the given ID, including the populated template.
By default, the deployed version of the Prompt is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Prompt.
+
@@ -1887,6 +1906,7 @@ client.prompts.populate(
)
```
+
@@ -1901,15 +1921,15 @@ client.prompts.populate(
**id:** `str` — Unique identifier for Prompt.
-
+
-
-**request:** `typing.Dict[str, typing.Optional[typing.Any]]`
-
+**request:** `typing.Dict[str, typing.Optional[typing.Any]]`
+
@@ -1917,7 +1937,7 @@ client.prompts.populate(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve to populate the template.
-
+
@@ -1925,7 +1945,7 @@ client.prompts.populate(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from to populate the template.
-
+
@@ -1933,13 +1953,12 @@ client.prompts.populate(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1957,6 +1976,7 @@ client.prompts.populate(
Get a list of all the versions of a Prompt.
+
@@ -1981,6 +2001,7 @@ client.prompts.list_versions(
)
```
+
@@ -1995,7 +2016,7 @@ client.prompts.list_versions(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2003,7 +2024,7 @@ client.prompts.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -2011,13 +2032,12 @@ client.prompts.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2035,6 +2055,7 @@ client.prompts.list_versions(
Delete a version of the Prompt.
+
@@ -2060,6 +2081,7 @@ client.prompts.delete_prompt_version(
)
```
+
@@ -2074,7 +2096,7 @@ client.prompts.delete_prompt_version(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2082,7 +2104,7 @@ client.prompts.delete_prompt_version(
**version_id:** `str` — Unique identifier for the specific version of the Prompt.
-
+
@@ -2090,13 +2112,12 @@ client.prompts.delete_prompt_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2114,6 +2135,7 @@ client.prompts.delete_prompt_version(
Update the name or description of the Prompt version.
+
@@ -2139,6 +2161,7 @@ client.prompts.patch_prompt_version(
)
```
+
@@ -2153,7 +2176,7 @@ client.prompts.patch_prompt_version(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2161,7 +2184,7 @@ client.prompts.patch_prompt_version(
**version_id:** `str` — Unique identifier for the specific version of the Prompt.
-
+
@@ -2169,7 +2192,7 @@ client.prompts.patch_prompt_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -2177,7 +2200,7 @@ client.prompts.patch_prompt_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -2185,13 +2208,12 @@ client.prompts.patch_prompt_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2212,6 +2234,7 @@ Deploy Prompt to an Environment.
Set the deployed version for the specified Environment. This Prompt
will be used for calls made to the Prompt in this Environment.
+
@@ -2238,6 +2261,7 @@ client.prompts.set_deployment(
)
```
+
@@ -2252,7 +2276,7 @@ client.prompts.set_deployment(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2260,7 +2284,7 @@ client.prompts.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -2268,7 +2292,7 @@ client.prompts.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Prompt.
-
+
@@ -2276,13 +2300,12 @@ client.prompts.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2303,6 +2326,7 @@ Remove deployed Prompt from the Environment.
Remove the deployed version for the specified Environment. This Prompt
will no longer be used for calls made to the Prompt in this Environment.
+
@@ -2328,6 +2352,7 @@ client.prompts.remove_deployment(
)
```
+
@@ -2342,7 +2367,7 @@ client.prompts.remove_deployment(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2350,7 +2375,7 @@ client.prompts.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -2358,13 +2383,12 @@ client.prompts.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2382,6 +2406,7 @@ client.prompts.remove_deployment(
List all Environments and their deployed versions for the Prompt.
+
@@ -2406,6 +2431,7 @@ client.prompts.list_environments(
)
```
+
@@ -2420,7 +2446,7 @@ client.prompts.list_environments(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2428,13 +2454,12 @@ client.prompts.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2455,6 +2480,7 @@ Activate and deactivate Evaluators for monitoring the Prompt.
An activated Evaluator will automatically be run on all new Logs
within the Prompt for monitoring purposes.
+
@@ -2480,6 +2506,7 @@ client.prompts.update_monitoring(
)
```
+
@@ -2493,8 +2520,8 @@ client.prompts.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -2504,7 +2531,7 @@ client.prompts.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -2514,7 +2541,7 @@ client.prompts.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -2522,13 +2549,12 @@ client.prompts.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2552,6 +2578,7 @@ or for editing with an AI tool.
By default, the deployed version of the Prompt is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Prompt.
+
@@ -2576,6 +2603,7 @@ client.prompts.serialize(
)
```
+
@@ -2590,7 +2618,7 @@ client.prompts.serialize(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2598,7 +2626,7 @@ client.prompts.serialize(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
-
+
@@ -2606,7 +2634,7 @@ client.prompts.serialize(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -2614,13 +2642,12 @@ client.prompts.serialize(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2641,6 +2668,7 @@ Deserialize a Prompt from the .prompt file format.
This returns a subset of the attributes required by a Prompt.
This subset is the bit that defines the Prompt version (e.g. with `model` and `temperature` etc)
+
@@ -2665,6 +2693,7 @@ client.prompts.deserialize(
)
```
+
@@ -2678,8 +2707,8 @@ client.prompts.deserialize(
-
-**prompt:** `str`
-
+**prompt:** `str`
+
@@ -2687,18 +2716,18 @@ client.prompts.deserialize(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Tools
+
client.tools.call(...)
-
@@ -2722,6 +2751,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Tool details in the request body. In this case, we will check if the details correspond
to an existing version of the Tool. If they do not, we will create a new version. This is helpful
in the case where you are storing or deriving your Tool details in code.
+
@@ -2744,6 +2774,7 @@ client = Humanloop(
client.tools.call()
```
+
@@ -2758,7 +2789,7 @@ client.tools.call()
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
-
+
@@ -2766,7 +2797,7 @@ client.tools.call()
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
-
+
@@ -2774,7 +2805,7 @@ client.tools.call()
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -2782,7 +2813,7 @@ client.tools.call()
**id:** `typing.Optional[str]` — ID for an existing Tool.
-
+
@@ -2790,7 +2821,7 @@ client.tools.call()
**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
-
+
@@ -2798,7 +2829,7 @@ client.tools.call()
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -2806,7 +2837,7 @@ client.tools.call()
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -2814,7 +2845,7 @@ client.tools.call()
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -2822,7 +2853,7 @@ client.tools.call()
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -2830,7 +2861,7 @@ client.tools.call()
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -2838,7 +2869,7 @@ client.tools.call()
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -2846,7 +2877,7 @@ client.tools.call()
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -2854,7 +2885,7 @@ client.tools.call()
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -2862,7 +2893,7 @@ client.tools.call()
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -2870,7 +2901,7 @@ client.tools.call()
**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -2878,7 +2909,7 @@ client.tools.call()
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -2886,7 +2917,7 @@ client.tools.call()
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -2894,13 +2925,12 @@ client.tools.call()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2926,6 +2956,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Tool details in the request body. In this case, we will check if the details correspond
to an existing version of the Tool, if not we will create a new version. This is helpful
in the case where you are storing or deriving your Tool details in code.
+
@@ -2966,6 +2997,7 @@ client.tools.log(
)
```
+
@@ -2980,7 +3012,7 @@ client.tools.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
-
+
@@ -2988,7 +3020,7 @@ client.tools.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -2996,7 +3028,7 @@ client.tools.log(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -3004,7 +3036,7 @@ client.tools.log(
**id:** `typing.Optional[str]` — ID for an existing Tool.
-
+
@@ -3012,7 +3044,7 @@ client.tools.log(
**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
-
+
@@ -3020,7 +3052,7 @@ client.tools.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -3028,7 +3060,7 @@ client.tools.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -3036,15 +3068,15 @@ client.tools.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -3052,7 +3084,7 @@ client.tools.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -3060,7 +3092,7 @@ client.tools.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -3068,7 +3100,7 @@ client.tools.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -3076,7 +3108,7 @@ client.tools.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -3084,7 +3116,7 @@ client.tools.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -3092,7 +3124,7 @@ client.tools.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -3100,7 +3132,7 @@ client.tools.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -3108,7 +3140,7 @@ client.tools.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -3116,7 +3148,7 @@ client.tools.log(
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -3124,7 +3156,7 @@ client.tools.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -3132,7 +3164,7 @@ client.tools.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -3140,7 +3172,7 @@ client.tools.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -3148,7 +3180,7 @@ client.tools.log(
**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -3156,7 +3188,7 @@ client.tools.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -3164,7 +3196,7 @@ client.tools.log(
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -3172,13 +3204,12 @@ client.tools.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3198,6 +3229,7 @@ client.tools.log(
Update a Log.
Update the details of a Log with the given ID.
+
@@ -3223,6 +3255,7 @@ client.tools.update(
)
```
+
@@ -3237,7 +3270,7 @@ client.tools.update(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -3245,7 +3278,7 @@ client.tools.update(
**log_id:** `str` — Unique identifier for the Log.
-
+
@@ -3253,15 +3286,15 @@ client.tools.update(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -3269,7 +3302,7 @@ client.tools.update(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -3277,7 +3310,7 @@ client.tools.update(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -3285,7 +3318,7 @@ client.tools.update(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -3293,7 +3326,7 @@ client.tools.update(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -3301,7 +3334,7 @@ client.tools.update(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -3309,7 +3342,7 @@ client.tools.update(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -3317,7 +3350,7 @@ client.tools.update(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -3325,7 +3358,7 @@ client.tools.update(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -3333,7 +3366,7 @@ client.tools.update(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -3341,7 +3374,7 @@ client.tools.update(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -3349,7 +3382,7 @@ client.tools.update(
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -3357,13 +3390,12 @@ client.tools.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3381,6 +3413,7 @@ client.tools.update(
Get a list of all Tools.
+
@@ -3410,6 +3443,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -3424,7 +3458,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -3432,7 +3466,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
+
@@ -3440,7 +3474,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
-
+
@@ -3448,7 +3482,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
-
+
@@ -3456,7 +3490,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Tools by
-
+
@@ -3464,7 +3498,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -3472,13 +3506,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3502,6 +3535,7 @@ Tools are identified by the `ID` or their `path`. The name, description and para
You can provide `version_name` and `version_description` to identify and describe your versions.
Version names must be unique within a Tool - attempting to create a version with a name
that already exists will result in a 409 Conflict error.
+
@@ -3537,6 +3571,7 @@ client.tools.upsert(
)
```
+
@@ -3551,7 +3586,7 @@ client.tools.upsert(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -3559,7 +3594,7 @@ client.tools.upsert(
**id:** `typing.Optional[str]` — ID for an existing Tool.
-
+
@@ -3567,7 +3602,7 @@ client.tools.upsert(
**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
-
+
@@ -3575,7 +3610,7 @@ client.tools.upsert(
**source_code:** `typing.Optional[str]` — Code source of the Tool.
-
+
@@ -3583,7 +3618,7 @@ client.tools.upsert(
**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/
-
+
@@ -3591,7 +3626,7 @@ client.tools.upsert(
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
-
+
@@ -3599,7 +3634,7 @@ client.tools.upsert(
**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
-
+
@@ -3607,7 +3642,7 @@ client.tools.upsert(
**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
-
+
@@ -3615,7 +3650,7 @@ client.tools.upsert(
**version_description:** `typing.Optional[str]` — Description of the Version.
-
+
@@ -3623,13 +3658,12 @@ client.tools.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3650,6 +3684,7 @@ Retrieve the Tool with the given ID.
By default, the deployed version of the Tool is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Tool.
+
@@ -3674,6 +3709,7 @@ client.tools.get(
)
```
+
@@ -3688,7 +3724,7 @@ client.tools.get(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3696,7 +3732,7 @@ client.tools.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
-
+
@@ -3704,7 +3740,7 @@ client.tools.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -3712,13 +3748,12 @@ client.tools.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3736,6 +3771,7 @@ client.tools.get(
Delete the Tool with the given ID.
+
@@ -3760,6 +3796,7 @@ client.tools.delete(
)
```
+
@@ -3774,7 +3811,7 @@ client.tools.delete(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3782,13 +3819,12 @@ client.tools.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3806,6 +3842,7 @@ client.tools.delete(
Move the Tool to a different path or change the name.
+
@@ -3831,6 +3868,7 @@ client.tools.move(
)
```
+
@@ -3845,7 +3883,7 @@ client.tools.move(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3853,7 +3891,7 @@ client.tools.move(
**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
-
+
@@ -3861,7 +3899,7 @@ client.tools.move(
**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
-
+
@@ -3869,13 +3907,12 @@ client.tools.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3893,6 +3930,7 @@ client.tools.move(
Get a list of all the versions of a Tool.
+
@@ -3917,6 +3955,7 @@ client.tools.list_versions(
)
```
+
@@ -3931,7 +3970,7 @@ client.tools.list_versions(
**id:** `str` — Unique identifier for the Tool.
-
+
@@ -3939,7 +3978,7 @@ client.tools.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -3947,13 +3986,12 @@ client.tools.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3971,6 +4009,7 @@ client.tools.list_versions(
Delete a version of the Tool.
+
@@ -3996,6 +4035,7 @@ client.tools.delete_tool_version(
)
```
+
@@ -4010,7 +4050,7 @@ client.tools.delete_tool_version(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -4018,7 +4058,7 @@ client.tools.delete_tool_version(
**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
+
@@ -4026,13 +4066,12 @@ client.tools.delete_tool_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4050,6 +4089,7 @@ client.tools.delete_tool_version(
Update the name or description of the Tool version.
+
@@ -4075,6 +4115,7 @@ client.tools.update_tool_version(
)
```
+
@@ -4089,7 +4130,7 @@ client.tools.update_tool_version(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -4097,7 +4138,7 @@ client.tools.update_tool_version(
**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
+
@@ -4105,7 +4146,7 @@ client.tools.update_tool_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -4113,7 +4154,7 @@ client.tools.update_tool_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -4121,13 +4162,12 @@ client.tools.update_tool_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4148,6 +4188,7 @@ Deploy Tool to an Environment.
Set the deployed version for the specified Environment. This Prompt
will be used for calls made to the Tool in this Environment.
+
@@ -4174,6 +4215,7 @@ client.tools.set_deployment(
)
```
+
@@ -4188,7 +4230,7 @@ client.tools.set_deployment(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -4196,7 +4238,7 @@ client.tools.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -4204,7 +4246,7 @@ client.tools.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
+
@@ -4212,13 +4254,12 @@ client.tools.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4239,6 +4280,7 @@ Remove deployed Tool from the Environment.
Remove the deployed version for the specified Environment. This Tool
will no longer be used for calls made to the Tool in this Environment.
+
@@ -4264,6 +4306,7 @@ client.tools.remove_deployment(
)
```
+
@@ -4278,7 +4321,7 @@ client.tools.remove_deployment(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -4286,7 +4329,7 @@ client.tools.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -4294,13 +4337,12 @@ client.tools.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4318,6 +4360,7 @@ client.tools.remove_deployment(
List all Environments and their deployed versions for the Tool.
+
@@ -4342,6 +4385,7 @@ client.tools.list_environments(
)
```
+
@@ -4356,7 +4400,7 @@ client.tools.list_environments(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -4364,13 +4408,12 @@ client.tools.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4391,6 +4434,7 @@ Activate and deactivate Evaluators for monitoring the Tool.
An activated Evaluator will automatically be run on all new Logs
within the Tool for monitoring purposes.
+
@@ -4416,6 +4460,7 @@ client.tools.update_monitoring(
)
```
+
@@ -4429,8 +4474,8 @@ client.tools.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -4440,7 +4485,7 @@ client.tools.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -4450,7 +4495,7 @@ client.tools.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -4458,13 +4503,12 @@ client.tools.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4492,6 +4536,7 @@ client.tools.get_environment_variables(
)
```
+
@@ -4506,7 +4551,7 @@ client.tools.get_environment_variables(
**id:** `str` — Unique identifier for File.
-
+
@@ -4514,13 +4559,12 @@ client.tools.get_environment_variables(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4538,6 +4582,7 @@ client.tools.get_environment_variables(
Add an environment variable to a Tool.
+
@@ -4563,6 +4608,7 @@ client.tools.add_environment_variable(
)
```
+
@@ -4577,15 +4623,15 @@ client.tools.add_environment_variable(
**id:** `str` — Unique identifier for Tool.
-
+
-
-**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]`
-
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]`
+
@@ -4593,13 +4639,12 @@ client.tools.add_environment_variable(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4628,6 +4673,7 @@ client.tools.delete_environment_variable(
)
```
+
@@ -4642,7 +4688,7 @@ client.tools.delete_environment_variable(
**id:** `str` — Unique identifier for File.
-
+
@@ -4650,7 +4696,7 @@ client.tools.delete_environment_variable(
**name:** `str` — Name of the Environment Variable to delete.
-
+
@@ -4658,18 +4704,18 @@ client.tools.delete_environment_variable(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Datasets
+
client.datasets.list(...)
-
@@ -4683,6 +4729,7 @@ client.tools.delete_environment_variable(
-
List all Datasets.
+
@@ -4712,6 +4759,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -4726,7 +4774,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -4734,7 +4782,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
-
+
@@ -4742,7 +4790,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
-
+
@@ -4750,7 +4798,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
-
+
@@ -4758,7 +4806,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Datasets by
-
+
@@ -4766,7 +4814,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -4774,13 +4822,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4814,6 +4861,7 @@ that already exists will result in a 409 Conflict error.
Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
+
@@ -4864,6 +4912,7 @@ client.datasets.upsert(
)
```
+
@@ -4878,7 +4927,7 @@ client.datasets.upsert(
**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
-
+
@@ -4886,7 +4935,7 @@ client.datasets.upsert(
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
+
@@ -4894,7 +4943,7 @@ client.datasets.upsert(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
+
@@ -4902,7 +4951,7 @@ client.datasets.upsert(
**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
@@ -4910,7 +4959,7 @@ client.datasets.upsert(
**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -4918,23 +4967,23 @@ client.datasets.upsert(
**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+**action:** `typing.Optional[UpdateDatesetAction]`
The action to take with the provided Datapoints.
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+- If `"set"`, the created version will only contain the Datapoints provided in this request.
+- If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+- If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+
@@ -4942,7 +4991,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+
@@ -4950,7 +4999,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
-
+
@@ -4958,7 +5007,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
-
+
@@ -4966,13 +5015,12 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4998,6 +5046,7 @@ retrieve Datapoints for a large Dataset.
By default, the deployed version of the Dataset is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Dataset.
+
@@ -5024,6 +5073,7 @@ client.datasets.get(
)
```
+
@@ -5038,7 +5088,7 @@ client.datasets.get(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5046,7 +5096,7 @@ client.datasets.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
+
@@ -5054,7 +5104,7 @@ client.datasets.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -5062,7 +5112,7 @@ client.datasets.get(
**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
@@ -5070,13 +5120,12 @@ client.datasets.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5094,6 +5143,7 @@ client.datasets.get(
Delete the Dataset with the given ID.
+
@@ -5118,6 +5168,7 @@ client.datasets.delete(
)
```
+
@@ -5132,7 +5183,7 @@ client.datasets.delete(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5140,13 +5191,12 @@ client.datasets.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5164,6 +5214,7 @@ client.datasets.delete(
Move the Dataset to a different path or change the name.
+
@@ -5188,6 +5239,7 @@ client.datasets.move(
)
```
+
@@ -5202,7 +5254,7 @@ client.datasets.move(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5210,7 +5262,7 @@ client.datasets.move(
**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
-
+
@@ -5218,7 +5270,7 @@ client.datasets.move(
**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
-
+
@@ -5226,13 +5278,12 @@ client.datasets.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5250,6 +5301,7 @@ client.datasets.move(
List all Datapoints for the Dataset with the given ID.
+
@@ -5280,6 +5332,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -5294,7 +5347,7 @@ for page in response.iter_pages():
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5302,7 +5355,7 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
+
@@ -5310,7 +5363,7 @@ for page in response.iter_pages():
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -5318,7 +5371,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -5326,7 +5379,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
-
+
@@ -5334,13 +5387,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5358,6 +5410,7 @@ for page in response.iter_pages():
Get a list of the versions for a Dataset.
+
@@ -5382,6 +5435,7 @@ client.datasets.list_versions(
)
```
+
@@ -5396,7 +5450,7 @@ client.datasets.list_versions(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5404,7 +5458,7 @@ client.datasets.list_versions(
**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
-
+
@@ -5412,13 +5466,12 @@ client.datasets.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5436,6 +5489,7 @@ client.datasets.list_versions(
Delete a version of the Dataset.
+
@@ -5461,6 +5515,7 @@ client.datasets.delete_dataset_version(
)
```
+
@@ -5475,7 +5530,7 @@ client.datasets.delete_dataset_version(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5483,7 +5538,7 @@ client.datasets.delete_dataset_version(
**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
+
@@ -5491,13 +5546,12 @@ client.datasets.delete_dataset_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5515,6 +5569,7 @@ client.datasets.delete_dataset_version(
Update the name or description of the Dataset version.
+
@@ -5540,6 +5595,7 @@ client.datasets.update_dataset_version(
)
```
+
@@ -5554,7 +5610,7 @@ client.datasets.update_dataset_version(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5562,7 +5618,7 @@ client.datasets.update_dataset_version(
**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
+
@@ -5570,7 +5626,7 @@ client.datasets.update_dataset_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -5578,7 +5634,7 @@ client.datasets.update_dataset_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -5586,13 +5642,12 @@ client.datasets.update_dataset_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5620,6 +5675,7 @@ of the Dataset that is deployed to the default Environment.
You can optionally provide a name and description for the new version using `version_name`
and `version_description` parameters.
+
@@ -5644,6 +5700,7 @@ client.datasets.upload_csv(
)
```
+
@@ -5658,17 +5715,17 @@ client.datasets.upload_csv(
**id:** `str` — Unique identifier for the Dataset
-
+
-
-**file:** `from __future__ import annotations
+**file:** `from **future** import annotations
core.File` — See core.File for more documentation
-
+
@@ -5676,7 +5733,7 @@ core.File` — See core.File for more documentation
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
-
+
@@ -5684,7 +5741,7 @@ core.File` — See core.File for more documentation
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
-
+
@@ -5692,7 +5749,7 @@ core.File` — See core.File for more documentation
**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
-
+
@@ -5700,7 +5757,7 @@ core.File` — See core.File for more documentation
**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
-
+
@@ -5708,13 +5765,12 @@ core.File` — See core.File for more documentation
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5734,6 +5790,7 @@ core.File` — See core.File for more documentation
Deploy Dataset to Environment.
Set the deployed version for the specified Environment.
+
@@ -5760,6 +5817,7 @@ client.datasets.set_deployment(
)
```
+
@@ -5774,7 +5832,7 @@ client.datasets.set_deployment(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5782,7 +5840,7 @@ client.datasets.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -5790,7 +5848,7 @@ client.datasets.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
+
@@ -5798,13 +5856,12 @@ client.datasets.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5824,6 +5881,7 @@ client.datasets.set_deployment(
Remove deployed Dataset from Environment.
Remove the deployed version for the specified Environment.
+
@@ -5849,6 +5907,7 @@ client.datasets.remove_deployment(
)
```
+
@@ -5863,7 +5922,7 @@ client.datasets.remove_deployment(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5871,7 +5930,7 @@ client.datasets.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -5879,13 +5938,12 @@ client.datasets.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5903,6 +5961,7 @@ client.datasets.remove_deployment(
List all Environments and their deployed versions for the Dataset.
+
@@ -5927,6 +5986,7 @@ client.datasets.list_environments(
)
```
+
@@ -5941,7 +6001,7 @@ client.datasets.list_environments(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -5949,18 +6009,18 @@ client.datasets.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Evaluators
+
client.evaluators.log(...)
-
@@ -5976,6 +6036,7 @@ client.datasets.list_environments(
Submit Evaluator judgment for an existing Log.
Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+
@@ -6000,6 +6061,7 @@ client.evaluators.log(
)
```
+
@@ -6014,7 +6076,7 @@ client.evaluators.log(
**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
-
+
@@ -6022,7 +6084,7 @@ client.evaluators.log(
**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
-
+
@@ -6030,7 +6092,7 @@ client.evaluators.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -6038,7 +6100,7 @@ client.evaluators.log(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -6046,7 +6108,7 @@ client.evaluators.log(
**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
@@ -6054,7 +6116,7 @@ client.evaluators.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -6062,7 +6124,7 @@ client.evaluators.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -6070,15 +6132,15 @@ client.evaluators.log(
**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -6086,7 +6148,7 @@ client.evaluators.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -6094,7 +6156,7 @@ client.evaluators.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -6102,7 +6164,7 @@ client.evaluators.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -6110,7 +6172,7 @@ client.evaluators.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
+
@@ -6118,7 +6180,7 @@ client.evaluators.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. Only populated for LLM Evaluator Logs.
-
+
@@ -6126,7 +6188,7 @@ client.evaluators.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -6134,7 +6196,7 @@ client.evaluators.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -6142,7 +6204,7 @@ client.evaluators.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -6150,7 +6212,7 @@ client.evaluators.log(
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -6158,7 +6220,7 @@ client.evaluators.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -6166,7 +6228,7 @@ client.evaluators.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -6174,7 +6236,7 @@ client.evaluators.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -6182,7 +6244,7 @@ client.evaluators.log(
**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -6190,7 +6252,7 @@ client.evaluators.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -6198,7 +6260,7 @@ client.evaluators.log(
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -6206,7 +6268,7 @@ client.evaluators.log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
-
+
@@ -6214,7 +6276,7 @@ client.evaluators.log(
**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
-
+
@@ -6222,15 +6284,15 @@ client.evaluators.log(
**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
-
+
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
-
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+
@@ -6238,13 +6300,12 @@ client.evaluators.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6262,6 +6323,7 @@ client.evaluators.log(
Get a list of all Evaluators.
+
@@ -6291,6 +6353,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -6305,7 +6368,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -6313,7 +6376,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
-
+
@@ -6321,7 +6384,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
-
+
@@ -6329,7 +6392,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
-
+
@@ -6337,7 +6400,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Evaluators by
-
+
@@ -6345,7 +6408,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -6353,13 +6416,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6383,6 +6445,7 @@ Evaluators are identified by the `ID` or their `path`. The spec provided determi
You can provide `version_name` and `version_description` to identify and describe your versions.
Version names must be unique within an Evaluator - attempting to create a version with a name
that already exists will result in a 409 Conflict error.
+
@@ -6415,6 +6478,7 @@ client.evaluators.upsert(
)
```
+
@@ -6428,8 +6492,8 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
+**spec:** `EvaluatorRequestSpecParams`
+
@@ -6437,7 +6501,7 @@ client.evaluators.upsert(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -6445,7 +6509,7 @@ client.evaluators.upsert(
**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
@@ -6453,7 +6517,7 @@ client.evaluators.upsert(
**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
-
+
@@ -6461,7 +6525,7 @@ client.evaluators.upsert(
**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
-
+
@@ -6469,13 +6533,12 @@ client.evaluators.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6496,6 +6559,7 @@ Retrieve the Evaluator with the given ID.
By default, the deployed version of the Evaluator is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Evaluator.
+
@@ -6520,6 +6584,7 @@ client.evaluators.get(
)
```
+
@@ -6534,7 +6599,7 @@ client.evaluators.get(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6542,7 +6607,7 @@ client.evaluators.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
-
+
@@ -6550,7 +6615,7 @@ client.evaluators.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -6558,13 +6623,12 @@ client.evaluators.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6582,6 +6646,7 @@ client.evaluators.get(
Delete the Evaluator with the given ID.
+
@@ -6606,6 +6671,7 @@ client.evaluators.delete(
)
```
+
@@ -6620,7 +6686,7 @@ client.evaluators.delete(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6628,13 +6694,12 @@ client.evaluators.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6652,6 +6717,7 @@ client.evaluators.delete(
Move the Evaluator to a different path or change the name.
+
@@ -6677,6 +6743,7 @@ client.evaluators.move(
)
```
+
@@ -6691,7 +6758,7 @@ client.evaluators.move(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6699,7 +6766,7 @@ client.evaluators.move(
**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
-
+
@@ -6707,7 +6774,7 @@ client.evaluators.move(
**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
-
+
@@ -6715,13 +6782,12 @@ client.evaluators.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6739,6 +6805,7 @@ client.evaluators.move(
Get a list of all the versions of an Evaluator.
+
@@ -6763,6 +6830,7 @@ client.evaluators.list_versions(
)
```
+
@@ -6777,7 +6845,7 @@ client.evaluators.list_versions(
**id:** `str` — Unique identifier for the Evaluator.
-
+
@@ -6785,7 +6853,7 @@ client.evaluators.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -6793,13 +6861,12 @@ client.evaluators.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6817,6 +6884,7 @@ client.evaluators.list_versions(
Delete a version of the Evaluator.
+
@@ -6842,6 +6910,7 @@ client.evaluators.delete_evaluator_version(
)
```
+
@@ -6856,7 +6925,7 @@ client.evaluators.delete_evaluator_version(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6864,7 +6933,7 @@ client.evaluators.delete_evaluator_version(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
-
+
@@ -6872,13 +6941,12 @@ client.evaluators.delete_evaluator_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6896,6 +6964,7 @@ client.evaluators.delete_evaluator_version(
Update the name or description of the Evaluator version.
+
@@ -6921,6 +6990,7 @@ client.evaluators.update_evaluator_version(
)
```
+
@@ -6935,7 +7005,7 @@ client.evaluators.update_evaluator_version(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6943,7 +7013,7 @@ client.evaluators.update_evaluator_version(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
-
+
@@ -6951,7 +7021,7 @@ client.evaluators.update_evaluator_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -6959,7 +7029,7 @@ client.evaluators.update_evaluator_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -6967,13 +7037,12 @@ client.evaluators.update_evaluator_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6994,6 +7063,7 @@ Deploy Evaluator to an Environment.
Set the deployed version for the specified Environment. This Evaluator
will be used for calls made to the Evaluator in this Environment.
+
@@ -7020,6 +7090,7 @@ client.evaluators.set_deployment(
)
```
+
@@ -7034,7 +7105,7 @@ client.evaluators.set_deployment(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -7042,7 +7113,7 @@ client.evaluators.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -7050,7 +7121,7 @@ client.evaluators.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
-
+
@@ -7058,13 +7129,12 @@ client.evaluators.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7085,6 +7155,7 @@ Remove deployed Evaluator from the Environment.
Remove the deployed version for the specified Environment. This Evaluator
will no longer be used for calls made to the Evaluator in this Environment.
+
@@ -7110,6 +7181,7 @@ client.evaluators.remove_deployment(
)
```
+
@@ -7124,7 +7196,7 @@ client.evaluators.remove_deployment(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -7132,7 +7204,7 @@ client.evaluators.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -7140,13 +7212,12 @@ client.evaluators.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7164,6 +7235,7 @@ client.evaluators.remove_deployment(
List all Environments and their deployed versions for the Evaluator.
+
@@ -7188,6 +7260,7 @@ client.evaluators.list_environments(
)
```
+
@@ -7202,7 +7275,7 @@ client.evaluators.list_environments(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -7210,13 +7283,12 @@ client.evaluators.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7237,6 +7309,7 @@ Activate and deactivate Evaluators for monitoring the Evaluator.
An activated Evaluator will automatically be run on all new Logs
within the Evaluator for monitoring purposes.
+
@@ -7261,6 +7334,7 @@ client.evaluators.update_monitoring(
)
```
+
@@ -7274,8 +7348,8 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -7285,7 +7359,7 @@ client.evaluators.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -7295,7 +7369,7 @@ client.evaluators.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -7303,18 +7377,18 @@ client.evaluators.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Flows
+
client.flows.log(...)
-
@@ -7334,6 +7408,7 @@ an existing version of the Flow. Otherwise, the default deployed version will be
If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
in order to trigger Evaluators.
+
@@ -7385,6 +7460,7 @@ client.flows.log(
)
```
+
@@ -7399,7 +7475,7 @@ client.flows.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
-
+
@@ -7407,7 +7483,7 @@ client.flows.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -7415,7 +7491,7 @@ client.flows.log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
-
+
@@ -7423,7 +7499,7 @@ client.flows.log(
**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
+
@@ -7431,7 +7507,7 @@ client.flows.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
@@ -7439,7 +7515,7 @@ client.flows.log(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -7447,7 +7523,7 @@ client.flows.log(
**id:** `typing.Optional[str]` — ID for an existing Flow.
-
+
@@ -7455,7 +7531,7 @@ client.flows.log(
**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
+
@@ -7463,7 +7539,7 @@ client.flows.log(
**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
+
@@ -7471,15 +7547,15 @@ client.flows.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -7487,7 +7563,7 @@ client.flows.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -7495,7 +7571,7 @@ client.flows.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -7503,7 +7579,7 @@ client.flows.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -7511,7 +7587,7 @@ client.flows.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -7519,7 +7595,7 @@ client.flows.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -7527,7 +7603,7 @@ client.flows.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -7535,7 +7611,7 @@ client.flows.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -7543,7 +7619,7 @@ client.flows.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -7551,7 +7627,7 @@ client.flows.log(
**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
-
+
@@ -7559,7 +7635,7 @@ client.flows.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -7567,7 +7643,7 @@ client.flows.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -7575,7 +7651,7 @@ client.flows.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -7583,7 +7659,7 @@ client.flows.log(
**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -7591,7 +7667,7 @@ client.flows.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -7599,7 +7675,7 @@ client.flows.log(
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -7607,7 +7683,7 @@ client.flows.log(
**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
-
+
@@ -7615,13 +7691,12 @@ client.flows.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7644,6 +7719,7 @@ Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
Inputs and output (or error) must be provided in order to mark it as complete.
The end_time log attribute will be set to match the time the log is marked as complete.
+
@@ -7673,6 +7749,7 @@ client.flows.update_log(
)
```
+
@@ -7687,7 +7764,7 @@ client.flows.update_log(
**log_id:** `str` — Unique identifier of the Flow Log.
-
+
@@ -7695,7 +7772,7 @@ client.flows.update_log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
-
+
@@ -7703,7 +7780,7 @@ client.flows.update_log(
**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
+
@@ -7711,7 +7788,7 @@ client.flows.update_log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
-
+
@@ -7719,7 +7796,7 @@ client.flows.update_log(
**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
-
+
@@ -7727,7 +7804,7 @@ client.flows.update_log(
**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
-
+
@@ -7735,7 +7812,7 @@ client.flows.update_log(
**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
-
+
@@ -7743,13 +7820,12 @@ client.flows.update_log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7770,6 +7846,7 @@ Retrieve the Flow with the given ID.
By default, the deployed version of the Flow is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Flow.
+
@@ -7794,6 +7871,7 @@ client.flows.get(
)
```
+
@@ -7808,7 +7886,7 @@ client.flows.get(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7816,7 +7894,7 @@ client.flows.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
-
+
@@ -7824,7 +7902,7 @@ client.flows.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -7832,13 +7910,12 @@ client.flows.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7856,6 +7933,7 @@ client.flows.get(
Delete the Flow with the given ID.
+
@@ -7880,6 +7958,7 @@ client.flows.delete(
)
```
+
@@ -7894,7 +7973,7 @@ client.flows.delete(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7902,13 +7981,12 @@ client.flows.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7926,6 +8004,7 @@ client.flows.delete(
Move the Flow to a different path or change the name.
+
@@ -7951,6 +8030,7 @@ client.flows.move(
)
```
+
@@ -7965,7 +8045,7 @@ client.flows.move(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7973,7 +8053,7 @@ client.flows.move(
**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
-
+
@@ -7981,7 +8061,7 @@ client.flows.move(
**name:** `typing.Optional[str]` — Name of the Flow.
-
+
@@ -7989,7 +8069,7 @@ client.flows.move(
**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
-
+
@@ -7997,13 +8077,12 @@ client.flows.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8021,6 +8100,7 @@ client.flows.move(
Get a list of Flows.
+
@@ -8050,6 +8130,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -8064,7 +8145,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -8072,7 +8153,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
+
@@ -8080,7 +8161,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
+
@@ -8088,7 +8169,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
+
@@ -8096,7 +8177,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Flows by
-
+
@@ -8104,7 +8185,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -8112,13 +8193,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8142,6 +8222,7 @@ Flows can also be identified by the `ID` or their `path`.
You can provide `version_name` and `version_description` to identify and describe your versions.
Version names must be unique within a Flow - attempting to create a version with a name
that already exists will result in a 409 Conflict error.
+
@@ -8180,6 +8261,7 @@ client.flows.upsert(
)
```
+
@@ -8194,7 +8276,7 @@ client.flows.upsert(
**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
+
@@ -8202,7 +8284,7 @@ client.flows.upsert(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -8210,7 +8292,7 @@ client.flows.upsert(
**id:** `typing.Optional[str]` — ID for an existing Flow.
-
+
@@ -8218,7 +8300,7 @@ client.flows.upsert(
**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
-
+
@@ -8226,7 +8308,7 @@ client.flows.upsert(
**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
-
+
@@ -8234,13 +8316,12 @@ client.flows.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8258,6 +8339,7 @@ client.flows.upsert(
Get a list of all the versions of a Flow.
+
@@ -8282,6 +8364,7 @@ client.flows.list_versions(
)
```
+
@@ -8296,7 +8379,7 @@ client.flows.list_versions(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8304,7 +8387,7 @@ client.flows.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -8312,13 +8395,12 @@ client.flows.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8336,6 +8418,7 @@ client.flows.list_versions(
Delete a version of the Flow.
+
@@ -8361,6 +8444,7 @@ client.flows.delete_flow_version(
)
```
+
@@ -8375,7 +8459,7 @@ client.flows.delete_flow_version(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8383,7 +8467,7 @@ client.flows.delete_flow_version(
**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
+
@@ -8391,13 +8475,12 @@ client.flows.delete_flow_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8415,6 +8498,7 @@ client.flows.delete_flow_version(
Update the name or description of the Flow version.
+
@@ -8440,6 +8524,7 @@ client.flows.update_flow_version(
)
```
+
@@ -8454,7 +8539,7 @@ client.flows.update_flow_version(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8462,7 +8547,7 @@ client.flows.update_flow_version(
**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
+
@@ -8470,7 +8555,7 @@ client.flows.update_flow_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -8478,7 +8563,7 @@ client.flows.update_flow_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -8486,13 +8571,12 @@ client.flows.update_flow_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8513,6 +8597,7 @@ Deploy Flow to an Environment.
Set the deployed version for the specified Environment. This Flow
will be used for calls made to the Flow in this Environment.
+
@@ -8539,6 +8624,7 @@ client.flows.set_deployment(
)
```
+
@@ -8553,7 +8639,7 @@ client.flows.set_deployment(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8561,7 +8647,7 @@ client.flows.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -8569,7 +8655,7 @@ client.flows.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
+
@@ -8577,13 +8663,12 @@ client.flows.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8604,6 +8689,7 @@ Remove deployed Flow from the Environment.
Remove the deployed version for the specified Environment. This Flow
will no longer be used for calls made to the Flow in this Environment.
+
@@ -8629,6 +8715,7 @@ client.flows.remove_deployment(
)
```
+
@@ -8643,7 +8730,7 @@ client.flows.remove_deployment(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8651,7 +8738,7 @@ client.flows.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -8659,13 +8746,12 @@ client.flows.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8683,6 +8769,7 @@ client.flows.remove_deployment(
List all Environments and their deployed versions for the Flow.
+
@@ -8707,6 +8794,7 @@ client.flows.list_environments(
)
```
+
@@ -8721,7 +8809,7 @@ client.flows.list_environments(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -8729,13 +8817,12 @@ client.flows.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8756,6 +8843,7 @@ Activate and deactivate Evaluators for monitoring the Flow.
An activated Evaluator will automatically be run on all new "completed" Logs
within the Flow for monitoring purposes.
+
@@ -8781,6 +8869,7 @@ client.flows.update_monitoring(
)
```
+
@@ -8794,8 +8883,8 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -8805,7 +8894,7 @@ client.flows.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -8815,7 +8904,7 @@ client.flows.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -8823,18 +8912,18 @@ client.flows.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Agents
+
client.agents.log(...)
-
@@ -8854,6 +8943,7 @@ an existing version of the Agent. Otherwise, the default deployed version will b
If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
in order to trigger Evaluators.
+
@@ -8876,6 +8966,7 @@ client = Humanloop(
client.agents.log()
```
+
@@ -8890,7 +8981,7 @@ client.agents.log()
**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
-
+
@@ -8898,7 +8989,7 @@ client.agents.log()
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -8906,7 +8997,7 @@ client.agents.log()
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
@@ -8914,7 +9005,7 @@ client.agents.log()
**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -8922,7 +9013,7 @@ client.agents.log()
**id:** `typing.Optional[str]` — ID for an existing Agent.
-
+
@@ -8930,7 +9021,7 @@ client.agents.log()
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
-
+
@@ -8938,7 +9029,7 @@ client.agents.log()
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
-
+
@@ -8946,7 +9037,7 @@ client.agents.log()
**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
-
+
@@ -8954,7 +9045,7 @@ client.agents.log()
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
-
+
@@ -8962,7 +9053,7 @@ client.agents.log()
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the prompt.
-
+
@@ -8970,7 +9061,7 @@ client.agents.log()
**output_cost:** `typing.Optional[float]` — Cost in dollars associated to the tokens in the output.
-
+
@@ -8978,7 +9069,7 @@ client.agents.log()
**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
-
+
@@ -8986,34 +9077,36 @@ client.agents.log()
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
The Agent configuration to use. Two formats are supported:
+
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-A new Agent version will be created if the provided details do not match any existing version.
-
+ A new Agent version will be created if the provided details do not match any existing version.
+
@@ -9021,7 +9114,7 @@ A new Agent version will be created if the provided details do not match any exi
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -9029,7 +9122,7 @@ A new Agent version will be created if the provided details do not match any exi
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -9037,15 +9130,15 @@ A new Agent version will be created if the provided details do not match any exi
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -9053,7 +9146,7 @@ A new Agent version will be created if the provided details do not match any exi
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -9061,7 +9154,7 @@ A new Agent version will be created if the provided details do not match any exi
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -9069,7 +9162,7 @@ A new Agent version will be created if the provided details do not match any exi
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -9077,7 +9170,7 @@ A new Agent version will be created if the provided details do not match any exi
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -9085,7 +9178,7 @@ A new Agent version will be created if the provided details do not match any exi
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
-
+
@@ -9093,7 +9186,7 @@ A new Agent version will be created if the provided details do not match any exi
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -9101,7 +9194,7 @@ A new Agent version will be created if the provided details do not match any exi
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -9109,7 +9202,7 @@ A new Agent version will be created if the provided details do not match any exi
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -9117,7 +9210,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -9125,7 +9218,7 @@ A new Agent version will be created if the provided details do not match any exi
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -9133,7 +9226,7 @@ A new Agent version will be created if the provided details do not match any exi
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -9141,7 +9234,7 @@ A new Agent version will be created if the provided details do not match any exi
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -9149,7 +9242,7 @@ A new Agent version will be created if the provided details do not match any exi
**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -9157,7 +9250,7 @@ A new Agent version will be created if the provided details do not match any exi
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -9165,7 +9258,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -9173,13 +9266,12 @@ A new Agent version will be created if the provided details do not match any exi
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9199,6 +9291,7 @@ A new Agent version will be created if the provided details do not match any exi
Update a Log.
Update the details of a Log with the given ID.
+
@@ -9224,6 +9317,7 @@ client.agents.update_log(
)
```
+
@@ -9238,7 +9332,7 @@ client.agents.update_log(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -9246,7 +9340,7 @@ client.agents.update_log(
**log_id:** `str` — Unique identifier for the Log.
-
+
@@ -9254,7 +9348,7 @@ client.agents.update_log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
-
+
@@ -9262,7 +9356,7 @@ client.agents.update_log(
**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
+
@@ -9270,7 +9364,7 @@ client.agents.update_log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
-
+
@@ -9278,7 +9372,7 @@ client.agents.update_log(
**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
-
+
@@ -9286,7 +9380,7 @@ client.agents.update_log(
**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
-
+
@@ -9294,7 +9388,7 @@ client.agents.update_log(
**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
-
+
@@ -9302,13 +9396,12 @@ client.agents.update_log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9325,21 +9418,19 @@ client.agents.update_log(
-
-Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
-If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
-pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+Call an Agent.
-The agent will run for the maximum number of iterations, or until it encounters a stop condition,
-according to its configuration.
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
-Agent details in the request body. A new version is created if it does not match
-any existing ones. This is helpful in the case where you are storing or deriving
-your Agent details in code.
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
+
@@ -9364,6 +9455,7 @@ for chunk in response.data:
yield chunk
```
+
@@ -9378,7 +9470,7 @@ for chunk in response.data:
**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
-
+
@@ -9386,7 +9478,7 @@ for chunk in response.data:
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -9394,7 +9486,7 @@ for chunk in response.data:
**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -9402,7 +9494,7 @@ for chunk in response.data:
**id:** `typing.Optional[str]` — ID for an existing Agent.
-
+
@@ -9410,34 +9502,36 @@ for chunk in response.data:
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
The Agent configuration to use. Two formats are supported:
+
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-A new Agent version will be created if the provided details do not match any existing version.
-
+ A new Agent version will be created if the provided details do not match any existing version.
+
@@ -9445,7 +9539,7 @@ A new Agent version will be created if the provided details do not match any exi
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -9453,7 +9547,7 @@ A new Agent version will be created if the provided details do not match any exi
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -9461,7 +9555,7 @@ A new Agent version will be created if the provided details do not match any exi
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -9469,7 +9563,7 @@ A new Agent version will be created if the provided details do not match any exi
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -9477,7 +9571,7 @@ A new Agent version will be created if the provided details do not match any exi
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -9485,7 +9579,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -9493,7 +9587,7 @@ A new Agent version will be created if the provided details do not match any exi
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -9501,7 +9595,7 @@ A new Agent version will be created if the provided details do not match any exi
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -9509,7 +9603,7 @@ A new Agent version will be created if the provided details do not match any exi
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -9517,7 +9611,7 @@ A new Agent version will be created if the provided details do not match any exi
**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -9525,7 +9619,7 @@ A new Agent version will be created if the provided details do not match any exi
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -9533,7 +9627,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -9541,7 +9635,7 @@ A new Agent version will be created if the provided details do not match any exi
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -9549,7 +9643,7 @@ A new Agent version will be created if the provided details do not match any exi
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -9557,7 +9651,7 @@ A new Agent version will be created if the provided details do not match any exi
**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
-
+
@@ -9565,13 +9659,12 @@ A new Agent version will be created if the provided details do not match any exi
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9588,21 +9681,19 @@ A new Agent version will be created if the provided details do not match any exi
-
-Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+Call an Agent.
-If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
-pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
-The agent will run for the maximum number of iterations, or until it encounters a stop condition,
-according to its configuration.
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
-Agent details in the request body. A new version is created if it does not match
-any existing ones. This is helpful in the case where you are storing or deriving
-your Agent details in code.
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
+
@@ -9625,6 +9716,7 @@ client = Humanloop(
client.agents.call()
```
+
@@ -9639,7 +9731,7 @@ client.agents.call()
**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
-
+
@@ -9647,7 +9739,7 @@ client.agents.call()
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -9655,7 +9747,7 @@ client.agents.call()
**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -9663,7 +9755,7 @@ client.agents.call()
**id:** `typing.Optional[str]` — ID for an existing Agent.
-
+
@@ -9671,34 +9763,36 @@ client.agents.call()
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the to provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model must call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
-
+
-
-**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
The Agent configuration to use. Two formats are supported:
+
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-A new Agent version will be created if the provided details do not match any existing version.
-
+ A new Agent version will be created if the provided details do not match any existing version.
+
@@ -9706,7 +9800,7 @@ A new Agent version will be created if the provided details do not match any exi
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -9714,7 +9808,7 @@ A new Agent version will be created if the provided details do not match any exi
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -9722,7 +9816,7 @@ A new Agent version will be created if the provided details do not match any exi
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -9730,7 +9824,7 @@ A new Agent version will be created if the provided details do not match any exi
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -9738,7 +9832,7 @@ A new Agent version will be created if the provided details do not match any exi
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -9746,7 +9840,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
@@ -9754,7 +9848,7 @@ A new Agent version will be created if the provided details do not match any exi
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -9762,7 +9856,7 @@ A new Agent version will be created if the provided details do not match any exi
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -9770,7 +9864,7 @@ A new Agent version will be created if the provided details do not match any exi
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -9778,7 +9872,7 @@ A new Agent version will be created if the provided details do not match any exi
**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -9786,7 +9880,7 @@ A new Agent version will be created if the provided details do not match any exi
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -9794,7 +9888,7 @@ A new Agent version will be created if the provided details do not match any exi
**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
+
@@ -9802,7 +9896,7 @@ A new Agent version will be created if the provided details do not match any exi
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -9810,7 +9904,7 @@ A new Agent version will be created if the provided details do not match any exi
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -9818,7 +9912,7 @@ A new Agent version will be created if the provided details do not match any exi
**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
-
+
@@ -9826,18 +9920,17 @@ A new Agent version will be created if the provided details do not match any exi
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
-client.agents.continue_call_stream(...)
+client.agents.continue_stream(...)
-
@@ -9851,13 +9944,14 @@ A new Agent version will be created if the provided details do not match any exi
Continue an incomplete Agent call.
-This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
-requested by the Agent. The Agent will resume processing from where it left off.
-
-The messages in the request will be appended to the original messages in the Log. You do not
-have to provide the previous conversation history.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
+
@@ -9877,7 +9971,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.agents.continue_call_stream(
+response = client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
@@ -9885,6 +9979,7 @@ for chunk in response.data:
yield chunk
```
+
@@ -9899,7 +9994,7 @@ for chunk in response.data:
**log_id:** `str` — This identifies the Agent Log to continue.
-
+
@@ -9907,7 +10002,7 @@ for chunk in response.data:
**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
-
+
@@ -9915,7 +10010,7 @@ for chunk in response.data:
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -9923,7 +10018,7 @@ for chunk in response.data:
**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
-
+
@@ -9931,18 +10026,17 @@ for chunk in response.data:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
-client.agents.continue_call(...)
+client.agents.continue_(...)
-
@@ -9956,13 +10050,14 @@ for chunk in response.data:
Continue an incomplete Agent call.
-This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
-requested by the Agent. The Agent will resume processing from where it left off.
-
-The messages in the request will be appended to the original messages in the Log. You do not
-have to provide the previous conversation history.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
+
@@ -9988,6 +10083,7 @@ client.agents.continue_call(
)
```
+
@@ -10002,7 +10098,7 @@ client.agents.continue_call(
**log_id:** `str` — This identifies the Agent Log to continue.
-
+
@@ -10010,7 +10106,7 @@ client.agents.continue_call(
**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
-
+
@@ -10018,7 +10114,7 @@ client.agents.continue_call(
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -10026,7 +10122,7 @@ client.agents.continue_call(
**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
-
+
@@ -10034,13 +10130,12 @@ client.agents.continue_call(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10058,6 +10153,7 @@ client.agents.continue_call(
Get a list of all Agents.
+
@@ -10080,6 +10176,7 @@ client = Humanloop(
client.agents.list()
```
+
@@ -10094,7 +10191,7 @@ client.agents.list()
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -10102,7 +10199,7 @@ client.agents.list()
**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
-
+
@@ -10110,7 +10207,7 @@ client.agents.list()
**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
-
+
@@ -10118,7 +10215,7 @@ client.agents.list()
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
-
+
@@ -10126,7 +10223,7 @@ client.agents.list()
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Agents by
-
+
@@ -10134,7 +10231,7 @@ client.agents.list()
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -10142,13 +10239,12 @@ client.agents.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10173,6 +10269,7 @@ tools determine the versions of the Agent.
You can provide `version_name` and `version_description` to identify and describe your versions.
Version names must be unique within an Agent - attempting to create a version with a name
that already exists will result in a 409 Conflict error.
+
@@ -10197,6 +10294,7 @@ client.agents.upsert(
)
```
+
@@ -10211,7 +10309,7 @@ client.agents.upsert(
**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
-
+
@@ -10219,7 +10317,7 @@ client.agents.upsert(
**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -10227,7 +10325,7 @@ client.agents.upsert(
**id:** `typing.Optional[str]` — ID for an existing Agent.
-
+
@@ -10235,22 +10333,22 @@ client.agents.upsert(
**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
-
+
-
-**template:** `typing.Optional[AgentRequestTemplateParams]`
+**template:** `typing.Optional[AgentRequestTemplateParams]`
-The template contains the main structure and instructions for the model, including input variables for dynamic values.
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
-For completion models, provide a prompt template as a string.
+For completion models, provide a prompt template as a string.
Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
-
+
@@ -10258,7 +10356,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
-
+
@@ -10266,7 +10364,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
-
+
@@ -10274,7 +10372,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
-
+
@@ -10282,7 +10380,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values means the model will be more creative.
-
+
@@ -10290,7 +10388,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-
+
@@ -10298,7 +10396,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
-
+
@@ -10306,7 +10404,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
-
+
@@ -10314,7 +10412,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
-
+
@@ -10322,7 +10420,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
-
+
@@ -10330,7 +10428,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
-
+
@@ -10338,7 +10436,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
-
+
@@ -10346,15 +10444,15 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
-
+
-
-**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
-
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
+
@@ -10362,7 +10460,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
-
+
@@ -10370,7 +10468,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
-
+
@@ -10378,7 +10476,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**version_name:** `typing.Optional[str]` — Unique name for the Prompt version. Each Prompt can only have one version with a given name.
-
+
@@ -10386,7 +10484,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**version_description:** `typing.Optional[str]` — Description of the Version.
-
+
@@ -10394,7 +10492,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**description:** `typing.Optional[str]` — Description of the Prompt.
-
+
@@ -10402,7 +10500,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this prompt.
-
+
@@ -10410,7 +10508,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**readme:** `typing.Optional[str]` — Long description of the Prompt.
-
+
@@ -10418,13 +10516,12 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10442,6 +10539,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
Delete a version of the Agent.
+
@@ -10467,6 +10565,7 @@ client.agents.delete_agent_version(
)
```
+
@@ -10481,7 +10580,7 @@ client.agents.delete_agent_version(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10489,7 +10588,7 @@ client.agents.delete_agent_version(
**version_id:** `str` — Unique identifier for the specific version of the Agent.
-
+
@@ -10497,13 +10596,12 @@ client.agents.delete_agent_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10521,6 +10619,7 @@ client.agents.delete_agent_version(
Update the name or description of the Agent version.
+
@@ -10546,6 +10645,7 @@ client.agents.patch_agent_version(
)
```
+
@@ -10560,7 +10660,7 @@ client.agents.patch_agent_version(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10568,7 +10668,7 @@ client.agents.patch_agent_version(
**version_id:** `str` — Unique identifier for the specific version of the Agent.
-
+
@@ -10576,7 +10676,7 @@ client.agents.patch_agent_version(
**name:** `typing.Optional[str]` — Name of the version.
-
+
@@ -10584,7 +10684,7 @@ client.agents.patch_agent_version(
**description:** `typing.Optional[str]` — Description of the version.
-
+
@@ -10592,13 +10692,12 @@ client.agents.patch_agent_version(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10619,6 +10718,7 @@ Retrieve the Agent with the given ID.
By default, the deployed version of the Agent is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Agent.
+
@@ -10643,6 +10743,7 @@ client.agents.get(
)
```
+
@@ -10657,7 +10758,7 @@ client.agents.get(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10665,7 +10766,7 @@ client.agents.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
-
+
@@ -10673,7 +10774,7 @@ client.agents.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -10681,13 +10782,12 @@ client.agents.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10705,6 +10805,7 @@ client.agents.get(
Delete the Agent with the given ID.
+
@@ -10729,6 +10830,7 @@ client.agents.delete(
)
```
+
@@ -10743,7 +10845,7 @@ client.agents.delete(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10751,13 +10853,12 @@ client.agents.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10775,6 +10876,7 @@ client.agents.delete(
Move the Agent to a different path or change the name.
+
@@ -10799,6 +10901,7 @@ client.agents.move(
)
```
+
@@ -10813,7 +10916,7 @@ client.agents.move(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10821,7 +10924,7 @@ client.agents.move(
**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
-
+
@@ -10829,7 +10932,7 @@ client.agents.move(
**name:** `typing.Optional[str]` — Name of the Flow.
-
+
@@ -10837,7 +10940,7 @@ client.agents.move(
**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
-
+
@@ -10845,13 +10948,12 @@ client.agents.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10869,6 +10971,7 @@ client.agents.move(
Get a list of all the versions of a Agent.
+
@@ -10893,6 +10996,7 @@ client.agents.list_versions(
)
```
+
@@ -10907,7 +11011,7 @@ client.agents.list_versions(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10915,7 +11019,7 @@ client.agents.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -10923,13 +11027,12 @@ client.agents.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -10950,6 +11053,7 @@ Deploy Agent to an Environment.
Set the deployed version for the specified Environment. This Agent
will be used for calls made to the Agent in this Environment.
+
@@ -10976,6 +11080,7 @@ client.agents.set_deployment(
)
```
+
@@ -10990,7 +11095,7 @@ client.agents.set_deployment(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -10998,7 +11103,7 @@ client.agents.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -11006,7 +11111,7 @@ client.agents.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Agent.
-
+
@@ -11014,13 +11119,12 @@ client.agents.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11041,6 +11145,7 @@ Remove deployed Agent from the Environment.
Remove the deployed version for the specified Environment. This Agent
will no longer be used for calls made to the Agent in this Environment.
+
@@ -11066,6 +11171,7 @@ client.agents.remove_deployment(
)
```
+
@@ -11080,7 +11186,7 @@ client.agents.remove_deployment(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -11088,7 +11194,7 @@ client.agents.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -11096,13 +11202,12 @@ client.agents.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11120,6 +11225,7 @@ client.agents.remove_deployment(
List all Environments and their deployed versions for the Agent.
+
@@ -11144,6 +11250,7 @@ client.agents.list_environments(
)
```
+
@@ -11158,7 +11265,7 @@ client.agents.list_environments(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -11166,13 +11273,12 @@ client.agents.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11193,6 +11299,7 @@ Activate and deactivate Evaluators for monitoring the Agent.
An activated Evaluator will automatically be run on all new Logs
within the Agent for monitoring purposes.
+
@@ -11217,6 +11324,7 @@ client.agents.update_monitoring(
)
```
+
@@ -11230,8 +11338,8 @@ client.agents.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -11241,7 +11349,7 @@ client.agents.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -11251,7 +11359,7 @@ client.agents.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -11259,13 +11367,12 @@ client.agents.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11289,6 +11396,7 @@ or for editing with an AI tool.
By default, the deployed version of the Agent is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Agent.
+
@@ -11313,6 +11421,7 @@ client.agents.serialize(
)
```
+
@@ -11327,7 +11436,7 @@ client.agents.serialize(
**id:** `str` — Unique identifier for Agent.
-
+
@@ -11335,7 +11444,7 @@ client.agents.serialize(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
-
+
@@ -11343,7 +11452,7 @@ client.agents.serialize(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -11351,13 +11460,12 @@ client.agents.serialize(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11378,6 +11486,7 @@ Deserialize an Agent from the .agent file format.
This returns a subset of the attributes required by an Agent.
This subset is the bit that defines the Agent version (e.g. with `model` and `temperature` etc)
+
@@ -11402,6 +11511,7 @@ client.agents.deserialize(
)
```
+
@@ -11415,8 +11525,8 @@ client.agents.deserialize(
-
-**agent:** `str`
-
+**agent:** `str`
+
@@ -11424,18 +11534,18 @@ client.agents.deserialize(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Directories
+
client.directories.list()
-
@@ -11449,6 +11559,7 @@ client.agents.deserialize(
-
Retrieve a list of all Directories.
+
@@ -11471,6 +11582,7 @@ client = Humanloop(
client.directories.list()
```
+
@@ -11485,13 +11597,12 @@ client.directories.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11509,6 +11620,7 @@ client.directories.list()
Creates a Directory.
+
@@ -11531,6 +11643,7 @@ client = Humanloop(
client.directories.create()
```
+
@@ -11545,7 +11658,7 @@ client.directories.create()
**name:** `typing.Optional[str]` — Name of the directory to create.
-
+
@@ -11553,7 +11666,7 @@ client.directories.create()
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Starts with `dir_`.
-
+
@@ -11561,7 +11674,7 @@ client.directories.create()
**path:** `typing.Optional[str]` — Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`.
-
+
@@ -11569,13 +11682,12 @@ client.directories.create()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11593,6 +11705,7 @@ client.directories.create()
Fetches a directory by ID.
+
@@ -11617,6 +11730,7 @@ client.directories.get(
)
```
+
@@ -11631,7 +11745,7 @@ client.directories.get(
**id:** `str` — String ID of directory. Starts with `dir_`.
-
+
@@ -11639,13 +11753,12 @@ client.directories.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11665,6 +11778,7 @@ client.directories.get(
Delete the Directory with the given ID.
The Directory must be empty (i.e. contain no Directories or Files).
+
@@ -11689,6 +11803,7 @@ client.directories.delete(
)
```
+
@@ -11703,7 +11818,7 @@ client.directories.delete(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`.
-
+
@@ -11711,13 +11826,12 @@ client.directories.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11735,6 +11849,7 @@ client.directories.delete(
Update the Directory with the given ID.
+
@@ -11759,6 +11874,7 @@ client.directories.update(
)
```
+
@@ -11773,7 +11889,7 @@ client.directories.update(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`.
-
+
@@ -11781,7 +11897,7 @@ client.directories.update(
**name:** `typing.Optional[str]` — Name to set for the directory.
-
+
@@ -11789,7 +11905,7 @@ client.directories.update(
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Specify this to move directories. Starts with `dir_`.
-
+
@@ -11797,7 +11913,7 @@ client.directories.update(
**path:** `typing.Optional[str]` — Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`.
-
+
@@ -11805,18 +11921,18 @@ client.directories.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Files
+
client.files.list_files(...)
-
@@ -11830,6 +11946,7 @@ client.directories.update(
-
Get a paginated list of files.
+
@@ -11852,6 +11969,7 @@ client = Humanloop(
client.files.list_files()
```
+
@@ -11866,7 +11984,7 @@ client.files.list_files()
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -11874,7 +11992,7 @@ client.files.list_files()
**size:** `typing.Optional[int]` — Page size for pagination. Number of files to fetch.
-
+
@@ -11882,7 +12000,7 @@ client.files.list_files()
**name:** `typing.Optional[str]` — Case-insensitive filter for file name.
-
+
@@ -11890,7 +12008,7 @@ client.files.list_files()
**path:** `typing.Optional[str]` — Path of the directory to filter for. Returns files in this directory and all its subdirectories.
-
+
@@ -11898,7 +12016,7 @@ client.files.list_files()
**template:** `typing.Optional[bool]` — Filter to include only template files.
-
+
@@ -11906,7 +12024,7 @@ client.files.list_files()
**type:** `typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]]` — List of file types to filter for.
-
+
@@ -11914,7 +12032,7 @@ client.files.list_files()
**environment:** `typing.Optional[str]` — Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
-
+
@@ -11922,7 +12040,7 @@ client.files.list_files()
**sort_by:** `typing.Optional[FileSortBy]` — Field to sort files by
-
+
@@ -11930,7 +12048,7 @@ client.files.list_files()
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -11938,7 +12056,7 @@ client.files.list_files()
**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
-
+
@@ -11946,13 +12064,12 @@ client.files.list_files()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -11970,6 +12087,7 @@ client.files.list_files()
Retrieve a File by path.
+
@@ -11994,6 +12112,7 @@ client.files.retrieve_by_path(
)
```
+
@@ -12008,7 +12127,7 @@ client.files.retrieve_by_path(
**path:** `str` — Path of the File to retrieve.
-
+
@@ -12016,7 +12135,7 @@ client.files.retrieve_by_path(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -12024,7 +12143,7 @@ client.files.retrieve_by_path(
**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
-
+
@@ -12032,18 +12151,18 @@ client.files.retrieve_by_path(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Evaluations
+
client.evaluations.list(...)
-
@@ -12057,6 +12176,7 @@ client.files.retrieve_by_path(
-
Retrieve a list of Evaluations for the specified File.
+
@@ -12087,6 +12207,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -12101,7 +12222,7 @@ for page in response.iter_pages():
**file_id:** `str` — Filter by File ID. Only Evaluations for the specified File will be returned.
-
+
@@ -12109,7 +12230,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -12117,7 +12238,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluations to fetch.
-
+
@@ -12125,13 +12246,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12153,6 +12273,7 @@ Create an Evaluation.
Create a new Evaluation by specifying the File to evaluate, and a name
for the Evaluation.
You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint.
+
@@ -12177,6 +12298,7 @@ client.evaluations.create(
)
```
+
@@ -12191,7 +12313,7 @@ client.evaluations.create(
**evaluators:** `typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]` — The Evaluators used to evaluate.
-
+
@@ -12199,7 +12321,7 @@ client.evaluations.create(
**file:** `typing.Optional[FileRequestParams]` — The File to associate with the Evaluation. This File contains the Logs you're evaluating.
-
+
@@ -12207,7 +12329,7 @@ client.evaluations.create(
**name:** `typing.Optional[str]` — Name of the Evaluation to help identify it. Must be unique within the associated File.
-
+
@@ -12215,13 +12337,12 @@ client.evaluations.create(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12241,6 +12362,7 @@ client.evaluations.create(
Add Evaluators to an Evaluation.
The Evaluators will be run on the Logs generated for the Evaluation.
+
@@ -12266,6 +12388,7 @@ client.evaluations.add_evaluators(
)
```
+
@@ -12280,7 +12403,7 @@ client.evaluations.add_evaluators(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12288,7 +12411,7 @@ client.evaluations.add_evaluators(
**evaluators:** `typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]` — The Evaluators to add to this Evaluation.
-
+
@@ -12296,13 +12419,12 @@ client.evaluations.add_evaluators(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12322,6 +12444,7 @@ client.evaluations.add_evaluators(
Remove an Evaluator from an Evaluation.
The Evaluator will no longer be run on the Logs in the Evaluation.
+
@@ -12347,6 +12470,7 @@ client.evaluations.remove_evaluator(
)
```
+
@@ -12361,7 +12485,7 @@ client.evaluations.remove_evaluator(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12369,7 +12493,7 @@ client.evaluations.remove_evaluator(
**evaluator_version_id:** `str` — Unique identifier for Evaluator Version.
-
+
@@ -12377,13 +12501,12 @@ client.evaluations.remove_evaluator(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12407,6 +12530,7 @@ such as its name.
To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint.
To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint.
+
@@ -12431,6 +12555,7 @@ client.evaluations.get(
)
```
+
@@ -12445,7 +12570,7 @@ client.evaluations.get(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12453,13 +12578,12 @@ client.evaluations.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12479,6 +12603,7 @@ client.evaluations.get(
Delete an Evaluation.
The Runs and Evaluators in the Evaluation will not be deleted.
+
@@ -12503,6 +12628,7 @@ client.evaluations.delete(
)
```
+
@@ -12517,7 +12643,7 @@ client.evaluations.delete(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12525,13 +12651,12 @@ client.evaluations.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12549,6 +12674,7 @@ client.evaluations.delete(
List all Runs for an Evaluation.
+
@@ -12573,6 +12699,7 @@ client.evaluations.list_runs_for_evaluation(
)
```
+
@@ -12587,7 +12714,7 @@ client.evaluations.list_runs_for_evaluation(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12595,13 +12722,12 @@ client.evaluations.list_runs_for_evaluation(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12632,6 +12758,7 @@ referencing a datapoint in the specified Dataset will be associated with the Run
To keep updated on the progress of the Run, you can poll the Run using
the `GET /evaluations/{id}/runs` endpoint and check its status.
+
@@ -12656,6 +12783,7 @@ client.evaluations.create_run(
)
```
+
@@ -12670,7 +12798,7 @@ client.evaluations.create_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12678,7 +12806,7 @@ client.evaluations.create_run(
**dataset:** `typing.Optional[CreateRunRequestDatasetParams]` — Dataset to use in this Run.
-
+
@@ -12686,7 +12814,7 @@ client.evaluations.create_run(
**version:** `typing.Optional[CreateRunRequestVersionParams]` — Version to use in this Run.
-
+
@@ -12694,7 +12822,7 @@ client.evaluations.create_run(
**orchestrated:** `typing.Optional[bool]` — Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API.
-
+
@@ -12702,7 +12830,7 @@ client.evaluations.create_run(
**use_existing_logs:** `typing.Optional[bool]` — If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided.
-
+
@@ -12710,13 +12838,12 @@ client.evaluations.create_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12737,6 +12864,7 @@ Add an existing Run to the specified Evaluation.
This is useful if you want to compare the Runs in this Evaluation with an existing Run
that exists within another Evaluation.
+
@@ -12762,6 +12890,7 @@ client.evaluations.add_existing_run(
)
```
+
@@ -12776,7 +12905,7 @@ client.evaluations.add_existing_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12784,7 +12913,7 @@ client.evaluations.add_existing_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -12792,13 +12921,12 @@ client.evaluations.add_existing_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12819,6 +12947,7 @@ Remove a Run from an Evaluation.
The Logs and Versions used in the Run will not be deleted.
If this Run is used in any other Evaluations, it will still be available in those Evaluations.
+
@@ -12844,6 +12973,7 @@ client.evaluations.remove_run(
)
```
+
@@ -12858,7 +12988,7 @@ client.evaluations.remove_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12866,7 +12996,7 @@ client.evaluations.remove_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -12874,13 +13004,12 @@ client.evaluations.remove_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12901,6 +13030,7 @@ Update an Evaluation Run.
Specify `control=true` to use this Run as the control Run for the Evaluation.
You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed.
+
@@ -12926,6 +13056,7 @@ client.evaluations.update_evaluation_run(
)
```
+
@@ -12940,7 +13071,7 @@ client.evaluations.update_evaluation_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -12948,7 +13079,7 @@ client.evaluations.update_evaluation_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -12956,7 +13087,7 @@ client.evaluations.update_evaluation_run(
**control:** `typing.Optional[bool]` — If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run.
-
+
@@ -12964,7 +13095,7 @@ client.evaluations.update_evaluation_run(
**status:** `typing.Optional[EvaluationStatus]` — Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`.
-
+
@@ -12972,13 +13103,12 @@ client.evaluations.update_evaluation_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -12996,6 +13126,7 @@ client.evaluations.update_evaluation_run(
Add the specified Logs to a Run.
+
@@ -13022,6 +13153,7 @@ client.evaluations.add_logs_to_run(
)
```
+
@@ -13036,7 +13168,7 @@ client.evaluations.add_logs_to_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -13044,7 +13176,7 @@ client.evaluations.add_logs_to_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -13052,7 +13184,7 @@ client.evaluations.add_logs_to_run(
**log_ids:** `typing.Sequence[str]` — The IDs of the Logs to add to the Run.
-
+
@@ -13060,13 +13192,12 @@ client.evaluations.add_logs_to_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -13087,6 +13218,7 @@ Get Evaluation Stats.
Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the
corresponding Evaluator statistics (such as the mean and percentiles).
+
@@ -13111,6 +13243,7 @@ client.evaluations.get_stats(
)
```
+
@@ -13125,7 +13258,7 @@ client.evaluations.get_stats(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -13133,13 +13266,12 @@ client.evaluations.get_stats(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -13159,6 +13291,7 @@ client.evaluations.get_stats(
Get the Logs associated to a specific Evaluation.
This returns the Logs associated to all Runs within with the Evaluation.
+
@@ -13183,6 +13316,7 @@ client.evaluations.get_logs(
)
```
+
@@ -13197,7 +13331,7 @@ client.evaluations.get_logs(
**id:** `str` — String ID of evaluation. Starts with `ev_` or `evr_`.
-
+
@@ -13205,7 +13339,7 @@ client.evaluations.get_logs(
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -13213,7 +13347,7 @@ client.evaluations.get_logs(
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch.
-
+
@@ -13221,7 +13355,7 @@ client.evaluations.get_logs(
**run_id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Filter by Run IDs. Only Logs for the specified Runs will be returned.
-
+
@@ -13229,18 +13363,18 @@ client.evaluations.get_logs(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Logs
+
client.logs.list(...)
-
@@ -13254,6 +13388,7 @@ client.evaluations.get_logs(
-
List all Logs for the given filter criteria.
+
@@ -13284,6 +13419,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -13298,7 +13434,7 @@ for page in response.iter_pages():
**file_id:** `str` — Unique identifier for the File to list Logs for.
-
+
@@ -13306,7 +13442,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -13314,7 +13450,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch.
-
+
@@ -13322,7 +13458,15 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — If provided, only Logs belonging to the specified Version will be returned.
-
+
+
+
+
+
+-
+
+**version_status:** `typing.Optional[VersionStatus]` — If provided, only Logs belonging to Versions with the specified status will be returned.
+
@@ -13330,7 +13474,7 @@ for page in response.iter_pages():
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — If provided, returns Logs whose IDs contain any of the specified values as substrings.
-
+
@@ -13338,7 +13482,7 @@ for page in response.iter_pages():
**search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in its inputs and output will be returned.
-
+
@@ -13346,7 +13490,7 @@ for page in response.iter_pages():
**metadata_search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in its metadata will be returned.
-
+
@@ -13354,7 +13498,7 @@ for page in response.iter_pages():
**start_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created after the specified date will be returned.
-
+
@@ -13362,7 +13506,7 @@ for page in response.iter_pages():
**end_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created before the specified date will be returned.
-
+
@@ -13370,7 +13514,7 @@ for page in response.iter_pages():
**include_parent:** `typing.Optional[bool]` — If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
-
+
@@ -13378,7 +13522,7 @@ for page in response.iter_pages():
**in_trace_filter:** `typing.Optional[typing.Union[bool, typing.Sequence[bool]]]` — If true, return Logs that are associated to a Trace. False, return Logs that are not associated to a Trace.
-
+
@@ -13386,7 +13530,7 @@ for page in response.iter_pages():
**sample:** `typing.Optional[int]` — If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
-
+
@@ -13394,7 +13538,7 @@ for page in response.iter_pages():
**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
-
+
@@ -13402,13 +13546,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -13426,6 +13569,7 @@ for page in response.iter_pages():
Delete Logs with the given IDs.
+
@@ -13450,6 +13594,7 @@ client.logs.delete(
)
```
+
@@ -13464,7 +13609,7 @@ client.logs.delete(
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Unique identifiers for the Logs to delete.
-
+
@@ -13472,13 +13617,12 @@ client.logs.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -13496,6 +13640,7 @@ client.logs.delete(
Retrieve the Log with the given ID.
+
@@ -13520,6 +13665,7 @@ client.logs.get(
)
```
+
@@ -13534,7 +13680,7 @@ client.logs.get(
**id:** `str` — Unique identifier for Log.
-
+
@@ -13542,14 +13688,12 @@ client.logs.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
-
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 8e7fa495..46712075 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -6,10 +6,10 @@
AgentCallStreamResponse,
AgentCallStreamResponsePayload,
AgentConfigResponse,
- AgentContinueCallResponse,
- AgentContinueCallResponseToolChoice,
- AgentContinueCallStreamResponse,
- AgentContinueCallStreamResponsePayload,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
AgentInlineTool,
AgentKernelRequest,
AgentKernelRequestReasoningEffort,
@@ -275,10 +275,10 @@
AgentCallStreamResponseParams,
AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
- AgentContinueCallResponseParams,
- AgentContinueCallResponseToolChoiceParams,
- AgentContinueCallStreamResponseParams,
- AgentContinueCallStreamResponsePayloadParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
AgentInlineToolParams,
AgentKernelRequestParams,
AgentKernelRequestReasoningEffortParams,
@@ -447,14 +447,14 @@
"AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
- "AgentContinueCallResponse",
- "AgentContinueCallResponseParams",
- "AgentContinueCallResponseToolChoice",
- "AgentContinueCallResponseToolChoiceParams",
- "AgentContinueCallStreamResponse",
- "AgentContinueCallStreamResponseParams",
- "AgentContinueCallStreamResponsePayload",
- "AgentContinueCallStreamResponsePayloadParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
"AgentInlineTool",
"AgentInlineToolParams",
"AgentKernelRequest",
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index 3cb31092..acbe9e9a 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -10,7 +10,7 @@
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
from ..types.create_agent_log_response import CreateAgentLogResponse
-from ..types.agent_log_response import AgentLogResponse
+from ..types.log_response import LogResponse
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
@@ -290,7 +290,7 @@ def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentLogResponse:
+ ) -> LogResponse:
"""
Update a Log.
@@ -327,7 +327,7 @@ def update_log(
Returns
-------
- AgentLogResponse
+ LogResponse
Successful Response
Examples
@@ -383,21 +383,18 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -548,21 +545,18 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -683,7 +677,7 @@ def call(
)
return response.data
- def continue_call_stream(
+ def continue_stream(
self,
*,
log_id: str,
@@ -691,18 +685,18 @@ def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[AgentContinueCallStreamResponse]:
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -722,7 +716,7 @@ def continue_call_stream(
Yields
------
- typing.Iterator[AgentContinueCallStreamResponse]
+ typing.Iterator[AgentContinueStreamResponse]
Examples
@@ -732,14 +726,14 @@ def continue_call_stream(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- response = client.agents.continue_call_stream(
+ response = client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
for chunk in response:
yield chunk
"""
- with self._raw_client.continue_call_stream(
+ with self._raw_client.continue_stream(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -748,7 +742,7 @@ def continue_call_stream(
) as r:
yield from r.data
- def continue_call(
+ def continue_(
self,
*,
log_id: str,
@@ -756,18 +750,18 @@ def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentContinueCallResponse:
+ ) -> AgentContinueResponse:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -787,7 +781,7 @@ def continue_call(
Returns
-------
- AgentContinueCallResponse
+ AgentContinueResponse
Examples
@@ -802,7 +796,7 @@ def continue_call(
messages=[{"role": "user"}],
)
"""
- response = self._raw_client.continue_call(
+ response = self._raw_client.continue_(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -1803,7 +1797,7 @@ async def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentLogResponse:
+ ) -> LogResponse:
"""
Update a Log.
@@ -1840,7 +1834,7 @@ async def update_log(
Returns
-------
- AgentLogResponse
+ LogResponse
Successful Response
Examples
@@ -1904,21 +1898,18 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2078,21 +2069,18 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2221,7 +2209,7 @@ async def main() -> None:
)
return response.data
- async def continue_call_stream(
+ async def continue_stream(
self,
*,
log_id: str,
@@ -2229,18 +2217,18 @@ async def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[AgentContinueCallStreamResponse]:
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2260,7 +2248,7 @@ async def continue_call_stream(
Yields
------
- typing.AsyncIterator[AgentContinueCallStreamResponse]
+ typing.AsyncIterator[AgentContinueStreamResponse]
Examples
@@ -2275,7 +2263,7 @@ async def continue_call_stream(
async def main() -> None:
- response = await client.agents.continue_call_stream(
+ response = await client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
@@ -2285,7 +2273,7 @@ async def main() -> None:
asyncio.run(main())
"""
- async with self._raw_client.continue_call_stream(
+ async with self._raw_client.continue_stream(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -2295,7 +2283,7 @@ async def main() -> None:
async for data in r.data:
yield data
- async def continue_call(
+ async def continue_(
self,
*,
log_id: str,
@@ -2303,18 +2291,18 @@ async def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentContinueCallResponse:
+ ) -> AgentContinueResponse:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2334,7 +2322,7 @@ async def continue_call(
Returns
-------
- AgentContinueCallResponse
+ AgentContinueResponse
Examples
@@ -2357,7 +2345,7 @@ async def main() -> None:
asyncio.run(main())
"""
- response = await self._raw_client.continue_call(
+ response = await self._raw_client.continue_(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index 8ce4fa79..ec39a884 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -16,7 +16,7 @@
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
-from ..types.agent_log_response import AgentLogResponse
+from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
@@ -320,7 +320,7 @@ def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[AgentLogResponse]:
+ ) -> HttpResponse[LogResponse]:
"""
Update a Log.
@@ -357,7 +357,7 @@ def update_log(
Returns
-------
- HttpResponse[AgentLogResponse]
+ HttpResponse[LogResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -384,9 +384,9 @@ def update_log(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentLogResponse,
+ LogResponse,
construct_type(
- type_=AgentLogResponse, # type: ignore
+ type_=LogResponse, # type: ignore
object_=_response.json(),
),
)
@@ -435,21 +435,18 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -641,21 +638,18 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -810,7 +804,7 @@ def call(
raise ApiError(status_code=_response.status_code, body=_response_json)
@contextlib.contextmanager
- def continue_call_stream(
+ def continue_stream(
self,
*,
log_id: str,
@@ -818,18 +812,18 @@ def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]:
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -849,7 +843,7 @@ def continue_call_stream(
Yields
------
- typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -873,7 +867,7 @@ def continue_call_stream(
omit=OMIT,
) as _response:
- def stream() -> HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]:
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
try:
if 200 <= _response.status_code < 300:
@@ -907,7 +901,7 @@ def _iter():
yield stream()
- def continue_call(
+ def continue_(
self,
*,
log_id: str,
@@ -915,18 +909,18 @@ def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[AgentContinueCallResponse]:
+ ) -> HttpResponse[AgentContinueResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -946,7 +940,7 @@ def continue_call(
Returns
-------
- HttpResponse[AgentContinueCallResponse]
+ HttpResponse[AgentContinueResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -972,7 +966,7 @@ def continue_call(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentContinueCallResponse,
+ AgentContinueResponse,
construct_type(
type_=AgentContinueCallResponse, # type: ignore
object_=_response.json(),
@@ -2242,7 +2236,7 @@ async def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[AgentLogResponse]:
+ ) -> AsyncHttpResponse[LogResponse]:
"""
Update a Log.
@@ -2279,7 +2273,7 @@ async def update_log(
Returns
-------
- AsyncHttpResponse[AgentLogResponse]
+ AsyncHttpResponse[LogResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2306,9 +2300,9 @@ async def update_log(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentLogResponse,
+ LogResponse,
construct_type(
- type_=AgentLogResponse, # type: ignore
+ type_=LogResponse, # type: ignore
object_=_response.json(),
),
)
@@ -2357,21 +2351,18 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2563,21 +2554,18 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2732,7 +2720,7 @@ async def call(
raise ApiError(status_code=_response.status_code, body=_response_json)
@contextlib.asynccontextmanager
- async def continue_call_stream(
+ async def continue_stream(
self,
*,
log_id: str,
@@ -2740,18 +2728,18 @@ async def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]:
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2771,7 +2759,7 @@ async def continue_call_stream(
Yields
------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2795,7 +2783,7 @@ async def continue_call_stream(
omit=OMIT,
) as _response:
- async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]:
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
try:
if 200 <= _response.status_code < 300:
@@ -2829,7 +2817,7 @@ async def _iter():
yield await stream()
- async def continue_call(
+ async def continue_(
self,
*,
log_id: str,
@@ -2837,18 +2825,18 @@ async def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[AgentContinueCallResponse]:
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2868,7 +2856,7 @@ async def continue_call(
Returns
-------
- AsyncHttpResponse[AgentContinueCallResponse]
+ AsyncHttpResponse[AgentContinueResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2894,7 +2882,7 @@ async def continue_call(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentContinueCallResponse,
+ AgentContinueResponse,
construct_type(
-                        type_=AgentContinueCallResponse,  # type: ignore
+                        type_=AgentContinueResponse,  # type: ignore
object_=_response.json(),
diff --git a/src/humanloop/cli/__init__.py b/src/humanloop/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
new file mode 100644
index 00000000..ae5d1b43
--- /dev/null
+++ b/src/humanloop/cli/__main__.py
@@ -0,0 +1,256 @@
+import click
+import logging
+from pathlib import Path
+from typing import Optional, Callable
+from functools import wraps
+from dotenv import load_dotenv, find_dotenv
+import os
+import sys
+from humanloop import Humanloop
+from humanloop.sync.sync_client import SyncClient
+from datetime import datetime
+from humanloop.cli.progress import progress_context
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO) # Set back to INFO level
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s") # Simplified formatter
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+# Color constants
+SUCCESS_COLOR = "green"
+ERROR_COLOR = "red"
+INFO_COLOR = "blue"
+WARNING_COLOR = "yellow"
+
+MAX_FILES_TO_DISPLAY = 10
+
+def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
+ """Get a Humanloop client instance."""
+ if not api_key:
+ if env_file:
+ load_dotenv(env_file)
+ else:
+ env_path = find_dotenv()
+ if env_path:
+ load_dotenv(env_path)
+ else:
+ if os.path.exists(".env"):
+ load_dotenv(".env")
+ else:
+ load_dotenv()
+
+ api_key = os.getenv("HUMANLOOP_API_KEY")
+ if not api_key:
+ raise click.ClickException(
+ click.style("No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key", fg=ERROR_COLOR)
+ )
+
+ return Humanloop(api_key=api_key, base_url=base_url)
+
+def common_options(f: Callable) -> Callable:
+ """Decorator for common CLI options."""
+ @click.option(
+ "--api-key",
+ help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
+ default=None,
+ show_default=False,
+ )
+ @click.option(
+ "--env-file",
+ help="Path to .env file. If not provided, looks for .env in current directory.",
+ default=None,
+ type=click.Path(exists=True),
+ show_default=False,
+ )
+ @click.option(
+ "--base-dir",
+ help="Base directory for pulled files",
+ default="humanloop",
+ type=click.Path(),
+ )
+ @click.option(
+ "--base-url",
+ default=None,
+ hidden=True,
+ )
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ return f(*args, **kwargs)
+ return wrapper
+
+def handle_sync_errors(f: Callable) -> Callable:
+ """Decorator for handling sync operation errors."""
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+            click.echo(click.style(f"Error: {e}", fg=ERROR_COLOR))
+ sys.exit(1)
+ return wrapper
+
+@click.group(
+ help="Humanloop CLI for managing sync operations.",
+ context_settings={
+ "help_option_names": ["-h", "--help"],
+ "max_content_width": 100,
+ }
+)
+def cli():
+ """Humanloop CLI for managing sync operations."""
+ pass
+
+@cli.command()
+@click.option(
+ "--path",
+ "-p",
+ help="Path to pull (file or directory). If not provided, pulls everything. "
+ "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "
+ "To pull a directory, simply specify the path to the directory (e.g. abc/def to pull all files under abc/def and its subdirectories).",
+ default=None,
+)
+@click.option(
+ "--environment",
+ "-e",
+ help="Environment to pull from (e.g. 'production', 'staging')",
+ default=None,
+)
+@click.option(
+ "--verbose",
+ "-v",
+ is_flag=True,
+ help="Show detailed information about the operation",
+)
+@handle_sync_errors
+@common_options
+def pull(
+ path: Optional[str],
+ environment: Optional[str],
+ api_key: Optional[str],
+ env_file: Optional[str],
+ base_dir: str,
+ base_url: Optional[str],
+ verbose: bool
+):
+ """Pull prompt and agent files from Humanloop to your local filesystem.
+
+ \b
+ This command will:
+ 1. Fetch prompt and agent files from your Humanloop workspace
+ 2. Save them to your local filesystem
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (.prompt or .agent)
+
+ \b
+ The files will be saved with the following structure:
+ {base_dir}/
+ ├── prompts/
+ │ ├── my_prompt.prompt
+ │ └── nested/
+ │ └── another_prompt.prompt
+ └── agents/
+ └── my_agent.agent
+
+ The operation will overwrite existing files with the latest version from Humanloop
+ but will not delete local files that don't exist in the remote workspace.
+
+ Currently only supports syncing prompt and agent files. Other file types will be skipped."""
+ client = get_client(api_key, env_file, base_url)
+ sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)
+
+ click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
+ click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
+ click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
+
+ if verbose:
+ # Don't use the spinner in verbose mode as the spinner and sync client logging compete
+ successful_files = sync_client.pull(path, environment)
+ else:
+ with progress_context("Pulling files..."):
+ successful_files = sync_client.pull(path, environment)
+
+ # Get metadata about the operation
+ metadata = sync_client.metadata.get_last_operation()
+ if metadata:
+ # Determine if the operation was successful based on failed_files
+ is_successful = not metadata.get('failed_files') and not metadata.get('error')
+ duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
+ click.echo(click.style(f"Pull completed in {metadata['duration_ms']}ms", fg=duration_color))
+
+ if metadata['successful_files']:
+ click.echo(click.style(f"\nSuccessfully pulled {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))
+
+ if verbose:
+ for file in metadata['successful_files']:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+ else:
+ files_to_display = metadata['successful_files'][:MAX_FILES_TO_DISPLAY]
+ for file in files_to_display:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+
+ if len(metadata['successful_files']) > MAX_FILES_TO_DISPLAY:
+ remaining = len(metadata['successful_files']) - MAX_FILES_TO_DISPLAY
+ click.echo(click.style(f" ...and {remaining} more", fg=SUCCESS_COLOR))
+ if metadata['failed_files']:
+ click.echo(click.style(f"\nFailed to pull {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
+ for file in metadata['failed_files']:
+ click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
+ if metadata.get('error'):
+ click.echo(click.style(f"\nError: {metadata['error']}", fg=ERROR_COLOR))
+
+def format_timestamp(timestamp: str) -> str:
+ """Format timestamp to a more readable format."""
+ try:
+ dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
+ return dt.strftime('%Y-%m-%d %H:%M:%S')
+ except (ValueError, AttributeError):
+ return timestamp
+
+@cli.command()
+@click.option(
+ "--oneline",
+ is_flag=True,
+ help="Display history in a single line per operation",
+)
+@handle_sync_errors
+@common_options
+def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
+ """Show sync operation history."""
+ client = get_client(api_key, env_file, base_url)
+ sync_client = SyncClient(client, base_dir=base_dir)
+
+ history = sync_client.metadata.get_history()
+ if not history:
+ click.echo(click.style("No sync operations found in history.", fg=WARNING_COLOR))
+ return
+
+ if not oneline:
+ click.echo(click.style("Sync Operation History:", fg=INFO_COLOR))
+ click.echo(click.style("======================", fg=INFO_COLOR))
+
+ for op in history:
+ if oneline:
+ # Format: timestamp | operation_type | path | environment | duration_ms | status
+ status = click.style("✓", fg=SUCCESS_COLOR) if not op['failed_files'] else click.style("✗", fg=ERROR_COLOR)
+ click.echo(f"{format_timestamp(op['timestamp'])} | {op['operation_type']} | {op['path'] or '(root)'} | {op['environment'] or '-'} | {op['duration_ms']}ms | {status}")
+ else:
+ click.echo(click.style(f"\nOperation: {op['operation_type']}", fg=INFO_COLOR))
+ click.echo(f"Timestamp: {format_timestamp(op['timestamp'])}")
+ click.echo(f"Path: {op['path'] or '(root)'}")
+ if op['environment']:
+ click.echo(f"Environment: {op['environment']}")
+ click.echo(f"Duration: {op['duration_ms']}ms")
+ if op['successful_files']:
+ click.echo(click.style(f"Successfully {op['operation_type']}ed {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}", fg=SUCCESS_COLOR))
+ if op['failed_files']:
+ click.echo(click.style(f"Failed to {op['operation_type']}ed {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}", fg=ERROR_COLOR))
+ if op['error']:
+ click.echo(click.style(f"Error: {op['error']}", fg=ERROR_COLOR))
+ click.echo(click.style("----------------------", fg=INFO_COLOR))
+
+if __name__ == "__main__":
+ cli()
\ No newline at end of file
diff --git a/src/humanloop/cli/progress.py b/src/humanloop/cli/progress.py
new file mode 100644
index 00000000..67ef4506
--- /dev/null
+++ b/src/humanloop/cli/progress.py
@@ -0,0 +1,120 @@
+import sys
+import time
+from typing import Optional, Callable, Any
+from threading import Thread, Event
+from contextlib import contextmanager
+
+class Spinner:
+ """A simple terminal spinner for indicating progress."""
+
+ def __init__(
+ self,
+ message: str = "Loading...",
+ delay: float = 0.1,
+ spinner_chars: str = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
+ ):
+ self.message = message
+ self.delay = delay
+ self.spinner_chars = spinner_chars
+ self.stop_event = Event()
+ self.spinner_thread: Optional[Thread] = None
+
+ def _spin(self):
+ """The actual spinner animation."""
+ i = 0
+ while not self.stop_event.is_set():
+ sys.stdout.write(f"\r{self.spinner_chars[i]} {self.message}")
+ sys.stdout.flush()
+ i = (i + 1) % len(self.spinner_chars)
+ time.sleep(self.delay)
+
+ def start(self):
+ """Start the spinner animation."""
+ self.stop_event.clear()
+ self.spinner_thread = Thread(target=self._spin)
+ self.spinner_thread.daemon = True
+ self.spinner_thread.start()
+
+ def stop(self, final_message: Optional[str] = None):
+ """Stop the spinner and optionally display a final message."""
+ if self.spinner_thread is None:
+ return
+
+ self.stop_event.set()
+ self.spinner_thread.join()
+
+ # Clear the spinner line
+ sys.stdout.write("\r" + " " * (len(self.message) + 2) + "\r")
+
+ if final_message:
+ print(final_message)
+ sys.stdout.flush()
+
+ def update_message(self, message: str):
+ """Update the spinner message."""
+ self.message = message
+
+class ProgressTracker:
+ """A simple progress tracker that shows percentage completion."""
+
+ def __init__(
+ self,
+ total: int,
+ message: str = "Progress",
+ width: int = 40
+ ):
+ self.total = total
+ self.current = 0
+ self.message = message
+ self.width = width
+ self.start_time = time.time()
+
+ def update(self, increment: int = 1):
+ """Update the progress."""
+ self.current += increment
+ self._display()
+
+ def _display(self):
+ """Display the current progress."""
+ percentage = (self.current / self.total) * 100
+ filled = int(self.width * self.current / self.total)
+ bar = "█" * filled + "░" * (self.width - filled)
+
+ elapsed = time.time() - self.start_time
+ if self.current > 0:
+ rate = elapsed / self.current
+ eta = rate * (self.total - self.current)
+ time_str = f"ETA: {eta:.1f}s"
+ else:
+ time_str = "Calculating..."
+
+ sys.stdout.write(f"\r{self.message}: [{bar}] {percentage:.1f}% {time_str}")
+ sys.stdout.flush()
+
+ def finish(self, final_message: Optional[str] = None):
+ """Complete the progress bar and optionally show a final message."""
+ self._display()
+ print() # New line
+ if final_message:
+ print(final_message)
+
+@contextmanager
+def progress_context(message: str = "Loading...", success_message: str | None = None, error_message: str | None = None):
+ """Context manager for showing a spinner during an operation."""
+ spinner = Spinner(message)
+ spinner.start()
+ try:
+ yield spinner
+ spinner.stop(success_message)
+    except Exception:
+ spinner.stop(error_message)
+ raise
+
+def with_progress(message: str = "Loading..."):
+ """Decorator to add a spinner to a function."""
+ def decorator(func: Callable):
+ def wrapper(*args, **kwargs):
+ with progress_context(message) as spinner:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
\ No newline at end of file
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 74cd6c97..996b75ad 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -18,7 +18,7 @@
)
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log
+from humanloop.overload import overload_call, overload_log, overload_with_local_files
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -29,6 +29,7 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
+from humanloop.sync.sync_client import SyncClient, DEFAULT_CACHE_SIZE
class ExtendedEvalsClient(EvaluationsClient):
@@ -87,8 +88,9 @@ class Humanloop(BaseHumanloop):
"""
See docstring of :class:`BaseHumanloop`.
- This class extends the base client with custom evaluation utilities
- and decorators for declaring Files in code.
+ This class extends the base client with custom evaluation utilities,
+ decorators for declaring Files in code, and utilities for syncing
+ files between Humanloop and local filesystem.
"""
def __init__(
@@ -102,6 +104,9 @@ def __init__(
httpx_client: typing.Optional[httpx.Client] = None,
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
+ use_local_files: bool = False,
+ files_directory: str = "humanloop",
+ cache_size: int = DEFAULT_CACHE_SIZE,
):
"""
Extends the base client with custom evaluation utilities and
@@ -111,6 +116,21 @@ def __init__(
You can provide a TracerProvider and a Tracer to integrate
with your existing telemetry system. If not provided,
an internal TracerProvider will be used.
+
+ Parameters
+ ----------
+ base_url: Optional base URL for the API
+ environment: The environment to use (default: DEFAULT)
+ api_key: Your Humanloop API key (default: from HUMANLOOP_API_KEY env var)
+ timeout: Optional timeout for API requests
+ follow_redirects: Whether to follow redirects
+ httpx_client: Optional custom httpx client
+ opentelemetry_tracer_provider: Optional tracer provider for telemetry
+ opentelemetry_tracer: Optional tracer for telemetry
+ use_local_files: Whether to use local files for prompts and agents
+ files_directory: Directory for local files (default: "humanloop")
+ cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
+ This parameter has no effect if use_local_files is False.
"""
super().__init__(
base_url=base_url,
@@ -121,6 +141,12 @@ def __init__(
httpx_client=httpx_client,
)
+ self.use_local_files = use_local_files
+ self._sync_client = SyncClient(
+ client=self,
+ base_dir=files_directory,
+ cache_size=cache_size
+ )
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -130,6 +156,16 @@ def __init__(
# and the @flow decorator providing the trace_id
self.prompts = overload_log(client=self.prompts)
self.prompts = overload_call(client=self.prompts)
+ self.prompts = overload_with_local_files(
+ client=self.prompts,
+ sync_client=self._sync_client,
+ use_local_files=self.use_local_files
+ )
+ self.agents = overload_with_local_files(
+ client=self.agents,
+ sync_client=self._sync_client,
+ use_local_files=self.use_local_files
+ )
self.flows = overload_log(client=self.flows)
self.tools = overload_log(client=self.tools)
@@ -351,7 +387,50 @@ def agent():
attributes=attributes,
)
+ def pull(self,
+ environment: str | None = None,
+ path: str | None = None
+ ) -> List[str]:
+ """Pull Prompt and Agent files from Humanloop to local filesystem.
+
+ This method will:
+ 1. Fetch Prompt and Agent files from your Humanloop workspace
+ 2. Save them to the local filesystem using the client's files_directory (set during initialization)
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (.prompt or .agent)
+
+ The path parameter can be used in two ways:
+ - If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
+ - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory will be pulled
+ - If no path is provided, all Prompt and Agent files will be pulled
+
+ The operation will overwrite existing files with the latest version from Humanloop
+ but will not delete local files that don't exist in the remote workspace.
+
+ Currently only supports syncing prompt and agent files. Other file types will be skipped.
+
+ The files will be saved with the following structure:
+ ```
+ {files_directory}/
+ ├── prompts/
+ │ ├── my_prompt.prompt
+ │ └── nested/
+ │ └── another_prompt.prompt
+ └── agents/
+ └── my_agent.agent
+ ```
+
+ :param environment: The environment to pull the files from.
+ :param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
+ If not provided, all Prompt and Agent files will be pulled.
+ :return: List of successfully processed file paths.
+ """
+ return self._sync_client.pull(
+ environment=environment,
+ path=path
+ )
+
class AsyncHumanloop(AsyncBaseHumanloop):
"""
See docstring of AsyncBaseHumanloop.
@@ -359,4 +438,4 @@ class AsyncHumanloop(AsyncBaseHumanloop):
TODO: Add custom evaluation utilities for async case.
"""
- pass
+ pass
\ No newline at end of file
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 26cb2465..84a95dda 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -41,6 +41,7 @@ def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
+ version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -69,6 +70,9 @@ def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
+ version_status : typing.Optional[VersionStatus]
+ If provided, only Logs belonging to Versions with the specified status will be returned.
+
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -130,6 +134,7 @@ def list(
"page": page,
"size": size,
"version_id": version_id,
+ "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -157,6 +162,7 @@ def list(
page=page + 1,
size=size,
version_id=version_id,
+ version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
@@ -274,6 +280,7 @@ async def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
+ version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -302,6 +309,9 @@ async def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
+ version_status : typing.Optional[VersionStatus]
+ If provided, only Logs belonging to Versions with the specified status will be returned.
+
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -371,6 +381,7 @@ async def main() -> None:
"page": page,
"size": size,
"version_id": version_id,
+ "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -398,6 +409,7 @@ async def main() -> None:
page=page + 1,
size=size,
version_id=version_id,
+ version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index b0c83215..bd409236 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,19 +1,23 @@
import inspect
import logging
import types
+import warnings
from typing import TypeVar, Union
-
+from pathlib import Path
from humanloop.context import (
get_decorator_context,
get_evaluation_context,
get_trace_id,
)
-from humanloop.evals.run import HumanloopRuntimeError
+from humanloop.error import HumanloopRuntimeError
from humanloop.evaluators.client import EvaluatorsClient
from humanloop.flows.client import FlowsClient
from humanloop.prompts.client import PromptsClient
+from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
+from humanloop.sync.sync_client import SyncClient
+from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
@@ -74,7 +78,7 @@ def _overload_log(
try:
response = self._log(**kwargs_eval)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
if eval_callback is not None:
eval_callback(response.id)
@@ -82,7 +86,7 @@ def _overload_log(
try:
response = self._log(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -114,7 +118,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
try:
response = self._call(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -122,3 +126,97 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
# Replace the original log method with the overloaded one
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
+
+def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> FileType:
+ """Get the file type based on the client type."""
+ if isinstance(client, PromptsClient):
+ return "prompt"
+ elif isinstance(client, AgentsClient):
+ return "agent"
+ else:
+ raise ValueError(f"Unsupported client type: {type(client)}")
+
+def overload_with_local_files(
+ client: Union[PromptsClient, AgentsClient],
+ sync_client: SyncClient,
+ use_local_files: bool,
+) -> Union[PromptsClient, AgentsClient]:
+ """Overload call and log methods to handle local files when use_local_files is True.
+
+ When use_local_files is True, the following prioritization strategy is used:
+ 1. Direct Parameters: If {file_type} parameters are provided directly (as a PromptKernelRequestParams or AgentKernelRequestParams object),
+ these take precedence and the local file is ignored.
+ 2. Version/Environment: If version_id or environment is specified, the remote version is used instead
+ of the local file.
+ 3. Local File: If neither of the above are specified, attempts to use the local file at the given path.
+
+ For example, with a prompt client:
+ - If prompt={model: "gpt-4", ...} is provided, uses those parameters directly
+ - If version_id="123" is provided, uses that remote version
+ - Otherwise, tries to load from the local file at the given path
+
+ Args:
+ client: The client to overload (PromptsClient or AgentsClient)
+ sync_client: The sync client used for file operations
+ use_local_files: Whether to enable local file handling
+
+ Returns:
+ The client with overloaded methods
+
+ Raises:
+ HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
+ """
+ original_call = client._call if hasattr(client, '_call') else client.call
+ original_log = client._log if hasattr(client, '_log') else client.log
+ file_type = _get_file_type_from_client(client)
+
+ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
+ if "id" in kwargs and "path" in kwargs:
+ raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
+ # Handle local files if enabled
+ if use_local_files and "path" in kwargs:
+ # Check if version_id or environment is specified
+ has_version_info = "version_id" in kwargs or "environment" in kwargs
+ normalized_path = sync_client._normalize_path(kwargs["path"])
+
+ if has_version_info:
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as version_id or environment was specified. "
+ "Using remote version instead."
+ )
+ else:
+ # Only use local file if no version info is specified
+ try:
+ # If file_type is already specified in kwargs, it means user provided a PromptKernelRequestParams object
+ if file_type in kwargs and not isinstance(kwargs[file_type], str):
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
+ "Using provided parameters instead."
+ )
+ else:
+ file_content = sync_client.get_file_content(normalized_path, file_type)
+ kwargs[file_type] = file_content
+ except (HumanloopRuntimeError) as e:
+ # Re-raise with more context
+ raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
+
+ try:
+ if function_name == "call":
+ return original_call(**kwargs)
+ elif function_name == "log":
+ return original_log(**kwargs)
+ else:
+ raise ValueError(f"Unsupported function name: {function_name}")
+ except Exception as e:
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
+ raise HumanloopRuntimeError from e
+
+ def _overload_call(self, **kwargs) -> PromptCallResponse:
+ return _overload(self, "call", **kwargs)
+
+ def _overload_log(self, **kwargs) -> PromptCallResponse:
+ return _overload(self, "log", **kwargs)
+
+ client.call = types.MethodType(_overload_call, client)
+ client.log = types.MethodType(_overload_log, client)
+ return client
\ No newline at end of file
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index fb1580df..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -5,10 +5,10 @@
from .agent_call_stream_response import AgentCallStreamResponseParams
from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
-from .agent_continue_call_response import AgentContinueCallResponseParams
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
-from .agent_continue_call_stream_response import AgentContinueCallStreamResponseParams
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
from .agent_inline_tool import AgentInlineToolParams
from .agent_kernel_request import AgentKernelRequestParams
from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
@@ -178,10 +178,10 @@
"AgentCallStreamResponseParams",
"AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
- "AgentContinueCallResponseParams",
- "AgentContinueCallResponseToolChoiceParams",
- "AgentContinueCallStreamResponseParams",
- "AgentContinueCallStreamResponsePayloadParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
"AgentInlineToolParams",
"AgentKernelRequestParams",
"AgentKernelRequestReasoningEffortParams",
diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py
deleted file mode 100644
index 90938dea..00000000
--- a/src/humanloop/requests/agent_continue_call_response.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
-import typing
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
-from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-from .evaluator_log_response import EvaluatorLogResponseParams
-from .log_response import LogResponseParams
-
-
-class AgentContinueCallResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing_extensions.NotRequired[int]
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing_extensions.NotRequired[str]
- """
- Reason the generation finished.
- """
-
- messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing_extensions.NotRequired[AgentContinueCallResponseToolChoiceParams]
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponseParams
- """
- Agent that generated the Log.
- """
-
- start_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event started.
- """
-
- end_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event ended.
- """
-
- output: typing_extensions.NotRequired[str]
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing_extensions.NotRequired[dt.datetime]
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing_extensions.NotRequired[str]
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing_extensions.NotRequired[float]
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing_extensions.NotRequired[str]
- """
- Captured log and debug statements.
- """
-
- provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw request sent to provider.
- """
-
- provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw response received the provider.
- """
-
- inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- The inputs passed to the prompt template.
- """
-
- source: typing_extensions.NotRequired[str]
- """
- Identifies where the model was called from.
- """
-
- metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Any additional metadata to record.
- """
-
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing_extensions.NotRequired[str]
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing_extensions.NotRequired[typing.Sequence[str]]
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing_extensions.NotRequired[str]
- """
- End-user ID related to the Log.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing_extensions.NotRequired[bool]
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing_extensions.NotRequired[str]
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
deleted file mode 100644
index 4722dd2e..00000000
--- a/src/humanloop/requests/agent_continue_call_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoiceParams
-
-AgentContinueCallResponseToolChoiceParams = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
-]
diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py
deleted file mode 100644
index 3eb2b498..00000000
--- a/src/humanloop/requests/agent_continue_call_stream_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
-from ..types.event_type import EventType
-import datetime as dt
-
-
-class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing_extensions.NotRequired[AgentContinueCallStreamResponsePayloadParams]
- type: EventType
- created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
deleted file mode 100644
index 87e1562b..00000000
--- a/src/humanloop/requests/agent_continue_call_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponseParams
-from .log_response import LogResponseParams
-from .tool_call import ToolCallParams
-
-AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
new file mode 100644
index 00000000..007659df
--- /dev/null
+++ b/src/humanloop/sync/__init__.py
@@ -0,0 +1,3 @@
+from humanloop.sync.sync_client import SyncClient
+
+__all__ = ["SyncClient"]
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
new file mode 100644
index 00000000..18de2e8a
--- /dev/null
+++ b/src/humanloop/sync/metadata_handler.py
@@ -0,0 +1,125 @@
+import json
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Optional, TypedDict, NotRequired
+import logging
+
+logger = logging.getLogger(__name__)
+
+class OperationData(TypedDict):
+    """Type definition for operation data structure."""
+    # ISO-8601 timestamp from datetime.now().isoformat() (naive local time).
+    timestamp: str
+    # Operation kind, e.g. "pull" or "push".
+    operation_type: str
+    # The (normalized) path the operation targeted; "" when the root was synced.
+    path: str
+    # NOTE(review): NotRequired is imported from `typing` above, which requires
+    # Python 3.11+; on older interpreters it must come from typing_extensions.
+    environment: NotRequired[Optional[str]]
+    successful_files: List[str]
+    failed_files: List[str]
+    error: NotRequired[Optional[str]]
+    # Wall-clock duration in milliseconds.
+    duration_ms: float
+
+class Metadata(TypedDict):
+    """Type definition for the metadata structure."""
+    # Most recent operation, or None before any operation has been logged.
+    last_operation: Optional[OperationData]
+    # Newest-first list of recent operations (bounded by MetadataHandler.max_history).
+    history: List[OperationData]
+
+class MetadataHandler:
+    """Handles metadata storage and retrieval for sync operations.
+
+    This class manages a JSON file that stores the last 5 sync operations
+    and maintains a record of the most recent operation with detailed information.
+    """
+
+    def __init__(self, base_dir: Path, max_history: int = 5) -> None:
+        """Initialize the metadata handler.
+
+        Args:
+            base_dir: Base directory where metadata will be stored
+            max_history: Maximum number of operations to keep in history
+        """
+        self.base_dir = base_dir
+        # Hidden dotfile kept alongside the synced files.
+        self.metadata_file = base_dir / ".sync_metadata.json"
+        self.max_history = max_history
+        self._ensure_metadata_file()
+
+    def _ensure_metadata_file(self) -> None:
+        """Ensure the metadata file exists with proper structure."""
+        if not self.metadata_file.exists():
+            initial_data: Metadata = {
+                "last_operation": None,
+                "history": []
+            }
+            self._write_metadata(initial_data)
+
+    def _read_metadata(self) -> Metadata:
+        """Read the current metadata from file."""
+        # Best-effort read: any failure (missing file, bad JSON) degrades to an
+        # empty structure rather than propagating. NOTE(review): the parsed JSON
+        # is returned without validation, so it may not match the Metadata shape.
+        try:
+            with open(self.metadata_file, 'r') as f:
+                return json.load(f)
+        except Exception as e:
+            logger.error(f"Error reading metadata file: {e}")
+            return {"last_operation": None, "history": []}
+
+    def _write_metadata(self, data: Metadata) -> None:
+        """Write metadata to file."""
+        # Best-effort write: errors are logged and swallowed, so callers never
+        # fail because metadata could not be persisted.
+        try:
+            self.metadata_file.parent.mkdir(parents=True, exist_ok=True)
+            with open(self.metadata_file, 'w') as f:
+                json.dump(data, f, indent=2)
+        except Exception as e:
+            logger.error(f"Error writing metadata file: {e}")
+
+    def log_operation(
+        self,
+        operation_type: str,
+        path: str,
+        duration_ms: float,
+        environment: Optional[str] = None,
+        successful_files: Optional[List[str]] = None,
+        failed_files: Optional[List[str]] = None,
+        error: Optional[str] = None,
+    ) -> None:
+        """Log a sync operation.
+
+        Args:
+            operation_type: Type of operation (e.g., "pull", "push")
+            path: The path that was synced
+            duration_ms: Duration of the operation in milliseconds
+            environment: Optional environment name
+            successful_files: List of successfully processed files
+            failed_files: List of files that failed to process
+            error: Any error message if the operation failed
+        """
+        # Naive local timestamp (no timezone information attached).
+        current_time = datetime.now().isoformat()
+
+        # NOTE(review): the `environment` and `error` keys are always written
+        # (possibly as null), even though OperationData marks them NotRequired.
+        operation_data: OperationData = {
+            "timestamp": current_time,
+            "operation_type": operation_type,
+            "path": path,
+            "environment": environment,
+            "successful_files": successful_files or [],
+            "failed_files": failed_files or [],
+            "error": error,
+            "duration_ms": duration_ms
+        }
+
+        metadata = self._read_metadata()
+
+        # Update last operation
+        metadata["last_operation"] = operation_data
+
+        # Update history: newest first, truncated to the last max_history entries.
+        metadata["history"].insert(0, operation_data)
+        metadata["history"] = metadata["history"][:self.max_history]
+
+        self._write_metadata(metadata)
+
+    def get_last_operation(self) -> Optional[OperationData]:
+        """Get the most recent operation details."""
+        metadata = self._read_metadata()
+        return metadata.get("last_operation")
+
+    def get_history(self) -> List[OperationData]:
+        """Get the operation history."""
+        metadata = self._read_metadata()
+        return metadata.get("history", [])
\ No newline at end of file
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
new file mode 100644
index 00000000..6b16cde0
--- /dev/null
+++ b/src/humanloop/sync/sync_client.py
@@ -0,0 +1,327 @@
+import logging
+from pathlib import Path
+from typing import List, TYPE_CHECKING, Optional
+from functools import lru_cache
+from humanloop.types import FileType
+from .metadata_handler import MetadataHandler
+import time
+from humanloop.error import HumanloopRuntimeError
+import json
+
+if TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
+# Set up logging
+logger = logging.getLogger(__name__)
+# NOTE(review): mutating the module logger at import time; SyncClient.__init__
+# later overrides this level via its log_level parameter.
+logger.setLevel(logging.INFO)
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s")
+console_handler.setFormatter(formatter)
+# hasHandlers() also considers ancestor loggers, so the console handler is
+# skipped when e.g. the root logger is already configured elsewhere.
+if not logger.hasHandlers():
+    logger.addHandler(console_handler)
+
+# Default cache size for file content caching
+DEFAULT_CACHE_SIZE = 100
+
+def format_api_error(error: Exception) -> str:
+    """Format API error messages to be more user-friendly."""
+    error_msg = str(error)
+    # Only attempt to parse messages that look like "... status_code ... body: {...}".
+    if "status_code" not in error_msg or "body" not in error_msg:
+        return error_msg
+
+    try:
+        # Extract the body part and parse as JSON
+        # NOTE(review): assumes the literal marker "body: " is present (the guard
+        # above only checks for "body") — confirm the client's error format.
+        body_str = error_msg.split("body: ")[1]
+        # Convert Python dict string to valid JSON by replacing single quotes with double quotes
+        # NOTE(review): fragile — breaks when the message itself contains an
+        # apostrophe; ast.literal_eval on the repr would be safer. TODO confirm.
+        body_str = body_str.replace("'", '"')
+        body = json.loads(body_str)
+
+        # Get the detail from the body
+        detail = body.get("detail", {})
+
+        # Prefer description, fall back to msg
+        return detail.get("description") or detail.get("msg") or error_msg
+    except Exception as e:
+        # Any parse failure falls back to the raw message.
+        logger.debug(f"Failed to parse error message: {str(e)}")
+        return error_msg
+
+class SyncClient:
+    """Client for managing synchronization between local filesystem and Humanloop.
+
+    This client provides file synchronization between Humanloop and the local filesystem,
+    with built-in caching for improved performance. The cache uses Python's LRU (Least
+    Recently Used) cache to automatically manage memory usage by removing least recently
+    accessed files when the cache is full.
+
+    The cache is automatically updated when files are pulled or saved, and can be
+    manually cleared using the clear_cache() method.
+    """
+
+    def __init__(
+        self,
+        client: "BaseHumanloop",
+        base_dir: str = "humanloop",
+        cache_size: int = DEFAULT_CACHE_SIZE,
+        log_level: int = logging.WARNING
+    ):
+        """
+        Parameters
+        ----------
+        client: Humanloop client instance
+        base_dir: Base directory for synced files (default: "humanloop")
+        cache_size: Maximum number of files to cache (default: DEFAULT_CACHE_SIZE)
+        log_level: Log level for logging (default: WARNING)
+        """
+        self.client = client
+        self.base_dir = Path(base_dir)
+        self._cache_size = cache_size
+
+        global logger
+        # NOTE(review): `global` is unnecessary here (logger is never rebound), and
+        # mutating the module-level logger affects every SyncClient instance.
+        logger.setLevel(log_level)
+
+        # Create a new cached version of get_file_content with the specified cache size
+        # NOTE(review): this instance attribute shadows the class-level
+        # get_file_content method defined below; caching the bound method also
+        # keeps a strong reference to this instance for the cache's lifetime.
+        self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
+        # Initialize metadata handler
+        self.metadata = MetadataHandler(self.base_dir)
+
+    def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
+        """Implementation of get_file_content without the cache decorator.
+
+        This is the actual implementation that gets wrapped by lru_cache.
+
+        Args:
+            path: The normalized path to the file (without extension)
+            file_type: The type of file (Prompt or Agent)
+
+        Returns:
+            The raw file content
+
+        Raises:
+            HumanloopRuntimeError: If the file doesn't exist or can't be read
+        """
+        # Construct path to local file
+        local_path = self.base_dir / path
+        # Add appropriate extension; assumes FileType renders as "prompt"/"agent"
+        # when formatted into the suffix — TODO confirm for all FileType values.
+        local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+        if not local_path.exists():
+            raise HumanloopRuntimeError(f"Local file not found: {local_path}")
+
+        try:
+            # Read the raw file content
+            with open(local_path) as f:
+                file_content = f.read()
+            logger.debug(f"Using local file content from {local_path}")
+            return file_content
+        except Exception as e:
+            raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")
+
+    def get_file_content(self, path: str, file_type: FileType) -> str:
+        """Get the raw file content of a file from cache or filesystem.
+
+        This method uses an LRU cache to store file contents. When the cache is full,
+        the least recently accessed files are automatically removed to make space.
+
+        Args:
+            path: The normalized path to the file (without extension)
+            file_type: The type of file (Prompt or Agent)
+
+        Returns:
+            The raw file content
+
+        Raises:
+            HumanloopRuntimeError: If the file doesn't exist or can't be read
+        """
+        # NOTE(review): shadowed by the lru_cache-wrapped instance attribute set in
+        # __init__, so this class-level method is dead code in practice; as written
+        # it bypasses the cache despite its docstring. Confirm intent.
+        return self._get_file_content_impl(path, file_type)
+
+    def clear_cache(self) -> None:
+        """Clear the LRU cache."""
+        self.get_file_content.cache_clear()
+
+    def _normalize_path(self, path: str) -> str:
+        """Normalize the path by:
+        1. Removing any file extensions (.prompt, .agent)
+        2. Converting backslashes to forward slashes
+        3. Removing leading and trailing slashes
+        4. Removing leading and trailing whitespace
+        5. Normalizing multiple consecutive slashes into a single forward slash
+
+        Args:
+            path: The path to normalize
+
+        Returns:
+            The normalized path
+        """
+        # Remove any file extensions
+        # NOTE(review): splits on the LAST '.' anywhere in the path, so a dotted
+        # directory name with no extension is mangled (e.g. "my.dir/file" -> "my").
+        # Probably should only strip a trailing ".prompt"/".agent" — TODO confirm.
+        path = path.rsplit('.', 1)[0] if '.' in path else path
+
+        # Convert backslashes to forward slashes and normalize multiple slashes
+        path = path.replace('\\', '/')
+
+        # Remove leading/trailing whitespace and slashes
+        path = path.strip().strip('/')
+
+        # Normalize multiple consecutive slashes into a single forward slash
+        while '//' in path:
+            path = path.replace('//', '/')
+
+        return path
+
+    def is_file(self, path: str) -> bool:
+        """Check if the path is a file by checking for .prompt or .agent extension."""
+        return path.endswith('.prompt') or path.endswith('.agent')
+
+    def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
+        """Save serialized file to local filesystem."""
+        try:
+            # Create full path including base_dir prefix
+            full_path = self.base_dir / file_path
+            # Create directory if it doesn't exist
+            full_path.parent.mkdir(parents=True, exist_ok=True)
+
+            # Add file type extension
+            new_path = full_path.parent / f"{full_path.stem}.{file_type}"
+
+            # Write raw file content to file
+            with open(new_path, "w") as f:
+                f.write(serialized_content)
+
+            # lru_cache has no per-key invalidation, so the ENTIRE cache is dropped
+            # to guarantee fresh content on the next read of any file.
+            self.clear_cache()
+        except Exception as e:
+            logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
+            raise
+
+    def _pull_file(self, path: str, environment: str | None = None) -> None:
+        """Pull a specific file from Humanloop to local filesystem."""
+        # NOTE(review): `str | None` union syntax requires Python 3.10+, while the
+        # rest of the module uses typing.Optional — confirm the supported floor.
+        file = self.client.files.retrieve_by_path(
+            path=path,
+            environment=environment,
+            include_raw_file_content=True
+        )
+
+        if file.type not in ["prompt", "agent"]:
+            raise ValueError(f"Unsupported file type: {file.type}")
+
+        # Saves under the server-reported path. NOTE(review): raw_file_content is
+        # assumed present here (unlike _pull_directory, which guards for it) —
+        # TODO confirm the API always returns it when include_raw_file_content=True.
+        self._save_serialized_file(file.raw_file_content, file.path, file.type)
+
+    def _pull_directory(self,
+        path: str | None = None,
+        environment: str | None = None,
+    ) -> List[str]:
+        """Sync Prompt and Agent files from Humanloop to local filesystem."""
+        successful_files = []
+        failed_files = []
+        page = 1
+
+        logger.debug(f"Fetching files from directory: {path or '(root)'} in environment: {environment or '(default)'}")
+
+        while True:
+            try:
+                logger.debug(f"Requesting page {page} of files")
+                response = self.client.files.list_files(
+                    type=["prompt", "agent"],
+                    page=page,
+                    include_raw_file_content=True,
+                    environment=environment,
+                    path=path
+                )
+
+                # Pagination stops at the first empty page.
+                if len(response.records) == 0:
+                    logger.debug("No more files found")
+                    break
+
+                logger.debug(f"Found {len(response.records)} files from page {page}")
+
+                # Process each file
+                for file in response.records:
+                    # Skip if not a Prompt or Agent
+                    if file.type not in ["prompt", "agent"]:
+                        logger.warning(f"Skipping unsupported file type: {file.type}")
+                        continue
+
+                    # Skip if no raw file content
+                    if not getattr(file, "raw_file_content", None):
+                        logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
+                        continue
+
+                    try:
+                        logger.debug(f"Saving {file.type} {file.path}")
+                        self._save_serialized_file(file.raw_file_content, file.path, file.type)
+                        successful_files.append(file.path)
+                    except Exception as e:
+                        failed_files.append(file.path)
+                        logger.error(f"Task failed for {file.path}: {str(e)}")
+
+                page += 1
+            except Exception as e:
+                formatted_error = format_api_error(e)
+                raise HumanloopRuntimeError(f"Failed to pull files: {formatted_error}")
+
+        if successful_files:
+            logger.info(f"Successfully pulled {len(successful_files)} files")
+        if failed_files:
+            logger.warning(f"Failed to pull {len(failed_files)} files")
+
+        # NOTE(review): only successes are returned; failed_files is logged but
+        # never surfaced to the caller (pull() therefore records no failures).
+        return successful_files
+
+    def pull(self, path: str | None = None, environment: str | None = None) -> List[str]:
+        """Pull files from Humanloop to local filesystem.
+
+        If the path ends with .prompt or .agent, pulls that specific file.
+        Otherwise, pulls all files under the specified path.
+        If no path is provided, pulls all files from the root.
+
+        Args:
+            path: The path to pull from (either a specific file or directory)
+            environment: The environment to pull from
+
+        Returns:
+            List of successfully processed file paths
+        """
+        start_time = time.time()
+        normalized_path = self._normalize_path(path) if path else None
+
+        logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
+        try:
+            if path is None:
+                # Pull all files from the root
+                logger.debug("Pulling all files from root")
+                successful_files = self._pull_directory(None, environment)
+                failed_files = [] # Failed files are already logged in _pull_directory
+            else:
+                if self.is_file(path.strip()):
+                    logger.debug(f"Pulling specific file: {normalized_path}")
+                    self._pull_file(normalized_path, environment)
+                    # NOTE(review): records the raw input path here, while the
+                    # directory branches record server-reported paths — inconsistent.
+                    successful_files = [path]
+                    failed_files = []
+                else:
+                    logger.debug(f"Pulling directory: {normalized_path}")
+                    successful_files = self._pull_directory(normalized_path, environment)
+                    failed_files = [] # Failed files are already logged in _pull_directory
+
+            duration_ms = int((time.time() - start_time) * 1000)
+            logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
+
+            # Log the successful operation
+            # NOTE(review): failed_files is always [] on this path, so per-file
+            # failures inside _pull_directory never reach the metadata log.
+            self.metadata.log_operation(
+                operation_type="pull",
+                path=normalized_path or "", # Use empty string if path is None
+                environment=environment,
+                successful_files=successful_files,
+                failed_files=failed_files,
+                duration_ms=duration_ms
+            )
+
+            return successful_files
+        except Exception as e:
+            duration_ms = int((time.time() - start_time) * 1000)
+            # Log the failed operation
+            self.metadata.log_operation(
+                operation_type="pull",
+                path=normalized_path or "", # Use empty string if path is None
+                environment=environment,
+                error=str(e),
+                duration_ms=duration_ms
+            )
+            raise
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 7d863134..7814f611 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -5,10 +5,10 @@
from .agent_call_stream_response import AgentCallStreamResponse
from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
-from .agent_continue_call_response import AgentContinueCallResponse
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
-from .agent_continue_call_stream_response import AgentContinueCallStreamResponse
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
from .agent_inline_tool import AgentInlineTool
from .agent_kernel_request import AgentKernelRequest
from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
@@ -212,10 +212,10 @@
"AgentCallStreamResponse",
"AgentCallStreamResponsePayload",
"AgentConfigResponse",
- "AgentContinueCallResponse",
- "AgentContinueCallResponseToolChoice",
- "AgentContinueCallStreamResponse",
- "AgentContinueCallStreamResponsePayload",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
"AgentInlineTool",
"AgentKernelRequest",
"AgentKernelRequestReasoningEffort",
diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py
deleted file mode 100644
index c98af953..00000000
--- a/src/humanloop/types/agent_continue_call_response.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
-from .chat_message import ChatMessage
-import pydantic
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
-import datetime as dt
-from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AgentContinueCallResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing.Optional[str] = pydantic.Field(default=None)
- """
- Reason the generation finished.
- """
-
- messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing.Optional[AgentContinueCallResponseToolChoice] = pydantic.Field(default=None)
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponse = pydantic.Field()
- """
- Agent that generated the Log.
- """
-
- start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event started.
- """
-
- end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event ended.
- """
-
- output: typing.Optional[str] = pydantic.Field(default=None)
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing.Optional[str] = pydantic.Field(default=None)
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing.Optional[float] = pydantic.Field(default=None)
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing.Optional[str] = pydantic.Field(default=None)
- """
- Captured log and debug statements.
- """
-
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw request sent to provider.
- """
-
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw response received the provider.
- """
-
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- The inputs passed to the prompt template.
- """
-
- source: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifies where the model was called from.
- """
-
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Any additional metadata to record.
- """
-
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing.Optional[str] = pydantic.Field(default=None)
- """
- End-user ID related to the Log.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str = pydantic.Field()
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py
deleted file mode 100644
index 5b90e98d..00000000
--- a/src/humanloop/types/agent_continue_call_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoice
-
-AgentContinueCallResponseToolChoice = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
-]
diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py
deleted file mode 100644
index cdd34dce..00000000
--- a/src/humanloop/types/agent_continue_call_stream_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
-from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-
-
-class AgentContinueCallStreamResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing.Optional[AgentContinueCallStreamResponsePayload] = None
- type: EventType
- created_at: dt.datetime
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py
deleted file mode 100644
index 8e23829b..00000000
--- a/src/humanloop/types/agent_continue_call_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponse
-from .log_response import LogResponse
-from .tool_call import ToolCall
-
-AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/tests/conftest.py b/tests/conftest.py
index 80e3b336..a8c78ac5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -191,8 +191,14 @@ def api_keys() -> APIKeys:
@pytest.fixture(scope="session")
-def humanloop_client(api_keys: APIKeys) -> Humanloop:
- return Humanloop(api_key=api_keys.humanloop)
+def humanloop_client(request, api_keys: APIKeys) -> Humanloop:
+ """Create a Humanloop client for testing."""
+ use_local_files = getattr(request, "param", False)
+ return Humanloop(
+ api_key=api_keys.humanloop,
+ base_url="http://localhost:80/v5",
+ use_local_files=use_local_files
+ )
@pytest.fixture(scope="session", autouse=True)
@@ -214,8 +220,10 @@ def directory_cleanup(directory_id: str, humanloop_client: Humanloop):
             client = humanloop_client.evaluators  # type: ignore [assignment]
         elif file.type == "flow":
             client = humanloop_client.flows  # type: ignore [assignment]
+        elif file.type == "agent":
+            client = humanloop_client.agents  # type: ignore [assignment]
+        else:
+            raise NotImplementedError(f"Unknown HL file type {file.type}")
         client.delete(file_id)
for subdirectory in response.subdirectories:
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
new file mode 100644
index 00000000..a2441b4b
--- /dev/null
+++ b/tests/sync/test_sync.py
@@ -0,0 +1,267 @@
+from typing import List, NamedTuple, Union
+from pathlib import Path
+import pytest
+from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
+from humanloop.error import HumanloopRuntimeError
+
+
+class SyncableFile(NamedTuple):
+ path: str
+ type: FileType
+ model: str
+ id: str = ""
+ version_id: str = ""
+
+
+@pytest.fixture
+def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[SyncableFile]:
+ """Creates a predefined structure of files in Humanloop for testing sync"""
+ files: List[SyncableFile] = [
+ SyncableFile(
+ path="prompts/gpt-4",
+ type="prompt",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="prompts/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="prompts/nested/complex/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="agents/gpt-4",
+ type="agent",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="agents/gpt-4o",
+ type="agent",
+ model="gpt-4o",
+ ),
+ ]
+
+ # Create the files in Humanloop
+ created_files = []
+ for file in files:
+ full_path = get_test_path(file.path)
+ response: Union[AgentResponse, PromptResponse]
+ if file.type == "prompt":
+ response = humanloop_client.prompts.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ elif file.type == "agent":
+ response = humanloop_client.agents.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ created_files.append(SyncableFile(
+ path=full_path,
+ type=file.type,
+ model=file.model,
+ id=response.id,
+ version_id=response.version_id
+ ))
+
+ return created_files
+
+
+@pytest.fixture
+def cleanup_local_files():
+ """Cleanup any locally synced files after tests"""
+ yield
+ # Clean up the local humanloop directory after tests
+ local_dir = Path("humanloop")
+ if local_dir.exists():
+ import shutil
+
+ shutil.rmtree(local_dir)
+
+
+def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+    """Test that humanloop_client.pull() correctly syncs remote files to local filesystem"""
+ # Run the sync
+ successful_files = humanloop_client.pull()
+
+ # Verify each file was synced correctly
+ for file in test_file_structure:
+ # Get the extension based on file type: .prompt, .agent
+ extension = f".{file.type}"
+
+ # The local path should mirror the remote path structure
+ local_path = Path("humanloop") / f"{file.path}{extension}"
+
+ # Basic assertions
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Verify it's not empty
+ content = local_path.read_text()
+ assert content, f"File at {local_path} should not be empty"
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles local files.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test using the pulled files
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test call with pulled file
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ assert response is not None
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ assert response is not None
+
+ # Test with invalid path
+ with pytest.raises(HumanloopRuntimeError):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(path="invalid/path")
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(path="invalid/path")
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles local files for log operations.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test logging using the pulled files
+
+ :param humanloop_client: The Humanloop client with local files enabled
+ :param test_file_structure: List of test files created in remote
+ :param cleanup_local_files: Fixture to clean up local files after test
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test log with pulled file
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.log(
+ path=test_file.path,
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ assert response is not None
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.log(
+ path=test_file.path,
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ assert response is not None
+
+ # Test with invalid path
+ with pytest.raises(HumanloopRuntimeError):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.log(
+ path="invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.log(
+ path="invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_version_environment_handling(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles version_id and environment parameters.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test that version_id/environment parameters cause remote usage with warning
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test with version_id - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
+
+ # Test with environment - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
+
+ # Test with both version_id and environment - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+        assert response is not None