From 7fc99002052109c64a9dd1c9739f8fb8560e7a8e Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 18:51:24 +0000 Subject: [PATCH 1/2] Release 0.8.30 --- poetry.lock | 290 +++++++++--------- pyproject.toml | 2 +- reference.md | 14 +- src/humanloop/core/client_wrapper.py | 2 +- src/humanloop/flows/client.py | 8 +- src/humanloop/prompts/client.py | 20 +- .../requests/prompt_call_response.py | 2 +- src/humanloop/requests/prompt_log_response.py | 2 +- src/humanloop/types/prompt_call_response.py | 2 +- src/humanloop/types/prompt_log_response.py | 2 +- 10 files changed, 172 insertions(+), 172 deletions(-) diff --git a/poetry.lock b/poetry.lock index 35b2ec1d..f1d5d785 100644 --- a/poetry.lock +++ b/poetry.lock @@ -190,13 +190,13 @@ files = [ [[package]] name = "cohere" -version = "5.14.0" +version = "5.14.2" description = "" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "cohere-5.14.0-py3-none-any.whl", hash = "sha256:286b4ff66f9a59c06a30b8819fb8b2aee3354dc2f7dc83f19519da771e598a5e"}, - {file = "cohere-5.14.0.tar.gz", hash = "sha256:fdbf2d4c54049b74c8f79ff8ed6fd149c4c2055d3b5eae57b9c70716e65c78fd"}, + {file = "cohere-5.14.2-py3-none-any.whl", hash = "sha256:fe2cbbf6c79fba21a66731d387647b981ab5ea6dbcfb09beb85386e96695bd64"}, + {file = "cohere-5.14.2.tar.gz", hash = "sha256:5aaf5a70e619ade2bb991b12f573fd4cc9bd1f3097f0f67acd973d060a7e86c6"}, ] [package.dependencies] @@ -384,13 +384,13 @@ tqdm = ["tqdm"] [[package]] name = "groq" -version = "0.21.0" +version = "0.22.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.8" files = [ - {file = "groq-0.21.0-py3-none-any.whl", hash = "sha256:ab1cb6bf4fb4e4f59fae0bc2337295b2b8b4335d8d5b8148a4d0ca26490a16b3"}, - {file = "groq-0.21.0.tar.gz", hash = "sha256:0a94920d9599c02a46f80c207eb7e3ab5dbf415790661e4b91216c39ba1089d0"}, + {file = "groq-0.22.0-py3-none-any.whl", hash = "sha256:f53d3966dff713aaa635671c2d075ebb932b0d48e3c4031ede9b84a2a6694c79"}, + {file = "groq-0.22.0.tar.gz", hash = "sha256:9d090fbe4a051655faff649890d18aaacb3121393ad9d55399171fe081f1057b"}, ] [package.dependencies] @@ -470,13 +470,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.30.1" +version = "0.30.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.30.1-py3-none-any.whl", hash = "sha256:0f6aa5ec5a4e68e5b9e45d556b4e5ea180c58f5a5ffa734e7f38c9d573028959"}, - {file = "huggingface_hub-0.30.1.tar.gz", hash = "sha256:f379e8b8d0791295602538856638460ae3cf679c7f304201eb80fb98c771950e"}, + {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"}, + {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"}, ] [package.dependencies] @@ -873,13 +873,13 @@ files = [ [[package]] name = "openai" -version = "1.70.0" +version = "1.72.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.70.0-py3-none-any.whl", hash = "sha256:f6438d053fd8b2e05fd6bef70871e832d9bbdf55e119d0ac5b92726f1ae6f614"}, - {file = "openai-1.70.0.tar.gz", hash = "sha256:e52a8d54c3efeb08cf58539b5b21a5abef25368b5432965e4de88cdf4e091b2b"}, + {file = "openai-1.72.0-py3-none-any.whl", hash = 
"sha256:34f5496ba5c8cb06c592831d69e847e2d164526a2fb92afdc3b5cf2891c328c3"}, + {file = "openai-1.72.0.tar.gz", hash = "sha256:f51de971448905cc90ed5175a5b19e92fd94e31f68cde4025762f9f5257150db"}, ] [package.dependencies] @@ -894,18 +894,18 @@ typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -realtime = ["websockets (>=13,<15)"] +realtime = ["websockets (>=13,<16)"] voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] [[package]] name = "opentelemetry-api" -version = "1.31.1" +version = "1.32.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.31.1-py3-none-any.whl", hash = "sha256:1511a3f470c9c8a32eeea68d4ea37835880c0eed09dd1a0187acc8b1301da0a1"}, - {file = "opentelemetry_api-1.31.1.tar.gz", hash = "sha256:137ad4b64215f02b3000a0292e077641c8611aab636414632a9b9068593b7e91"}, + {file = "opentelemetry_api-1.32.0-py3-none-any.whl", hash = "sha256:15df743c765078611f376037b0d9111ec5c1febf2ec9440cdd919370faa1ce55"}, + {file = "opentelemetry_api-1.32.0.tar.gz", hash = "sha256:2623280c916f9b19cad0aa4280cb171265f19fd2909b0d47e4f06f7c83b02cb5"}, ] [package.dependencies] @@ -914,18 +914,18 @@ importlib-metadata = ">=6.0,<8.7.0" [[package]] name = "opentelemetry-instrumentation" -version = "0.52b1" +version = "0.53b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation-0.52b1-py3-none-any.whl", hash = "sha256:8c0059c4379d77bbd8015c8d8476020efe873c123047ec069bb335e4b8717477"}, - {file = "opentelemetry_instrumentation-0.52b1.tar.gz", hash = "sha256:739f3bfadbbeec04dd59297479e15660a53df93c131d907bb61052e3d3c1406f"}, + {file = "opentelemetry_instrumentation-0.53b0-py3-none-any.whl", hash = "sha256:70600778fd567c9c5fbfca181378ae179c0dec3ff613171707d3d77c360ff105"}, + {file = "opentelemetry_instrumentation-0.53b0.tar.gz", hash = "sha256:f2c21d71a3cdf28c656e3d90d247ee7558fb9b0239b3d9e9190266499dbed9d2"}, ] [package.dependencies] opentelemetry-api = ">=1.4,<2.0" -opentelemetry-semantic-conventions = "0.52b1" +opentelemetry-semantic-conventions = "0.53b0" packaging = ">=18.0" wrapt = ">=1.0.0,<2.0.0" @@ -1036,13 +1036,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3" [[package]] name = "opentelemetry-proto" -version = "1.31.1" +version = "1.32.0" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_proto-1.31.1-py3-none-any.whl", hash = "sha256:1398ffc6d850c2f1549ce355744e574c8cd7c1dba3eea900d630d52c41d07178"}, - {file = "opentelemetry_proto-1.31.1.tar.gz", hash = "sha256:d93e9c2b444e63d1064fb50ae035bcb09e5822274f1683886970d2734208e790"}, + {file = "opentelemetry_proto-1.32.0-py3-none-any.whl", hash = "sha256:f699269dc037e18fba05442580a8682c9fbd0f4c7f5addfed82c44be0c53c5ff"}, + {file = "opentelemetry_proto-1.32.0.tar.gz", hash = "sha256:f8b70ae52f4ef8a4e4c0760e87c9071e07ece2618c080d4839bef44c0156cd44"}, ] [package.dependencies] @@ -1050,34 +1050,34 @@ protobuf = ">=5.0,<6.0" [[package]] name = "opentelemetry-sdk" -version = "1.31.1" +version = "1.32.0" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.31.1-py3-none-any.whl", hash = "sha256:882d021321f223e37afaca7b4e06c1d8bbc013f9e17ff48a7aa017460a8e7dae"}, - {file = "opentelemetry_sdk-1.31.1.tar.gz", hash = 
"sha256:c95f61e74b60769f8ff01ec6ffd3d29684743404603df34b20aa16a49dc8d903"}, + {file = "opentelemetry_sdk-1.32.0-py3-none-any.whl", hash = "sha256:ed252d035c22a15536c1f603ca089298daab60850fc2f5ddfa95d95cc1c043ea"}, + {file = "opentelemetry_sdk-1.32.0.tar.gz", hash = "sha256:5ff07fb371d1ab1189fa7047702e2e888b5403c5efcbb18083cae0d5aa5f58d2"}, ] [package.dependencies] -opentelemetry-api = "1.31.1" -opentelemetry-semantic-conventions = "0.52b1" +opentelemetry-api = "1.32.0" +opentelemetry-semantic-conventions = "0.53b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.52b1" +version = "0.53b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.52b1-py3-none-any.whl", hash = "sha256:72b42db327e29ca8bb1b91e8082514ddf3bbf33f32ec088feb09526ade4bc77e"}, - {file = "opentelemetry_semantic_conventions-0.52b1.tar.gz", hash = "sha256:7b3d226ecf7523c27499758a58b542b48a0ac8d12be03c0488ff8ec60c5bae5d"}, + {file = "opentelemetry_semantic_conventions-0.53b0-py3-none-any.whl", hash = "sha256:561da89f766ab51615c0e72b12329e0a1bc16945dbd62c8646ffc74e36a1edff"}, + {file = "opentelemetry_semantic_conventions-0.53b0.tar.gz", hash = "sha256:05b7908e1da62d72f9bf717ed25c72f566fe005a2dd260c61b11e025f2552cf6"}, ] [package.dependencies] deprecated = ">=1.2.6" -opentelemetry-api = "1.31.1" +opentelemetry-api = "1.32.0" [[package]] name = "opentelemetry-semantic-conventions-ai" @@ -1092,13 +1092,13 @@ files = [ [[package]] name = "orderly-set" -version = "5.3.0" +version = "5.3.2" description = "Orderly set" optional = false python-versions = ">=3.8" files = [ - {file = "orderly_set-5.3.0-py3-none-any.whl", hash = "sha256:c2c0bfe604f5d3d9b24e8262a06feb612594f37aa3845650548befd7772945d1"}, - {file = "orderly_set-5.3.0.tar.gz", hash = "sha256:80b3d8fdd3d39004d9aad389eaa0eab02c71f0a0511ba3a6d54a935a6c6a0acc"}, + {file = "orderly_set-5.3.2-py3-none-any.whl", hash = "sha256:81250ce092333db454a70e5d7ef1409ec4d3002e0d2c7546d710f4639f20f19d"}, + {file = "orderly_set-5.3.2.tar.gz", hash = "sha256:5fd6d917788d0e2196582f38a1c4b74591963d4df9be24ae5a51ba4cea2c987f"}, ] [[package]] @@ -1320,18 +1320,18 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] [[package]] name = "pydantic" -version = "2.11.1" +version = "2.11.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic-2.11.1-py3-none-any.whl", hash = "sha256:5b6c415eee9f8123a14d859be0c84363fec6b1feb6b688d6435801230b56e0b8"}, - {file = "pydantic-2.11.1.tar.gz", hash = "sha256:442557d2910e75c991c39f4b4ab18963d57b9b55122c8b2a9cd176d8c29ce968"}, + {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"}, + {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.33.0" +pydantic-core = "2.33.1" typing-extensions = ">=4.12.2" typing-inspection = ">=0.4.0" @@ -1341,110 +1341,110 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.33.0" +version = "2.33.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" files = [ - {file = "pydantic_core-2.33.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71dffba8fe9ddff628c68f3abd845e91b028361d43c5f8e7b3f8b91d7d85413e"}, - 
{file = "pydantic_core-2.33.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abaeec1be6ed535a5d7ffc2e6c390083c425832b20efd621562fbb5bff6dc518"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759871f00e26ad3709efc773ac37b4d571de065f9dfb1778012908bcc36b3a73"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dcfebee69cd5e1c0b76a17e17e347c84b00acebb8dd8edb22d4a03e88e82a207"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b1262b912435a501fa04cd213720609e2cefa723a07c92017d18693e69bf00b"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4726f1f3f42d6a25678c67da3f0b10f148f5655813c5aca54b0d1742ba821b8f"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e790954b5093dff1e3a9a2523fddc4e79722d6f07993b4cd5547825c3cbf97b5"}, - {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34e7fb3abe375b5c4e64fab75733d605dda0f59827752debc99c17cb2d5f3276"}, - {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ecb158fb9b9091b515213bed3061eb7deb1d3b4e02327c27a0ea714ff46b0760"}, - {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:4d9149e7528af8bbd76cc055967e6e04617dcb2a2afdaa3dea899406c5521faa"}, - {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e81a295adccf73477220e15ff79235ca9dcbcee4be459eb9d4ce9a2763b8386c"}, - {file = "pydantic_core-2.33.0-cp310-cp310-win32.whl", hash = "sha256:f22dab23cdbce2005f26a8f0c71698457861f97fc6318c75814a50c75e87d025"}, - {file = "pydantic_core-2.33.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cb2390355ba084c1ad49485d18449b4242da344dea3e0fe10babd1f0db7dcfc"}, - {file = "pydantic_core-2.33.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a608a75846804271cf9c83e40bbb4dab2ac614d33c6fd5b0c6187f53f5c593ef"}, - {file = "pydantic_core-2.33.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e1c69aa459f5609dec2fa0652d495353accf3eda5bdb18782bc5a2ae45c9273a"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9ec80eb5a5f45a2211793f1c4aeddff0c3761d1c70d684965c1807e923a588b"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e925819a98318d17251776bd3d6aa9f3ff77b965762155bdad15d1a9265c4cfd"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bf68bb859799e9cec3d9dd8323c40c00a254aabb56fe08f907e437005932f2b"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b2ea72dea0825949a045fa4071f6d5b3d7620d2a208335207793cf29c5a182d"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1583539533160186ac546b49f5cde9ffc928062c96920f58bd95de32ffd7bffd"}, - {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:23c3e77bf8a7317612e5c26a3b084c7edeb9552d645742a54a5867635b4f2453"}, - {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a7a7f2a3f628d2f7ef11cb6188bcf0b9e1558151d511b974dfea10a49afe192b"}, - {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = 
"sha256:f1fb026c575e16f673c61c7b86144517705865173f3d0907040ac30c4f9f5915"}, - {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:635702b2fed997e0ac256b2cfbdb4dd0bf7c56b5d8fba8ef03489c03b3eb40e2"}, - {file = "pydantic_core-2.33.0-cp311-cp311-win32.whl", hash = "sha256:07b4ced28fccae3f00626eaa0c4001aa9ec140a29501770a88dbbb0966019a86"}, - {file = "pydantic_core-2.33.0-cp311-cp311-win_amd64.whl", hash = "sha256:4927564be53239a87770a5f86bdc272b8d1fbb87ab7783ad70255b4ab01aa25b"}, - {file = "pydantic_core-2.33.0-cp311-cp311-win_arm64.whl", hash = "sha256:69297418ad644d521ea3e1aa2e14a2a422726167e9ad22b89e8f1130d68e1e9a"}, - {file = "pydantic_core-2.33.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6c32a40712e3662bebe524abe8abb757f2fa2000028d64cc5a1006016c06af43"}, - {file = "pydantic_core-2.33.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ec86b5baa36f0a0bfb37db86c7d52652f8e8aa076ab745ef7725784183c3fdd"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4deac83a8cc1d09e40683be0bc6d1fa4cde8df0a9bf0cda5693f9b0569ac01b6"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:175ab598fb457a9aee63206a1993874badf3ed9a456e0654273e56f00747bbd6"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f36afd0d56a6c42cf4e8465b6441cf546ed69d3a4ec92724cc9c8c61bd6ecf4"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a98257451164666afafc7cbf5fb00d613e33f7e7ebb322fbcd99345695a9a61"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecc6d02d69b54a2eb83ebcc6f29df04957f734bcf309d346b4f83354d8376862"}, - {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a69b7596c6603afd049ce7f3835bcf57dd3892fc7279f0ddf987bebed8caa5a"}, - {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea30239c148b6ef41364c6f51d103c2988965b643d62e10b233b5efdca8c0099"}, - {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:abfa44cf2f7f7d7a199be6c6ec141c9024063205545aa09304349781b9a125e6"}, - {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20d4275f3c4659d92048c70797e5fdc396c6e4446caf517ba5cad2db60cd39d3"}, - {file = "pydantic_core-2.33.0-cp312-cp312-win32.whl", hash = "sha256:918f2013d7eadea1d88d1a35fd4a1e16aaf90343eb446f91cb091ce7f9b431a2"}, - {file = "pydantic_core-2.33.0-cp312-cp312-win_amd64.whl", hash = "sha256:aec79acc183865bad120b0190afac467c20b15289050648b876b07777e67ea48"}, - {file = "pydantic_core-2.33.0-cp312-cp312-win_arm64.whl", hash = "sha256:5461934e895968655225dfa8b3be79e7e927e95d4bd6c2d40edd2fa7052e71b6"}, - {file = "pydantic_core-2.33.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f00e8b59e1fc8f09d05594aa7d2b726f1b277ca6155fc84c0396db1b373c4555"}, - {file = "pydantic_core-2.33.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a73be93ecef45786d7d95b0c5e9b294faf35629d03d5b145b09b81258c7cd6d"}, - {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff48a55be9da6930254565ff5238d71d5e9cd8c5487a191cb85df3bdb8c77365"}, - {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a4ea04195638dcd8c53dadb545d70badba51735b1594810e9768c2c0b4a5da"}, 
- {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41d698dcbe12b60661f0632b543dbb119e6ba088103b364ff65e951610cb7ce0"}, - {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae62032ef513fe6281ef0009e30838a01057b832dc265da32c10469622613885"}, - {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f225f3a3995dbbc26affc191d0443c6c4aa71b83358fd4c2b7d63e2f6f0336f9"}, - {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5bdd36b362f419c78d09630cbaebc64913f66f62bda6d42d5fbb08da8cc4f181"}, - {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2a0147c0bef783fd9abc9f016d66edb6cac466dc54a17ec5f5ada08ff65caf5d"}, - {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:c860773a0f205926172c6644c394e02c25421dc9a456deff16f64c0e299487d3"}, - {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:138d31e3f90087f42aa6286fb640f3c7a8eb7bdae829418265e7e7474bd2574b"}, - {file = "pydantic_core-2.33.0-cp313-cp313-win32.whl", hash = "sha256:d20cbb9d3e95114325780f3cfe990f3ecae24de7a2d75f978783878cce2ad585"}, - {file = "pydantic_core-2.33.0-cp313-cp313-win_amd64.whl", hash = "sha256:ca1103d70306489e3d006b0f79db8ca5dd3c977f6f13b2c59ff745249431a606"}, - {file = "pydantic_core-2.33.0-cp313-cp313-win_arm64.whl", hash = "sha256:6291797cad239285275558e0a27872da735b05c75d5237bbade8736f80e4c225"}, - {file = "pydantic_core-2.33.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7b79af799630af263eca9ec87db519426d8c9b3be35016eddad1832bac812d87"}, - {file = "pydantic_core-2.33.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eabf946a4739b5237f4f56d77fa6668263bc466d06a8036c055587c130a46f7b"}, - {file = "pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7"}, - {file = "pydantic_core-2.33.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:7c9c84749f5787781c1c45bb99f433402e484e515b40675a5d121ea14711cf61"}, - {file = "pydantic_core-2.33.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64672fa888595a959cfeff957a654e947e65bbe1d7d82f550417cbd6898a1d6b"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26bc7367c0961dec292244ef2549afa396e72e28cc24706210bd44d947582c59"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce72d46eb201ca43994303025bd54d8a35a3fc2a3495fac653d6eb7205ce04f4"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14229c1504287533dbf6b1fc56f752ce2b4e9694022ae7509631ce346158de11"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:085d8985b1c1e48ef271e98a658f562f29d89bda98bf120502283efbc87313eb"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31860fbda80d8f6828e84b4a4d129fd9c4535996b8249cfb8c720dc2a1a00bb8"}, - {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f200b2f20856b5a6c3a35f0d4e344019f805e363416e609e9b47c552d35fd5ea"}, - {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f72914cfd1d0176e58ddc05c7a47674ef4222c8253bf70322923e73e14a4ac3"}, - 
{file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:91301a0980a1d4530d4ba7e6a739ca1a6b31341252cb709948e0aca0860ce0ae"}, - {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7419241e17c7fbe5074ba79143d5523270e04f86f1b3a0dff8df490f84c8273a"}, - {file = "pydantic_core-2.33.0-cp39-cp39-win32.whl", hash = "sha256:7a25493320203005d2a4dac76d1b7d953cb49bce6d459d9ae38e30dd9f29bc9c"}, - {file = "pydantic_core-2.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:82a4eba92b7ca8af1b7d5ef5f3d9647eee94d1f74d21ca7c21e3a2b92e008358"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2762c568596332fdab56b07060c8ab8362c56cf2a339ee54e491cd503612c50"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bf637300ff35d4f59c006fff201c510b2b5e745b07125458a5389af3c0dff8c"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c151ce3d59ed56ebd7ce9ce5986a409a85db697d25fc232f8e81f195aa39a1"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee65f0cc652261744fd07f2c6e6901c914aa6c5ff4dcfaf1136bc394d0dd26b"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:024d136ae44d233e6322027bbf356712b3940bee816e6c948ce4b90f18471b3d"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e37f10f6d4bc67c58fbd727108ae1d8b92b397355e68519f1e4a7babb1473442"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:502ed542e0d958bd12e7c3e9a015bce57deaf50eaa8c2e1c439b512cb9db1e3a"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:715c62af74c236bf386825c0fdfa08d092ab0f191eb5b4580d11c3189af9d330"}, - {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bccc06fa0372151f37f6b69834181aa9eb57cf8665ed36405fb45fbf6cac3bae"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d8dc9f63a26f7259b57f46a7aab5af86b2ad6fbe48487500bb1f4b27e051e4c"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:30369e54d6d0113d2aa5aee7a90d17f225c13d87902ace8fcd7bbf99b19124db"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3eb479354c62067afa62f53bb387827bee2f75c9c79ef25eef6ab84d4b1ae3b"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0310524c833d91403c960b8a3cf9f46c282eadd6afd276c8c5edc617bd705dc9"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eddb18a00bbb855325db27b4c2a89a4ba491cd6a0bd6d852b225172a1f54b36c"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ade5dbcf8d9ef8f4b28e682d0b29f3008df9842bb5ac48ac2c17bc55771cc976"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2c0afd34f928383e3fd25740f2050dbac9d077e7ba5adbaa2227f4d4f3c8da5c"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7da333f21cd9df51d5731513a6d39319892947604924ddf2e24a4612975fb936"}, - {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-win_amd64.whl", hash = 
"sha256:4b6d77c75a57f041c5ee915ff0b0bb58eabb78728b69ed967bc5b780e8f701b8"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba95691cf25f63df53c1d342413b41bd7762d9acb425df8858d7efa616c0870e"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f1ab031feb8676f6bd7c85abec86e2935850bf19b84432c64e3e239bffeb1ec"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1151827eef98b83d49b6ca6065575876a02d2211f259fb1a6b7757bd24dd8"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a66d931ea2c1464b738ace44b7334ab32a2fd50be023d863935eb00f42be1778"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0bcf0bab28995d483f6c8d7db25e0d05c3efa5cebfd7f56474359e7137f39856"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:89670d7a0045acb52be0566df5bc8b114ac967c662c06cf5e0c606e4aadc964b"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:b716294e721d8060908dbebe32639b01bfe61b15f9f57bcc18ca9a0e00d9520b"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fc53e05c16697ff0c1c7c2b98e45e131d4bfb78068fffff92a82d169cbb4c7b7"}, - {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:68504959253303d3ae9406b634997a2123a0b0c1da86459abbd0ffc921695eac"}, - {file = "pydantic_core-2.33.0.tar.gz", hash = "sha256:40eb8af662ba409c3cbf4a8150ad32ae73514cd7cb1f1a2113af39763dd616b3"}, + {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"}, + {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5183e4f6a2d468787243ebcd70cf4098c247e60d73fb7d68d5bc1e1beaa0c4db"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:398a38d323f37714023be1e0285765f0a27243a8b1506b7b7de87b647b517e48"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3776f0001b43acebfa86f8c64019c043b55cc5a6a2e313d728b5c95b46969"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c566dd9c5f63d22226409553531f89de0cac55397f2ab8d97d6f06cfce6d947e"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d5f3acc81452c56895e90643a625302bd6be351e7010664151cc55b7b97f89"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3a07fadec2a13274a8d861d3d37c61e97a816beae717efccaa4b36dfcaadcde"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f99aeda58dce827f76963ee87a0ebe75e648c72ff9ba1174a253f6744f518f65"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:902dbc832141aa0ec374f4310f1e4e7febeebc3256f00dc359a9ac3f264a45dc"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe44d56aa0b00d66640aa84a3cbe80b7a3ccdc6f0b1ca71090696a6d4777c091"}, + {file = "pydantic_core-2.33.1-cp310-cp310-win32.whl", hash 
= "sha256:ed3eb16d51257c763539bde21e011092f127a2202692afaeaccb50db55a31383"}, + {file = "pydantic_core-2.33.1-cp310-cp310-win_amd64.whl", hash = "sha256:694ad99a7f6718c1a498dc170ca430687a39894a60327f548e02a9c7ee4b6504"}, + {file = "pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24"}, + {file = "pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89"}, + {file = "pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8"}, + {file = "pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3"}, + {file = 
"pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea"}, + {file = "pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a"}, + {file = "pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523"}, + {file = "pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d"}, + {file = "pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c"}, + {file = "pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18"}, + {file = "pydantic_core-2.33.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5ab77f45d33d264de66e1884fca158bc920cb5e27fd0764a72f72f5756ae8bdb"}, + {file = "pydantic_core-2.33.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7aaba1b4b03aaea7bb59e1b5856d734be011d3e6d98f5bcaa98cb30f375f2ad"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb66263e9ba8fea2aa85e1e5578980d127fb37d7f2e292773e7bc3a38fb0c7b"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f2648b9262607a7fb41d782cc263b48032ff7a03a835581abbf7a3bec62bcf5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723c5630c4259400818b4ad096735a829074601805d07f8cafc366d95786d331"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d100e3ae783d2167782391e0c1c7a20a31f55f8015f3293647544df3f9c67824"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177d50460bc976a0369920b6c744d927b0ecb8606fb56858ff542560251b19e5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3edde68d1a1f9af1273b2fe798997b33f90308fb6d44d8550c89fc6a3647cf6"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a62c3c3ef6a7e2c45f7853b10b5bc4ddefd6ee3cd31024754a1a5842da7d598d"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:c91dbb0ab683fa0cd64a6e81907c8ff41d6497c346890e26b23de7ee55353f96"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f466e8bf0a62dc43e068c12166281c2eca72121dd2adc1040f3aa1e21ef8599"}, + {file = "pydantic_core-2.33.1-cp39-cp39-win32.whl", hash = "sha256:ab0277cedb698749caada82e5d099dc9fed3f906a30d4c382d1a21725777a1e5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-win_amd64.whl", hash = "sha256:5773da0ee2d17136b1f1c6fbde543398d452a6ad2a7b54ea1033e2daa739b8d2"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c834f54f8f4640fd7e4b193f80eb25a0602bba9e19b3cd2fc7ffe8199f5ae02"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:049e0de24cf23766f12cc5cc71d8abc07d4a9deb9061b334b62093dedc7cb068"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a28239037b3d6f16916a4c831a5a0eadf856bdd6d2e92c10a0da3a59eadcf3e"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3da303ab5f378a268fa7d45f37d7d85c3ec19769f28d2cc0c61826a8de21fe"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25626fb37b3c543818c14821afe0fd3830bc327a43953bc88db924b68c5723f1"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3ab2d36e20fbfcce8f02d73c33a8a7362980cff717926bbae030b93ae46b56c7"}, + {file = 
"pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2f9284e11c751b003fd4215ad92d325d92c9cb19ee6729ebd87e3250072cdcde"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:048c01eee07d37cbd066fc512b9d8b5ea88ceeb4e629ab94b3e56965ad655add"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5ccd429694cf26af7997595d627dd2637e7932214486f55b8a357edaac9dae8c"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7edbc454a29fc6aeae1e1eecba4f07b63b8d76e76a748532233c4c167b4cb9ea"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ad05b683963f69a1d5d2c2bdab1274a31221ca737dbbceaa32bcb67359453cdd"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df6a94bf9452c6da9b5d76ed229a5683d0306ccb91cca8e1eea883189780d568"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7965c13b3967909a09ecc91f21d09cfc4576bf78140b988904e94f130f188396"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3f1fdb790440a34f6ecf7679e1863b825cb5ffde858a9197f851168ed08371e5"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5277aec8d879f8d05168fdd17ae811dd313b8ff894aeeaf7cd34ad28b4d77e33"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8ab581d3530611897d863d1a649fb0644b860286b4718db919bfd51ece41f10b"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0483847fa9ad5e3412265c1bd72aad35235512d9ce9d27d81a56d935ef489672"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:de9e06abe3cc5ec6a2d5f75bc99b0bdca4f5c719a5b34026f8c57efbdecd2ee3"}, + {file = "pydantic_core-2.33.1.tar.gz", hash = 
"sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df"}, ] [package.dependencies] @@ -2130,13 +2130,13 @@ urllib3 = ">=2" [[package]] name = "typing-extensions" -version = "4.13.0" +version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.13.0-py3-none-any.whl", hash = "sha256:c8dd92cc0d6425a97c18fbb9d1954e5ff92c1ca881a309c45f06ebc0b79058e5"}, - {file = "typing_extensions-4.13.0.tar.gz", hash = "sha256:0a4ac55a5820789d87e297727d229866c9650f6521b64206413c4fbada24d95b"}, + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, ] [[package]] @@ -2166,13 +2166,13 @@ files = [ [[package]] name = "urllib3" -version = "2.3.0" +version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" files = [ - {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, - {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] diff --git a/pyproject.toml b/pyproject.toml index ca930475..f2f6b7c7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "humanloop" [tool.poetry] name = "humanloop" -version = "0.8.29b1" +version = "0.8.30" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 0866270e..2cee6af8 100644 --- a/reference.md +++ b/reference.md @@ -56,7 +56,7 @@ client.prompts.log( messages=[{"role": "user", "content": "What really happened at Roswell?"}], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-19 00:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -193,7 +193,7 @@ client.prompts.log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model can decide to call one or more of the provided tools. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. @@ -512,7 +512,7 @@ client.prompts.update_log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model can decide to call one or more of the provided tools. 
+- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. @@ -743,7 +743,7 @@ for chunk in response: Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model can decide to call one or more of the provided tools. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. @@ -1017,7 +1017,7 @@ client.prompts.call( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model can decide to call one or more of the provided tools. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. @@ -6760,10 +6760,10 @@ client.flows.log( output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", log_status="incomplete", start_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:35+00:00", + "2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:39+00:00", + "2024-07-08 21:40:39+00:00", ), ) diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index ce08a630..751292f1 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.29b1", + "X-Fern-SDK-Version": "0.8.30", } headers["X-API-KEY"] = self.api_key return headers diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py index a5e2a03a..69f678a4 100644 --- a/src/humanloop/flows/client.py +++ b/src/humanloop/flows/client.py @@ -204,10 +204,10 @@ def log( output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", log_status="incomplete", start_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:35+00:00", + "2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:39+00:00", + "2024-07-08 21:40:39+00:00", ), ) """ @@ -1468,10 +1468,10 @@ async def main() -> None: output="The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.", log_status="incomplete", start_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:35+00:00", + "2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat( - "2024-07-08 22:40:39+00:00", + "2024-07-08 21:40:39+00:00", ), ) diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py index ce350c4e..fe2ce046 100644 --- a/src/humanloop/prompts/client.py +++ b/src/humanloop/prompts/client.py @@ -154,7 +154,7 @@ def log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. prompt : typing.Optional[PromptKernelRequestParams] @@ -248,7 +248,7 @@ def log( messages=[{"role": "user", "content": "What really happened at Roswell?"}], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-19 00:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -411,7 +411,7 @@ def update_log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. output : typing.Optional[str] @@ -600,7 +600,7 @@ def call_stream( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. prompt : typing.Optional[PromptKernelRequestParams] @@ -817,7 +817,7 @@ def call( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. 
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function. prompt : typing.Optional[PromptKernelRequestParams] @@ -2183,7 +2183,7 @@ async def log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. prompt : typing.Optional[PromptKernelRequestParams] @@ -2283,7 +2283,7 @@ async def main() -> None: ], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-19 00:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -2449,7 +2449,7 @@ async def update_log( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. output : typing.Optional[str] @@ -2646,7 +2646,7 @@ async def call_stream( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. prompt : typing.Optional[PromptKernelRequestParams] @@ -2871,7 +2871,7 @@ async def call( Controls how the model uses tools. The following options are supported: - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. - - `'required'` means the model can decide to call one or more of the provided tools. + - `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. 
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
index 7501d3de..e465218f 100644
--- a/src/humanloop/requests/prompt_call_response.py
+++ b/src/humanloop/requests/prompt_call_response.py
@@ -36,7 +36,7 @@ class PromptCallResponseParams(typing_extensions.TypedDict):
     Controls how the model uses tools. The following options are supported:
     - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
     - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-    - `'required'` means the model can decide to call one or more of the provided tools.
+    - `'required'` means the model must call one or more of the provided tools.
     - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
     """

diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py
index 9e637b2b..15dba29f 100644
--- a/src/humanloop/requests/prompt_log_response.py
+++ b/src/humanloop/requests/prompt_log_response.py
@@ -66,7 +66,7 @@ class PromptLogResponseParams(typing_extensions.TypedDict):
     Controls how the model uses tools. The following options are supported:
     - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
     - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-    - `'required'` means the model can decide to call one or more of the provided tools.
+    - `'required'` means the model must call one or more of the provided tools.
     - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
     """

diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 067d14bc..dc643472 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -45,7 +45,7 @@ class PromptCallResponse(UncheckedBaseModel):
     Controls how the model uses tools. The following options are supported:
     - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
     - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-    - `'required'` means the model can decide to call one or more of the provided tools.
+    - `'required'` means the model must call one or more of the provided tools.
     - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
     """

diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 9f699959..a88f7471 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -69,7 +69,7 @@ class PromptLogResponse(UncheckedBaseModel):
     Controls how the model uses tools. The following options are supported:
     - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
     - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-    - `'required'` means the model can decide to call one or more of the provided tools.
+    - `'required'` means the model must call one or more of the provided tools.
     - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
     """

From a0968a7df280b133202a603f1b79572fb4b87579 Mon Sep 17 00:00:00 2001
From: oleh
Date: Thu, 10 Apr 2025 19:37:54 +0100
Subject: [PATCH 2/2] new poetry lock file

---
 poetry.lock                              | 124 +++-
 pyproject.toml                           |   2 +-
 src/humanloop/core/client_wrapper.py     |   2 +-
 src/humanloop/eval_utils/__init__.py     |   4 +
 src/humanloop/eval_utils/context.py      |  26 +
 src/humanloop/eval_utils/run.py          | 755 +++++++++++++++++++++++
 src/humanloop/eval_utils/types.py        |  91 +++
 src/humanloop/otel/exporter.py           | 365 +++++++++++
 src/humanloop/otel/helpers.py            |   1 -
 src/humanloop/types/trace_status.py      |   5 +
 src/humanloop/utilities/__init__.py      |   0
 src/humanloop/utilities/flow.py          |  89 +++
 src/humanloop/utilities/helpers.py       |  21 +
 src/humanloop/utilities/prompt.py        |  88 +++
 src/humanloop/utilities/tool.py          | 505 +++++++++++++++
 src/humanloop/utilities/types.py         |  12 +
 tests/utilities/__init__.py              |   0
 tests/utilities/test_flow_decorator.py   | 287 +++++++++
 tests/utilities/test_prompt_decorator.py | 321 ++++++++++
 tests/utilities/test_tool_decorator.py   | 567 +++++++++++++++++
 20 files changed, 3243 insertions(+), 22 deletions(-)
 create mode 100644 src/humanloop/eval_utils/__init__.py
 create mode 100644 src/humanloop/eval_utils/context.py
 create mode 100644 src/humanloop/eval_utils/run.py
 create mode 100644 src/humanloop/eval_utils/types.py
 create mode 100644 src/humanloop/otel/exporter.py
 create mode 100644 src/humanloop/types/trace_status.py
 create mode 100644 src/humanloop/utilities/__init__.py
 create mode 100644 src/humanloop/utilities/flow.py
 create mode 100644 src/humanloop/utilities/helpers.py
 create mode 100644 src/humanloop/utilities/prompt.py
 create mode 100644 src/humanloop/utilities/tool.py
 create mode 100644 src/humanloop/utilities/types.py
 create mode 100644 tests/utilities/__init__.py
 create mode 100644 tests/utilities/test_flow_decorator.py
 create mode 100644 tests/utilities/test_prompt_decorator.py
 create mode 100644 tests/utilities/test_tool_decorator.py

diff --git a/poetry.lock b/poetry.lock
index f1d5d785..3915e235 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
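The regenerated lock file that follows records, for every package, the dependency groups that require it (for example `groups = ["main", "dev"]`) and inlines PEP 508 environment markers after a `;` inside extras, where the Poetry 1.x lock kept extras unconditional. Those `;` clauses are ordinary environment markers; a small sketch of evaluating one with the `packaging` library (assumed available, it is not added by this patch):

from packaging.markers import Marker

# Markers like the ones embedded in the extras below evaluate against the
# running interpreter and platform.
marker = Marker('platform_python_implementation == "CPython" and python_version >= "3.10"')
print(marker.evaluate())  # True on CPython 3.10 or newer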
[[package]] name = "annotated-types" @@ -6,6 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -17,6 +18,7 @@ version = "0.49.0" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "anthropic-0.49.0-py3-none-any.whl", hash = "sha256:bbc17ad4e7094988d2fa86b87753ded8dce12498f4b85fe5810f208f454a8375"}, {file = "anthropic-0.49.0.tar.gz", hash = "sha256:c09e885b0f674b9119b4f296d8508907f6cff0009bc20d5cf6b35936c40b4398"}, @@ -41,6 +43,7 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] [[package]] @@ -63,18 +66,19 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) 
; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "certifi" @@ -82,6 +86,7 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -93,6 +98,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -194,6 +200,7 @@ version = "5.14.2" description = "" optional = false python-versions = "<4.0,>=3.9" +groups = ["dev"] files = [ {file = "cohere-5.14.2-py3-none-any.whl", hash = "sha256:fe2cbbf6c79fba21a66731d387647b981ab5ea6dbcfb09beb85386e96695bd64"}, {file = "cohere-5.14.2.tar.gz", hash = "sha256:5aaf5a70e619ade2bb991b12f573fd4cc9bd1f3097f0f67acd973d060a7e86c6"}, @@ -216,10 +223,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} [[package]] name = "deepdiff" @@ -227,6 +236,7 @@ version = "8.4.2" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"}, {file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"}, @@ -245,6 +255,7 @@ version = "1.2.18" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] files = [ {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, @@ -254,7 +265,7 @@ files = [ wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] [[package]] name = "distro" @@ -262,6 +273,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -273,6 +285,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -287,6 +301,7 @@ version = "1.10.0" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"}, {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"}, @@ -333,6 +348,7 @@ version = "3.18.0" description = "A platform independent file lock." 
optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, @@ -341,7 +357,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "fsspec" @@ -349,6 +365,7 @@ version = "2025.3.2" description = "File-system specification" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"}, {file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"}, @@ -388,6 +405,7 @@ version = "0.22.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "groq-0.22.0-py3-none-any.whl", hash = "sha256:f53d3966dff713aaa635671c2d075ebb932b0d48e3c4031ede9b84a2a6694c79"}, {file = "groq-0.22.0.tar.gz", hash = "sha256:9d090fbe4a051655faff649890d18aaacb3121393ad9d55399171fe081f1057b"}, @@ -407,6 +425,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -418,6 +437,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -439,6 +459,7 @@ version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -451,7 +472,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -463,6 +484,7 @@ version = "0.4.0" description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, @@ -474,6 +496,7 @@ version = "0.30.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["main", "dev"] files = [ {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"}, {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"}, @@ -509,6 +532,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -523,6 +547,7 @@ version = "8.6.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, @@ -532,12 +557,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] @@ -546,6 +571,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -557,6 +583,7 @@ version = "0.9.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, @@ -642,6 +669,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -663,6 +691,7 @@ version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, @@ -677,6 +706,7 @@ version = "5.1.0" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"}, {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"}, @@ -775,6 +805,7 @@ version = "1.0.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, @@ -821,6 +852,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -832,6 +864,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -877,6 +910,7 @@ version = "1.72.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "openai-1.72.0-py3-none-any.whl", hash = "sha256:34f5496ba5c8cb06c592831d69e847e2d164526a2fb92afdc3b5cf2891c328c3"}, {file = "openai-1.72.0.tar.gz", hash = "sha256:f51de971448905cc90ed5175a5b19e92fd94e31f68cde4025762f9f5257150db"}, @@ -903,6 +937,7 @@ version = "1.32.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_api-1.32.0-py3-none-any.whl", hash = "sha256:15df743c765078611f376037b0d9111ec5c1febf2ec9440cdd919370faa1ce55"}, {file = "opentelemetry_api-1.32.0.tar.gz", hash = "sha256:2623280c916f9b19cad0aa4280cb171265f19fd2909b0d47e4f06f7c83b02cb5"}, @@ -918,6 +953,7 @@ version = "0.53b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation-0.53b0-py3-none-any.whl", hash = "sha256:70600778fd567c9c5fbfca181378ae179c0dec3ff613171707d3d77c360ff105"}, {file = "opentelemetry_instrumentation-0.53b0.tar.gz", hash = "sha256:f2c21d71a3cdf28c656e3d90d247ee7558fb9b0239b3d9e9190266499dbed9d2"}, @@ -935,6 +971,7 @@ version = "0.39.0" description = "OpenTelemetry Anthropic instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_anthropic-0.39.0-py3-none-any.whl", hash = "sha256:4e456883a2dec8da1977a27d6444798252829e23cf0500f222b70db77cb3d125"}, {file = "opentelemetry_instrumentation_anthropic-0.39.0.tar.gz", hash = "sha256:62bec0cde6ebc0b77ef324e5e8262fd5545d7db68f38a9da16916d703e32a7ad"}, @@ -952,6 +989,7 @@ version = "0.39.0" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_bedrock-0.39.0-py3-none-any.whl", hash = "sha256:d5923b1c72216d7e23bf698b6ffbbfdb1cf7cbb26c6b04f5626720fbea33af45"}, {file = "opentelemetry_instrumentation_bedrock-0.39.0.tar.gz", hash = "sha256:a643abaeb223d7337dbb4cec13e5d4f8be3db2696e954ff40351c5b87646ac22"}, @@ -971,6 +1009,7 @@ version = "0.39.0" description = "OpenTelemetry Cohere instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_cohere-0.39.0-py3-none-any.whl", hash = "sha256:3edf85d1f5236492568d5a7ced15617922c832535cc74cf2b6d5a55abe1968a6"}, {file = "opentelemetry_instrumentation_cohere-0.39.0.tar.gz", hash = "sha256:cd1644ec795aa89b9609890e7da2ce0b97287900e0558ba4237588436fd87556"}, @@ -988,6 +1027,7 @@ version = "0.39.0" 
description = "OpenTelemetry Groq instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_groq-0.39.0-py3-none-any.whl", hash = "sha256:60d3b0bdcb8f765ab0f0ee749a9b78285338ae40506ff27ed961a76f23d377d7"}, {file = "opentelemetry_instrumentation_groq-0.39.0.tar.gz", hash = "sha256:7664f9d097dcc4bf8d611068c85c4e19b522ae70dda13b4d172230d830191600"}, @@ -1005,6 +1045,7 @@ version = "0.39.0" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_openai-0.39.0-py3-none-any.whl", hash = "sha256:ca6f0e2e4af526e05850b87c6749068d7a4557ef3f02babf956552760af2315b"}, {file = "opentelemetry_instrumentation_openai-0.39.0.tar.gz", hash = "sha256:dffb5cb2d89410dc4cb5ed2b978e930eadecd430ea5b7e0ac003088e1eee0f4d"}, @@ -1023,6 +1064,7 @@ version = "0.39.0" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_replicate-0.39.0-py3-none-any.whl", hash = "sha256:074de415aa96d8d00062c70676e6bec9d43c0db7d674d517b99e9a522cb62c49"}, {file = "opentelemetry_instrumentation_replicate-0.39.0.tar.gz", hash = "sha256:7f113f3bdd6bf1be3872b3de5d595a63ed368874697c2f1388bdaada18479c57"}, @@ -1040,6 +1082,7 @@ version = "1.32.0" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_proto-1.32.0-py3-none-any.whl", hash = "sha256:f699269dc037e18fba05442580a8682c9fbd0f4c7f5addfed82c44be0c53c5ff"}, {file = "opentelemetry_proto-1.32.0.tar.gz", hash = "sha256:f8b70ae52f4ef8a4e4c0760e87c9071e07ece2618c080d4839bef44c0156cd44"}, @@ -1054,6 +1097,7 @@ version = "1.32.0" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_sdk-1.32.0-py3-none-any.whl", hash = "sha256:ed252d035c22a15536c1f603ca089298daab60850fc2f5ddfa95d95cc1c043ea"}, {file = "opentelemetry_sdk-1.32.0.tar.gz", hash = "sha256:5ff07fb371d1ab1189fa7047702e2e888b5403c5efcbb18083cae0d5aa5f58d2"}, @@ -1070,6 +1114,7 @@ version = "0.53b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions-0.53b0-py3-none-any.whl", hash = "sha256:561da89f766ab51615c0e72b12329e0a1bc16945dbd62c8646ffc74e36a1edff"}, {file = "opentelemetry_semantic_conventions-0.53b0.tar.gz", hash = "sha256:05b7908e1da62d72f9bf717ed25c72f566fe005a2dd260c61b11e025f2552cf6"}, @@ -1085,6 +1130,7 @@ version = "0.4.3" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"}, {file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"}, @@ -1096,6 +1142,7 @@ version = "5.3.2" description = "Orderly set" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "orderly_set-5.3.2-py3-none-any.whl", hash = "sha256:81250ce092333db454a70e5d7ef1409ec4d3002e0d2c7546d710f4639f20f19d"}, {file = "orderly_set-5.3.2.tar.gz", hash = 
"sha256:5fd6d917788d0e2196582f38a1c4b74591963d4df9be24ae5a51ba4cea2c987f"}, @@ -1107,6 +1154,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1118,6 +1166,7 @@ version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, @@ -1204,6 +1253,7 @@ version = "1.20.2" description = "parse() is the opposite of format()" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"}, {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"}, @@ -1215,6 +1265,7 @@ version = "0.6.4" description = "Simplifies to build parse types based on the parse module" optional = false python-versions = "!=3.0.*,!=3.1.*,>=2.7" +groups = ["dev"] files = [ {file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"}, {file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"}, @@ -1225,9 +1276,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} six = ">=1.15" [package.extras] -develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"] docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] -testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] +testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"] [[package]] name = "pluggy" @@ -1235,6 +1286,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1250,6 +1302,7 @@ version = "5.29.4" description = "" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = 
"sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, @@ -1270,6 +1323,7 @@ version = "19.0.1" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"}, {file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"}, @@ -1324,6 +1378,7 @@ version = "2.11.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"}, {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"}, @@ -1337,7 +1392,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -1345,6 +1400,7 @@ version = "2.33.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"}, {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"}, @@ -1456,6 +1512,7 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -1478,6 +1535,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -1496,6 +1554,7 @@ version = "1.7.0" description = "Adds the ability to retry flaky tests in CI environments" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"}, {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"}, @@ -1513,6 +1572,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1527,6 +1587,7 @@ version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, @@ -1541,6 +1602,7 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -1552,6 +1614,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1614,6 +1677,7 @@ version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, @@ -1630,6 +1694,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -1733,6 +1798,7 @@ version = "1.0.4" description = "Python client for Replicate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"}, {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"}, @@ -1750,6 +1816,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1771,6 +1838,7 @@ version = "0.24.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"}, {file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"}, @@ -1894,6 +1962,7 @@ version = "0.5.7" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, @@ -1921,6 +1990,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -1932,6 +2002,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -1943,6 +2014,7 @@ version = "0.9.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, @@ -1990,6 +2062,7 @@ version = "0.21.1" description = "" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"}, {file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"}, @@ -2022,6 +2095,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2063,6 +2138,7 @@ version = "4.67.1" description = 
"Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -2084,6 +2160,7 @@ version = "4.23.0.20241208" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"}, {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"}, @@ -2098,6 +2175,7 @@ version = "5.29.1.20250403" description = "Typing stubs for protobuf" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"}, {file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"}, @@ -2109,6 +2187,7 @@ version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, @@ -2120,6 +2199,7 @@ version = "2.32.0.20250328" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, @@ -2134,6 +2214,7 @@ version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, @@ -2145,6 +2226,7 @@ version = "0.4.0" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, @@ -2159,6 +2241,7 @@ version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["dev"] files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -2170,13 +2253,14 @@ version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2187,6 +2271,7 @@ version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -2275,20 +2360,21 @@ version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.9,<4" content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d" diff --git a/pyproject.toml b/pyproject.toml index f2f6b7c7..cc530e25 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "humanloop" [tool.poetry] name = "humanloop" -version = "0.8.30" +version = "0.8.31" description = "" readme = "README.md" authors = [] diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index 751292f1..210cdddb 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.30", + "X-Fern-SDK-Version": "0.8.31", } headers["X-API-KEY"] = self.api_key return headers diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py new file mode 100644 index 00000000..ac5a5eba --- /dev/null +++ b/src/humanloop/eval_utils/__init__.py @@ -0,0 +1,4 @@ +from .run import log_with_evaluation_context, run_eval +from .types import File + +__all__ = ["run_eval", "log_with_evaluation_context", 
"File"] diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py new file mode 100644 index 00000000..c840ba86 --- /dev/null +++ b/src/humanloop/eval_utils/context.py @@ -0,0 +1,26 @@ +from typing import Callable, TypedDict + + +class EvaluationContext(TypedDict): + """Context Log to Humanloop. + + Per datapoint state that is set when an Evaluation is ran. + """ + + """Required for associating a Log with the Evaluation Run.""" + source_datapoint_id: str + + """Overloaded .log method call.""" + upload_callback: Callable[[str], None] + + """ID of the evaluated File.""" + file_id: str + + """Path of the evaluated File.""" + path: str + + """Required for associating a Log with the Evaluation Run.""" + run_id: str + + +EVALUATION_CONTEXT_VARIABLE_NAME = "__EVALUATION_CONTEXT" diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py new file mode 100644 index 00000000..3d1a5c9e --- /dev/null +++ b/src/humanloop/eval_utils/run.py @@ -0,0 +1,755 @@ +""" +Evaluation utils for the Humanloop SDK. + +This module provides a set of utilities to aid running Eval workflows on Humanloop +where you are managing the runtime of your application in your code. + +Functions in this module should be accessed via the Humanloop client. They should +not be called directly. +""" + +import copy +import inspect +import json +import logging +import sys +import threading +import time +import types +import typing +from concurrent.futures import ThreadPoolExecutor +from contextvars import ContextVar +from datetime import datetime +from functools import partial +from logging import INFO +from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union + +from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse +from humanloop.core.api_error import ApiError +from humanloop.eval_utils.context import EvaluationContext +from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File + +# We use TypedDicts for requests, which is consistent with the rest of the SDK +from humanloop.evaluators.client import EvaluatorsClient +from humanloop.flows.client import FlowsClient +from humanloop.prompts.client import PromptsClient +from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict +from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict +from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict +from humanloop.requests import PromptKernelRequestParams as PromptDict +from humanloop.requests import ToolKernelRequestParams as ToolDict +from humanloop.tools.client import ToolsClient +from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats +from humanloop.types import DatapointResponse as Datapoint +from humanloop.types import EvaluationResponse, EvaluationStats + +# Responses are Pydantic models and we leverage them for improved request validation +from humanloop.types import FlowKernelRequest as Flow +from humanloop.types import NumericEvaluatorStatsResponse as NumericStats +from humanloop.types import PromptKernelRequest as Prompt +from humanloop.types import ToolKernelRequest as Tool +from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse +from humanloop.types.create_flow_log_response import CreateFlowLogResponse +from 
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
new file mode 100644
index 00000000..3d1a5c9e
--- /dev/null
+++ b/src/humanloop/eval_utils/run.py
@@ -0,0 +1,755 @@
+"""
+Evaluation utils for the Humanloop SDK.
+
+This module provides a set of utilities to aid running Eval workflows on Humanloop
+where you are managing the runtime of your application in your code.
+
+Functions in this module should be accessed via the Humanloop client. They should
+not be called directly.
+"""
+
+import copy
+import inspect
+import json
+import logging
+import sys
+import threading
+import time
+import types
+import typing
+from concurrent.futures import ThreadPoolExecutor
+from contextvars import ContextVar
+from datetime import datetime
+from functools import partial
+from logging import INFO
+from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union
+
+from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
+from humanloop.core.api_error import ApiError
+from humanloop.eval_utils.context import EvaluationContext
+from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+
+# We use TypedDicts for requests, which is consistent with the rest of the SDK
+from humanloop.evaluators.client import EvaluatorsClient
+from humanloop.flows.client import FlowsClient
+from humanloop.prompts.client import PromptsClient
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
+from humanloop.requests import PromptKernelRequestParams as PromptDict
+from humanloop.requests import ToolKernelRequestParams as ToolDict
+from humanloop.tools.client import ToolsClient
+from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
+from humanloop.types import DatapointResponse as Datapoint
+from humanloop.types import EvaluationResponse, EvaluationStats
+
+# Responses are Pydantic models and we leverage them for improved request validation
+from humanloop.types import FlowKernelRequest as Flow
+from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
+from humanloop.types import PromptKernelRequest as Prompt
+from humanloop.types import ToolKernelRequest as Tool
+from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from humanloop.types.create_flow_log_response import CreateFlowLogResponse
+from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
+from humanloop.types.create_tool_log_response import CreateToolLogResponse
+from humanloop.types.evaluation_run_response import EvaluationRunResponse
+from humanloop.types.run_stats_response import RunStatsResponse
+from pydantic import ValidationError
+
+if typing.TYPE_CHECKING:
+    from humanloop.client import BaseHumanloop
+
+# Setup logging
+logger = logging.getLogger(__name__)
+logger.setLevel(level=INFO)
+console_handler = logging.StreamHandler()
+console_handler.setLevel(INFO)
+formatter = logging.Formatter("%(message)s")
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+    logger.addHandler(console_handler)
+
+EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
+Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
+FileType = Literal["flow", "prompt", "tool", "evaluator"]
+
+
+# ANSI escape codes for logging colors
+YELLOW = "\033[93m"
+CYAN = "\033[96m"
+GREEN = "\033[92m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+
+CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)
+
+
+def log_with_evaluation_context(
+    client: CLIENT_TYPE,
+    evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
+) -> CLIENT_TYPE:
+    """
+    Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
+
+    This makes the overloaded log calls aware of whether the created Log is
+    part of an Evaluation (e.g. one started by eval_utils.run_eval).
+    """
+
+    def _is_evaluated_file(
+        evaluation_context: Optional[EvaluationContext],
+        log_args: dict,
+    ) -> bool:
+        """Check if the File the Log is created against is part of the current Evaluation.
+
+        The user of the .log API can refer to the File that owns the Log either by
+        ID or path. This function matches against either in the EvaluationContext.
+        """
+        if evaluation_context is None:
+            return False
+        return evaluation_context.get("file_id") == log_args.get("id") or evaluation_context.get(
+            "path"
+        ) == log_args.get("path")
+
+    # Copy the original log method into a hidden attribute
+    client._log = client.log
+
+    def _overload_log(
+        self,
+        **kwargs,
+    ) -> Union[
+        CreatePromptLogResponse,
+        CreateToolLogResponse,
+        CreateFlowLogResponse,
+        CreateEvaluatorLogResponse,
+    ]:
+        try:
+            evaluation_context = evaluation_context_variable.get()
+        except LookupError:
+            # If the Evaluation Context is not set, an Evaluation is not running
+            evaluation_context = None
+
+        if _is_evaluated_file(evaluation_context=evaluation_context, log_args=kwargs):
+            # If the .log API user does not provide the source_datapoint_id or run_id,
+            # override them with the values from the EvaluationContext
+            # _is_evaluated_file ensures that evaluation_context is not None
+            for attribute in ["source_datapoint_id", "run_id"]:
+                if attribute not in kwargs or kwargs[attribute] is None:
+                    kwargs[attribute] = evaluation_context[attribute]
+
+        # Call the original .log method
+        logger.debug(
+            "Logging %s with EvaluationContext %s on Thread %s",
+            kwargs,
+            evaluation_context,
+            threading.get_ident(),
+        )
+        response = self._log(**kwargs)
+
+        if _is_evaluated_file(
+            evaluation_context=evaluation_context,
+            log_args=kwargs,
+        ):
+            # Call the callback so the Evaluation can be updated
+            # _is_evaluated_file ensures that evaluation_context is not None
+            evaluation_context["upload_callback"](log_id=response.id)
+
+            # Mark the Evaluation Context as consumed
+            evaluation_context_variable.set(None)
+
+        return response
+
+    # Replace the original log method with the overloaded one
+    client.log = types.MethodType(_overload_log, client)
+    # Return the client with the overloaded log method
+    logger.debug("Overloaded the .log method of %s", client)
+    return client
+
+
+class _SimpleProgressBar:
+    """Thread-safe progress bar for the console."""
+
+    def __init__(self, total: int):
+        if total <= 0:
+            self._total = 1
+        else:
+            self._total = total
+        self._progress = 0
+        self._lock = threading.Lock()
+        self._start_time = None
+
+    def increment(self):
+        """Increment the progress bar by one finished task."""
+        with self._lock:
+            self._progress += 1
+            if self._start_time is None:
+                self._start_time = time.time()
+
+            bar_length = 40
+            block = int(round(bar_length * self._progress / self._total))
+            bar = "#" * block + "-" * (bar_length - block)
+
+            percentage = (self._progress / self._total) * 100
+            elapsed_time = time.time() - self._start_time
+            time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
+            eta = (self._total - self._progress) * time_per_item
+
+            progress_display = f"[{bar}] {self._progress}/{self._total}"
+            progress_display += f" ({percentage:.2f}%)"
+
+            if self._progress < self._total:
+                progress_display += f" | ETA: {int(eta)}s"
+            else:
+                progress_display += " | DONE"
+
+            sys.stderr.write("\r")  # Move the cursor to the beginning of the line
+            sys.stderr.write("\033[K")  # Clear the line from the cursor to the end
+            sys.stderr.write(progress_display)
+
+            if self._progress >= self._total:
+                sys.stderr.write("\n")
+
+
+def run_eval(
+    client: "BaseHumanloop",
+    file: File,
+    name: Optional[str],
+    dataset: Dataset,
+    evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
+    evaluators: Optional[Sequence[Evaluator]] = None,
+    workers: int = 4,
+) -> List[EvaluatorCheck]:
+    """
+    Evaluate your function for a given `Dataset` and set of `Evaluators`.
+
+    :param client: the Humanloop API client.
+    :param file: the Humanloop file being evaluated, including a function to run over the dataset.
+    :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File.
+    :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation.
+    :param evaluation_context_variable: the ContextVar used to propagate the EvaluationContext to the overloaded log methods.
+    :param evaluators: define how judgments are provided for this Evaluation.
+    :param workers: the number of threads used to process datapoints with your function concurrently.
+    :return: the per-Evaluator checks.
+    """
+
+    if "callable" in file and hasattr(file["callable"], "file"):
+        # When the `callable` in `file` is a decorated function,
+        # we need to validate that the other parameters of `file`
+        # match the attributes of the decorator
+        inner_file: File = file["callable"].file
+        if "path" in file and inner_file["path"] != file["path"]:
+            raise ValueError(
+                "`path` attribute specified in the `file` does not match the File path of the decorated function."
+            )
+        if "version" in file and inner_file["version"] != file["version"]:
+            raise ValueError(
+                "`version` attribute in the `file` does not match the File version of the decorated function."
+            )
+        if "type" in file and inner_file["type"] != file["type"]:
+            raise ValueError(
+                "`type` attribute of `file` argument does not match the File type of the decorated function."
+            )
+        if "id" in file:
+            raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.")
+        # The `file` attribute on the decorated function holds at least
+        # as much information as the `file` argument
+        file_ = copy.deepcopy(inner_file)
+    else:
+        file_ = file
+
+    # Get or create the file on Humanloop
+    version = file_.pop("version", {})
+
+    # Raise an error if neither `path` nor `id` is provided
+    if not file_.get("path") and not file_.get("id"):
+        raise ValueError("You must provide a path or id in your `file`.")
+
+    # Determine the `type` of the `file` to evaluate - if no `type` is provided, default to `flow`
+    try:
+        type_ = typing.cast(FileType, file_.pop("type"))
+        logger.info(
+            f"{CYAN}Evaluating your {type_} function corresponding to `{file_.get('path') or file_.get('id')}` on Humanloop{RESET} \n\n"
+        )
+    except KeyError:
+        type_ = "flow"
+        logger.warning("No `file` type specified, defaulting to flow.")
+
+    # If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop.
+    function_ = typing.cast(Optional[Callable], file_.pop("callable", None))
+    if function_ is None:
+        if type_ == "flow":
+            raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
+        else:
+            logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
+
+    file_dict = {**file_, **version}
+    hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
+
+    if type_ == "flow":
+        # Be more lenient with Flow versions as they are arbitrary JSON
+        try:
+            Flow.model_validate(version)
+        except ValidationError:
+            flow_version = {"attributes": version}
+            file_dict = {**file_, **flow_version}
+        hl_file = client.flows.upsert(**file_dict)
+
+    elif type_ == "prompt":
+        try:
+            Prompt.model_validate(version)
+        except ValidationError as error_:
+            logger.error(msg=f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n{error_}")
+            raise error_
+        hl_file = client.prompts.upsert(**file_dict)
+
+    elif type_ == "tool":
+        try:
+            Tool.model_validate(version)
+        except ValidationError as error_:
+            logger.error(msg=f"Invalid Tool `version` in your `file` request. \n\nValidation error: \n{error_}")
+            raise error_
+        hl_file = client.tools.upsert(**file_dict)
+
+    elif type_ == "evaluator":
+        hl_file = client.evaluators.upsert(**file_dict)
+
+    else:
+        raise NotImplementedError(f"Unsupported File type: {type_}")
+
+    # Upsert the Dataset
+    if "action" not in dataset:
+        dataset["action"] = "set"
+    if "datapoints" not in dataset:
+        dataset["datapoints"] = []
+        # Use `upsert` to get the existing dataset ID if no datapoints are provided, given we can't `get` on path.
+        dataset["action"] = "add"
+    hl_dataset = client.datasets.upsert(
+        **dataset,
+    )
+    hl_dataset = client.datasets.get(
+        id=hl_dataset.id,
+        version_id=hl_dataset.version_id,
+        include_datapoints=True,
+    )
+
+    # Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id`
+    local_evaluators: List[tuple[EvaluatorResponse, Callable]] = []
+    if evaluators:
+        for evaluator_request in evaluators:
+            # If a callable is provided for an Evaluator, we treat it as External
+            eval_function = evaluator_request.get("callable")
+            if eval_function is not None:
+                # TODO: support the case where `file` Logs are generated on Humanloop but Evaluator Logs are generated locally
+                if function_ is None:
+                    raise ValueError(
+                        "Local Evaluators are only supported when generating Logs locally using your "
+                        f"{type_}'s `callable`. Please provide a `callable` for your file in order "
+                        "to run Evaluators locally."
+                    )
+                spec = ExternalEvaluator(
+                    arguments_type=evaluator_request["args_type"],
+                    return_type=evaluator_request["return_type"],
+                    attributes={"code": inspect.getsource(eval_function)},
+                    evaluator_type="external",
+                )
+                evaluator = client.evaluators.upsert(
+                    id=evaluator_request.get("id"),
+                    path=evaluator_request.get("path"),
+                    spec=spec,
+                )
+                local_evaluators.append((evaluator, eval_function))
+
+    # Cast for the type checker - the None case is handled where function_ is used
+    function_ = typing.cast(Callable, function_)
+
+    # Validate upfront that the local Evaluators and the Dataset fit together
+    requires_target = False
+    for local_evaluator, _ in local_evaluators:
+        if local_evaluator.spec.arguments_type == "target_required":
+            requires_target = True
+            break
+    if requires_target:
+        missing_target = 0
+        for _datapoint in hl_dataset.datapoints:
+            if not _datapoint.target:
+                missing_target += 1
+        if missing_target > 0:
+            raise ValueError(
+                f"{missing_target} Datapoints have no target. A target "
+                f"is required for the Evaluator: {local_evaluator.path}"
+            )
+
+    # Get or create the Evaluation based on the name
+    evaluation = None
+    try:
+        evaluation = client.evaluations.create(
+            name=name,
+            evaluators=[{"path": e["path"]} for e in (evaluators or [])],
+            file={"id": hl_file.id},
+        )
+    except ApiError as error_:
+        # If the name already exists, fetch the existing Evaluation
+        # TODO: Update API GET to allow querying by name and file.
+        if error_.status_code == 409:
+            evals = client.evaluations.list(file_id=hl_file.id, size=50)
+            for page in evals.iter_pages():
+                evaluation = next((e for e in page.items if e.name == name), None)
+                if evaluation is not None:
+                    break
+        else:
+            raise error_
+    if not evaluation:
+        raise ValueError(f"Evaluation with name {name} not found.")
+
+    # Create a new Run
+    run: EvaluationRunResponse = client.evaluations.create_run(
+        id=evaluation.id,
+        dataset={"version_id": hl_dataset.version_id},
+        version={"version_id": hl_file.version_id},
+        orchestrated=function_ is None,
+        use_existing_logs=False,
+    )
+    # Every Run will generate a new batch of Logs
+    run_id = run.id
+
+    progress_bar = _SimpleProgressBar(len(hl_dataset.datapoints))
+
+    # Define the function that executes the `callable` in parallel and Logs to Humanloop
+    def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
+        def upload_callback(log_id: str):
+            """Logic run after the Log has been created."""
+            _run_local_evaluators(
+                client=client,
+                log_id=log_id,
+                datapoint=dp,
+                local_evaluators=local_evaluators,
+            )
+            progress_bar.increment()
+
+        datapoint_dict = dp.dict()
+        # Set the Evaluation Context for the current datapoint
+        evaluation_context_variable.set(
+            EvaluationContext(
+                source_datapoint_id=dp.id,
+                upload_callback=upload_callback,
+                file_id=file_id,
+                run_id=run_id,
+                path=file_path,
+            )
+        )
+        logger.debug(
+            "process_datapoint on Thread %s: evaluating Datapoint %s with EvaluationContext %s",
+            threading.get_ident(),
+            datapoint_dict,
+            # .get() is safe since process_datapoint is always called in the context of an Evaluation
+            evaluation_context_variable.get(),
+        )
+        # TODO: shouldn't this only be defined in the case where we actually need to log?
+        log_func = _get_log_func(
+            client=client,
+            file_type=type_,
+            file_id=hl_file.id,
+            version_id=hl_file.version_id,
+            run_id=run_id,
+        )
+        start_time = datetime.now()
+        try:
+            if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
+                output = function_(
+                    **datapoint_dict["inputs"],
+                    messages=datapoint_dict["messages"],
+                )
+            else:
+                output = function_(**datapoint_dict["inputs"])
+
+            if not isinstance(output, str):
+                try:
+                    output = json.dumps(output)
+                except Exception as serialization_error:
+                    # Re-raise if the output fails to serialize
+                    raise ValueError(
+                        f"Your {type_}'s `callable` must return a string or a JSON serializable object."
+                    ) from serialization_error
+
+            # .get() is safe since process_datapoint is always called in the context of an Evaluation
+            context_variable = evaluation_context_variable.get()
+            if context_variable is not None:
+                # The Evaluation Context has not been consumed:
+                # function_ is a plain callable, so we need to create a Log
+                logger.debug(
+                    "process_datapoint on Thread %s: function_ %s is a simple callable, context was not consumed",
+                    threading.get_ident(),
+                    function_.__name__,
+                )
+                log_func(
+                    inputs=dp.inputs,
+                    output=output,
+                    start_time=start_time,
+                    end_time=datetime.now(),
+                )
+        except Exception as e:
+            log_func(
+                inputs=dp.inputs,
+                error=str(e),
+                source_datapoint_id=dp.id,
+                run_id=run_id,
+                start_time=start_time,
+                end_time=datetime.now(),
+            )
+            logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}.\n Error: {str(e)}")
+
+    # Execute the function and send the Logs to Humanloop in parallel
+    logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
+    logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}")
+    logger.info(f"{CYAN}Run ID: {run_id}{RESET}")
+
+    # Generate locally if a file `callable` is provided
+    if function_:
+        logger.info(
+            f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} "
+        )
+        with ThreadPoolExecutor(max_workers=workers) as executor:
+            for datapoint in hl_dataset.datapoints:
+                executor.submit(
+                    process_datapoint,
+                    datapoint,
+                    hl_file.id,
+                    hl_file.path,
+                    run_id,
+                )
+    else:
+        # TODO: trigger the run when the updated API is available
+        logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}")
+
+    # Wait for the Evaluation to complete, then print the results
+    complete = False
+
+    while not complete:
+        stats = client.evaluations.get_stats(id=evaluation.id)
+        logger.info(f"\r{stats.progress}")
+        run_stats = next(
+            (run_stats for run_stats in stats.run_stats if run_stats.run_id == run_id),
+            None,
+        )
+        complete = run_stats is not None and run_stats.status == "completed"
+        if not complete:
+            time.sleep(5)
+
+    # Print the Evaluation results
+    logger.info(stats.report)
+
+    checks: List[EvaluatorCheck] = []
+
+    # Skip `_check_evaluation_improvement` if no thresholds were provided and there is
+    # only one Run, as the logs would not be helpful in that case
+    if any(evaluator.get("threshold") is not None for evaluator in (evaluators or [])) or len(stats.run_stats) > 1:
+        for evaluator in evaluators or []:
+            score, delta = _check_evaluation_improvement(
+                evaluation=evaluation,
+                stats=stats,
+                evaluator_path=evaluator["path"],
+                run_id=run_id,
+            )[1:]
+            threshold_check = None
+            threshold = evaluator.get("threshold")
+            if threshold is not None:
+                threshold_check = _check_evaluation_threshold(
+                    evaluation=evaluation,
+                    stats=stats,
+                    evaluator_path=evaluator["path"],
+                    threshold=threshold,
+                    run_id=run_id,
+                )
+            checks.append(
+                EvaluatorCheck(
+                    path=evaluator["path"],
+                    # TODO: Add back in with number valence on Evaluators
+                    # improvement_check=improvement_check,
+                    score=score,
+                    delta=delta,
+                    threshold=threshold,
+                    threshold_check=threshold_check,
+                    evaluation_id=evaluation.id,
+                )
+            )
+
+    logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n")
+    return checks
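+
+
+# Illustrative note (hypothetical values): a returned check such as
+# EvaluatorCheck(path="qa/exact-match", score=0.85, delta=0.05, threshold=0.8,
+# threshold_check=True, evaluation_id="ev_123") could gate a CI pipeline, e.g.
+# `assert all(c.threshold_check for c in checks if c.threshold is not None)`.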
+ "id": file_id, + "version_id": version_id, + "run_id": run_id, + } + if file_type == "flow": + return partial(client.flows.log, **log_request, trace_status="complete") + elif file_type == "prompt": + return partial(client.prompts.log, **log_request) + elif file_type == "evaluator": + return partial(client.evaluators.log, **log_request) + elif file_type == "tool": + return partial(client.tools.log, **log_request) + else: + raise NotImplementedError(f"Unsupported File version: {file_type}") + + +def _get_score_from_evaluator_stat( + stat: Union[NumericStats, BooleanStats], +) -> Union[float, None]: + """Get the score from an Evaluator Stat.""" + score = None + if isinstance(stat, BooleanStats): + if stat.total_logs: + score = round(stat.num_true / stat.total_logs, 2) + elif isinstance(stat, NumericStats): + score = round(stat.mean, 2) + else: + raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}") + return score + + +def _get_evaluator_stats_by_path( + stat: RunStatsResponse, + evaluation: EvaluationResponse, +) -> Dict[str, Union[NumericStats, BooleanStats]]: + """Get the Evaluator stats by path.""" + # TODO: Update the API so this is not necessary + evaluators_by_id = {evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators} + evaluator_stats_by_path = { + evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat + for evaluator_stat in stat.evaluator_stats + } + return evaluator_stats_by_path + + +def _check_evaluation_threshold( + evaluation: EvaluationResponse, + stats: EvaluationStats, + evaluator_path: str, + threshold: float, + run_id: str, +) -> bool: + """Checks if the latest version has an average Evaluator result above a threshold.""" + # TODO: Update the API so this is not necessary + evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=next( + (stat for stat in stats.run_stats if stat.run_id == run_id), + None, + ), + evaluation=evaluation, + ) + if evaluator_path in evaluator_stats_by_path: + evaluator_stat = evaluator_stats_by_path[evaluator_path] + score = _get_score_from_evaluator_stat(stat=evaluator_stat) + if score >= threshold: + logger.info( + f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" + ) + return True + else: + logger.info( + f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" + ) + return False + else: + raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") + + +def _check_evaluation_improvement( + evaluation: EvaluationResponse, + evaluator_path: str, + stats: EvaluationStats, + run_id: str, +) -> Tuple[bool, float, float]: + """ + Check the latest version has improved across for a specific Evaluator. 
+ + :returns: A tuple of (improvement, latest_score, delta since previous score) + """ + # TODO: Update the API so this is not necessary + + latest_evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=next( + (stat for stat in stats.run_stats if stat.run_id == run_id), + None, + ), + evaluation=evaluation, + ) + if len(stats.run_stats) == 1: + logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}") + return True, 0, 0 + + previous_evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1 + evaluation=evaluation, + ) + if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path: + latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path] + previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path] + latest_score = _get_score_from_evaluator_stat(stat=latest_evaluator_stat) + previous_score = _get_score_from_evaluator_stat(stat=previous_evaluator_stat) + if latest_score is None or previous_score is None: + raise ValueError(f"Could not find score for Evaluator {evaluator_path}.") + diff = round(latest_score - previous_score, 2) + if diff >= 0: + logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + return True, latest_score, diff + else: + logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + return False, latest_score, diff + else: + raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") + + +def _run_local_evaluators( + client: "BaseHumanloop", + log_id: str, + datapoint: Optional[Datapoint], + local_evaluators: list[tuple[EvaluatorResponse, Callable]], +): + """Run local Evaluators on the Log and send the judgments to Humanloop.""" + # Need to get the full log to pass to the evaluators + log = client.logs.get(id=log_id) + if not isinstance(log, dict): + log_dict = log.dict() + else: + log_dict = log + datapoint_dict = datapoint.dict() if datapoint else None + for local_evaluator, eval_function in local_evaluators: + start_time = datetime.now() + try: + if local_evaluator.spec.arguments_type == "target_required": + judgement = eval_function( + log_dict, + datapoint_dict, + ) + else: + judgement = eval_function(log_dict) + + _ = client.evaluators.log( + version_id=local_evaluator.version_id, + parent_id=log_id, + judgment=judgement, + id=local_evaluator.id, + start_time=start_time, + end_time=datetime.now(), + ) + except Exception as e: + _ = client.evaluators.log( + parent_id=log_id, + id=local_evaluator.id, + error=str(e), + start_time=start_time, + end_time=datetime.now(), + ) + logger.warning(f"\nEvaluator {local_evaluator.path} failed with error {str(e)}") diff --git a/src/humanloop/eval_utils/types.py b/src/humanloop/eval_utils/types.py new file mode 100644 index 00000000..845a8542 --- /dev/null +++ b/src/humanloop/eval_utils/types.py @@ -0,0 +1,91 @@ +from typing import Callable, Literal, Optional, Sequence, TypedDict, Union + +from pydantic import BaseModel +from typing_extensions import NotRequired + +from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict +from humanloop.requests import CreateDatapointRequestParams as DatapointDict +from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator + +# We use TypedDicts for requests, which is consistent with the rest of the SDK +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests import 
HumanEvaluatorRequestParams as HumanEvaluatorDict +from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict +from humanloop.requests import PromptKernelRequestParams as PromptDict +from humanloop.requests import ToolKernelRequestParams as ToolDict +from humanloop.types import ( + EvaluatorArgumentsType, + EvaluatorReturnTypeEnum, +) + +# Responses are Pydantic models and we leverage them for improved request validation +from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo + +EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator] +Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict] +FileType = Literal["flow", "prompt", "tool", "evaluator"] + + +class Identifiers(TypedDict): + """Common identifiers for the objects required to run an Evaluation.""" + + id: NotRequired[str] + """The ID of the File on Humanloop.""" + path: NotRequired[str] + """The path of the File on Humanloop.""" + + +class File(Identifiers): + """A File on Humanloop (Flow, Prompt, Tool, Evaluator).""" + + type: NotRequired[FileType] + """The type of File this callable relates to on Humanloop.""" + version: NotRequired[Version] + """The contents uniquely define the version of the File on Humanloop.""" + callable: NotRequired[Callable] + """The function being evaluated. + It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`. + If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`. + It should return a string or json serializable output. + """ + + +class Dataset(Identifiers): + datapoints: NotRequired[Sequence[DatapointDict]] + """The datapoints to map your function over to produce the outputs required by the evaluation.""" + action: NotRequired[UpdateDatasetAction] + """How to update the Dataset given the provided Datapoints; + `set` replaces the existing Datapoints and `add` appends to the existing Datapoints.""" + + +class Evaluator(Identifiers): + """The Evaluator to provide judgments for this Evaluation.""" + + args_type: NotRequired[EvaluatorArgumentsType] + """The type of arguments the Evaluator expects - only required for local Evaluators.""" + return_type: NotRequired[EvaluatorReturnTypeEnum] + """The type of return value the Evaluator produces - only required for local Evaluators.""" + callable: NotRequired[Callable] + """The function to run on the logs to produce the judgment - only required for local Evaluators.""" + threshold: NotRequired[float] + """The threshold to check the Evaluator against. 
If the aggregate value of the Evaluator is below this threshold, the check will fail.""" + + +class EvaluatorCheck(BaseModel): + """Summary data for an Evaluator check.""" + + path: str + """The path of the Evaluator used in the check.""" + # TODO: Add number valence and improvement check + # improvement_check: bool + # """Whether the latest version of your function has improved across the Dataset for a specific Evaluator.""" + score: float + """The score of the latest version of your function for a specific Evaluator.""" + delta: float + """The change in score since the previous version of your function for a specific Evaluator.""" + threshold: Optional[float] + """The threshold to check the Evaluator against.""" + threshold_check: Optional[bool] + """Whether the latest version has an average Evaluator result above a threshold.""" + evaluation_id: str + """The ID of the corresponding Evaluation.""" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py new file mode 100644 index 00000000..544d2e7b --- /dev/null +++ b/src/humanloop/otel/exporter.py @@ -0,0 +1,365 @@ +import contextvars +import logging +import threading +import time +import typing +from queue import Empty as EmptyQueue +from queue import Queue +from threading import Thread +from typing import Any, Optional + +from opentelemetry import trace +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult + +from humanloop.core import ApiError as HumanloopApiError +from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_FLOW_PREREQUISITES_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span +from humanloop.requests.flow_kernel_request import FlowKernelRequestParams +from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams +from humanloop.requests.tool_kernel_request import ToolKernelRequestParams + +if typing.TYPE_CHECKING: + from humanloop.client import Humanloop + + +logger = logging.getLogger("humanloop.sdk") + + +class HumanloopSpanExporter(SpanExporter): + """Upload Spans created by SDK decorators to Humanloop. + + Spans not created by Humanloop SDK decorators will be ignored. + """ + + DEFAULT_NUMBER_THREADS = 4 + + def __init__( + self, + client: "Humanloop", + worker_threads: Optional[int] = None, + ) -> None: + """Upload Spans created by SDK decorators to Humanloop. + + Spans not created by Humanloop SDK decorators will be ignored. + """ + super().__init__() + self._client = client + # Uploaded spans translate to a Log on Humanloop. 
The IDs are required to link Logs in a Flow Trace
+        self._span_id_to_uploaded_log_id: dict[int, Optional[str]] = {}
+        # Work queue for the threads uploading the spans
+        self._upload_queue: Queue = Queue()
+        # Worker threads to export the spans
+        self._threads: list[Thread] = [
+            Thread(
+                target=self._do_work,
+                daemon=True,
+            )
+            for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS)
+        ]
+        # Signals that no more work will arrive and that the threads
+        # should wind down once the queue is empty
+        self._shutdown: bool = False
+        for thread in self._threads:
+            thread.start()
+            logger.debug("Exporter Thread %s started", thread.ident)
+        # Maps a Flow Log's Span ID to the children Spans that must be uploaded first
+        self._flow_log_prerequisites: dict[int, set[int]] = {}
+
+    def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
+        def is_evaluated_file(
+            span: ReadableSpan,
+            evaluation_context: Optional[EvaluationContext],
+        ) -> bool:
+            if evaluation_context is None:
+                return False
+
+            return span.attributes.get(HUMANLOOP_PATH_KEY) == evaluation_context["path"]  # type: ignore
+
+        if not self._shutdown:
+            try:
+                evaluation_context = self._client.evaluation_context_variable.get()
+                if evaluation_context is not None and len(spans) > 1:
+                    raise RuntimeError("HumanloopSpanExporter expected a single span when running an evaluation")
+                if not is_evaluated_file(spans[0], evaluation_context):
+                    evaluation_context = None
+            except LookupError:
+                # No Evaluation is ongoing
+                evaluation_context = None
+            for span in spans:
+                if is_humanloop_span(span):
+                    # We pass the EvaluationContext from the run_eval utility thread to
+                    # the export thread so the .log action works as expected
+                    evaluation_context_copy = None
+                    for context_var, context_var_value in contextvars.copy_context().items():
+                        if context_var.name == EVALUATION_CONTEXT_VARIABLE_NAME:
+                            evaluation_context_copy = context_var_value
+                    self._upload_queue.put(
+                        (
+                            span,
+                            evaluation_context_copy,
+                        ),
+                    )
+                    logger.debug(
+                        "[HumanloopSpanExporter] Span %s %s with EvaluationContext %s added to upload queue",
+                        span.context.span_id,
+                        span.name,
+                        evaluation_context_copy,
+                    )
+            # Reset the EvaluationContext so run_eval does not
+            # create a duplicate Log
+            if evaluation_context is not None and is_evaluated_file(
+                spans[0],
+                evaluation_context,
+            ):
+                logger.debug(
+                    "[HumanloopSpanExporter] EvaluationContext %s marked as exhausted for Log in Span %s",
+                    evaluation_context,
+                    spans[0].attributes,
+                )
+                # Mark the EvaluationContext as used
+                self._client.evaluation_context_variable.set(None)
+            return SpanExportResult.SUCCESS
+        else:
+            logger.warning("[HumanloopSpanExporter] Shutting down, not accepting new spans")
+            return SpanExportResult.FAILURE
+
+    def shutdown(self) -> None:
+        self._shutdown = True
+        for thread in self._threads:
+            thread.join()
+            logger.debug("[HumanloopSpanExporter] Exporter Thread %s joined", thread.ident)
+
+    def force_flush(self, timeout_millis: int = 10000) -> bool:
+        self._shutdown = True
+        for thread in self._threads:
+            # join() takes seconds, so convert from milliseconds
+            thread.join(timeout=timeout_millis / 1000)
+        self._upload_queue.join()
+
+        return True
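+
+    # Illustrative note on the ordering enforced by _do_work below (hypothetical
+    # span IDs): for a trace flow(span 1) -> prompt(span 2) -> tool(span 3), the
+    # spans finish bottom-up (3, 2, 1) but must be uploaded top-down, so span 3
+    # is re-queued until span 2's Log exists, which in turn waits on span 1's
+    # Flow Log.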
The dependency happens in a Flow Trace context, where + the Trace parent must be uploaded first. The Span Processor will send in Spans + bottoms-up, while the upload of a Trace happens top-down. If a Span did not + have its span uploaded yet, it will be re-queued to be uploaded later. + """ + + # Do work while the Exporter was not instructed to + # wind down or the queue is not empty + while self._upload_queue.qsize() > 0 or not self._shutdown: + try: + thread_args: tuple[ReadableSpan, EvaluationContext] # type: ignore + # Don't block or the thread will never be notified of the shutdown + thread_args = self._upload_queue.get( + block=False, + ) # type: ignore + span_to_export, evaluation_context = thread_args + # Set the EvaluationContext for the thread so the .log action works as expected + # NOTE: Expecting the evaluation thread to send a single span so we are + # not resetting the EvaluationContext in the scope of the export thread + self._client.evaluation_context_variable.set(evaluation_context) + except EmptyQueue: + continue + if span_to_export.parent is None: + # Span is not part of a Flow Log + self._export_span_dispatch(span_to_export) + logger.debug( + "[HumanloopSpanExporter] _do_work on Thread %s: Dispatching span %s %s", + threading.get_ident(), + span_to_export.context.span_id, + span_to_export.name, + ) + elif span_to_export.parent.span_id in self._span_id_to_uploaded_log_id: + # Span is part of a Flow and its parent has been uploaded + self._export_span_dispatch(span_to_export) + logger.debug( + "[HumanloopSpanExporter] _do_work on Thread %s: Dispatching span %s %s", + threading.get_ident(), + span_to_export.context.span_id, + span_to_export.name, + ) + else: + # Requeue the Span and upload after its parent + self._upload_queue.put((span_to_export, evaluation_context)) + self._upload_queue.task_done() + + def _mark_span_completed(self, span_id: int) -> None: + for flow_log_span_id, flow_children_span_ids in self._flow_log_prerequisites.items(): + if span_id in flow_children_span_ids: + flow_children_span_ids.remove(span_id) + if len(flow_children_span_ids) == 0: + # All logs in the Trace have been uploaded, mark the Flow Log as complete + flow_log_id = self._span_id_to_uploaded_log_id[flow_log_span_id] + if flow_log_id is None: + logger.error( + "[HumanloopSpanExporter] Cannot complete Flow log %s, log ID is None", + flow_log_span_id, + ) + else: + self._client.flows.update_log(log_id=flow_log_id, trace_status="complete") + break + + def _export_span_dispatch(self, span: ReadableSpan) -> None: + hl_file = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY) + file_type = span._attributes.get(HUMANLOOP_FILE_TYPE_KEY) # type: ignore + parent_span_id = span.parent.span_id if span.parent else None + + while parent_span_id and self._span_id_to_uploaded_log_id.get(parent_span_id) is None: + logger.debug( + "[HumanloopSpanExporter] _export_span_dispatch on Thread %s Span %s %s waiting for parent %s to be uploaded", + threading.get_ident(), + span.context.span_id, + span.name, + parent_span_id, + ) + + logger.debug( + "[HumanloopSpanExporter] Exporting span %s with file type %s", + span, + file_type, + ) + + if file_type == "prompt": + export_func = self._export_prompt + elif file_type == "tool": + export_func = self._export_tool + elif file_type == "flow": + export_func = self._export_flow + else: + raise NotImplementedError(f"Unknown span type: {hl_file}") + export_func(span=span) + + def _export_prompt(self, span: ReadableSpan) -> None: + file_object: dict[str, Any] = 
read_from_opentelemetry_span( + span, + key=HUMANLOOP_FILE_KEY, + ) + log_object: dict[str, Any] = read_from_opentelemetry_span( + span, + key=HUMANLOOP_LOG_KEY, + ) + # NOTE: Due to OTel conventions, attributes with value of None are removed + # If not present, instantiate as empty dictionary + if "inputs" not in log_object: + log_object["inputs"] = {} + if "messages" not in log_object: + log_object["messages"] = [] + if "tools" not in file_object["prompt"]: + file_object["prompt"]["tools"] = [] + + path: str = file_object["path"] + prompt: PromptKernelRequestParams = file_object["prompt"] + + span_parent_id = span.parent.span_id if span.parent else None + trace_parent_id = self._span_id_to_uploaded_log_id[span_parent_id] if span_parent_id else None + + if "attributes" not in prompt or not prompt["attributes"]: + prompt["attributes"] = {} + + try: + log_response = self._client.prompts.log( + path=path, + prompt=prompt, + **log_object, + trace_parent_id=trace_parent_id, + ) + self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id + except HumanloopApiError: + self._span_id_to_uploaded_log_id[span.context.span_id] = None + self._mark_span_completed(span_id=span.context.span_id) + + def _export_tool(self, span: ReadableSpan) -> None: + file_object: dict[str, Any] = read_from_opentelemetry_span( + span, + key=HUMANLOOP_FILE_KEY, + ) + log_object: dict[str, Any] = read_from_opentelemetry_span( + span, + key=HUMANLOOP_LOG_KEY, + ) + + path: str = file_object["path"] + tool: ToolKernelRequestParams = file_object["tool"] + + span_parent_id = span.parent.span_id if span.parent else None + trace_parent_id = self._span_id_to_uploaded_log_id[span_parent_id] if span_parent_id else None + + # API expects an empty dictionary if user does not supply attributes + if not tool.get("attributes"): + tool["attributes"] = {} + if not tool.get("setup_values"): + tool["setup_values"] = {} + if "parameters" in tool["function"] and "properties" not in tool["function"]["parameters"]: + tool["function"]["parameters"]["properties"] = {} + + try: + log_response = self._client.tools.log( + path=path, + tool=tool, + **log_object, + trace_parent_id=trace_parent_id, + ) + self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id + except HumanloopApiError: + self._span_id_to_uploaded_log_id[span.context.span_id] = None + self._mark_span_completed(span_id=span.context.span_id) + + def _export_flow(self, span: ReadableSpan) -> None: + file_object: dict[str, Any] = read_from_opentelemetry_span( + span, + key=HUMANLOOP_FILE_KEY, + ) + log_object: dict[str, Any] = read_from_opentelemetry_span( + span, + key=HUMANLOOP_LOG_KEY, + ) + # Spans that must be uploaded before the Flow Span is completed + try: + prerequisites: list[int] = read_from_opentelemetry_span( # type: ignore + span=span, + key=HUMANLOOP_FLOW_PREREQUISITES_KEY, + ) + self._flow_log_prerequisites[span.context.span_id] = set(prerequisites) + except KeyError: + self._flow_log_prerequisites[span.context.span_id] = set() + + path: str = file_object["path"] + flow: FlowKernelRequestParams + if not file_object.get("flow"): + flow = {"attributes": {}} + else: + flow = file_object["flow"] + + span_parent_id = span.parent.span_id if span.parent else None + trace_parent_id = self._span_id_to_uploaded_log_id[span_parent_id] if span_parent_id else None + + if "output" not in log_object: + log_object["output"] = None + try: + log_response = self._client.flows.log( + path=path, + flow=flow, + **log_object, + trace_parent_id=trace_parent_id, + ) 
+ self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id + except HumanloopApiError as e: + logger.error(str(e)) + self._span_id_to_uploaded_log_id[span.context.span_id] = None + self._mark_span_completed(span_id=span.context.span_id) diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py index 37ca8cea..737ed0d3 100644 --- a/src/humanloop/otel/helpers.py +++ b/src/humanloop/otel/helpers.py @@ -5,7 +5,6 @@ from opentelemetry.trace import SpanKind from opentelemetry.util.types import AttributeValue - NestedDict = dict[str, Union["NestedDict", AttributeValue]] NestedList = list[Union["NestedList", NestedDict]] diff --git a/src/humanloop/types/trace_status.py b/src/humanloop/types/trace_status.py new file mode 100644 index 00000000..ea147d9f --- /dev/null +++ b/src/humanloop/types/trace_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TraceStatus = typing.Union[typing.Literal["complete", "incomplete"], typing.Any] diff --git a/src/humanloop/utilities/__init__.py b/src/humanloop/utilities/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/humanloop/utilities/flow.py b/src/humanloop/utilities/flow.py new file mode 100644 index 00000000..f63573ed --- /dev/null +++ b/src/humanloop/utilities/flow.py @@ -0,0 +1,89 @@ +import logging +from functools import wraps +from typing import Any, Callable, Mapping, Optional, Sequence + +from opentelemetry.sdk.trace import Span +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.utilities.helpers import args_to_inputs +from humanloop.eval_utils.types import File +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests.flow_kernel_request import FlowKernelRequestParams + +logger = logging.getLogger("humanloop.sdk") + + +def flow( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore +): + flow_kernel["attributes"] = {k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None} + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: + span: Span + with opentelemetry_tracer.start_as_current_span("humanloop.flow") as span: # type: ignore + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "flow") + + if flow_kernel: + write_to_opentelemetry_span( + span=span, + key=f"{HUMANLOOP_FILE_KEY}.flow", + value=flow_kernel, # type: ignore + ) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=None, + ) + error = str(e) + + flow_log = { + "inputs": args_to_inputs(func, args, kwargs), + "output": output_stringified, + "error": error, + } + + # Write the Flow Log to the Span on HL_LOG_OT_KEY + if flow_log: + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=flow_log, # type: ignore + ) + + # Return the output of the decorated 
function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="flow", + version=FlowDict(**flow_kernel), # type: ignore + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/utilities/helpers.py b/src/humanloop/utilities/helpers.py new file mode 100644 index 00000000..d501f800 --- /dev/null +++ b/src/humanloop/utilities/helpers.py @@ -0,0 +1,21 @@ +import inspect +from typing import Any, Callable + + +def args_to_inputs(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]: + """Maps arguments to their corresponding parameter names in the function signature. + + For example: + ```python + def foo(a, b=2, c=3): + pass + + assert args_to_inputs(foo, (1, 2), {}) == {'a': 1, 'b': 2, 'c': 3} + assert args_to_inputs(foo, (1,), {'b': 8}) == {'a': 1, 'b': 8, 'c': 3} + assert args_to_inputs(foo, (1,), {}) == {'a': 1, 'b': 2, 'c': 3} + ``` + """ + signature = inspect.signature(func) + bound_args = signature.bind(*args, **kwargs) + bound_args.apply_defaults() + return dict(bound_args.arguments) diff --git a/src/humanloop/utilities/prompt.py b/src/humanloop/utilities/prompt.py new file mode 100644 index 00000000..4e0f55f5 --- /dev/null +++ b/src/humanloop/utilities/prompt.py @@ -0,0 +1,88 @@ +import logging +from functools import wraps +from typing import Any, Callable, Mapping, Optional, Sequence + +from opentelemetry.sdk.trace import Span +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.utilities.helpers import args_to_inputs +from humanloop.utilities.types import DecoratorPromptKernelRequestParams +from humanloop.eval_utils import File +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span + +logger = logging.getLogger("humanloop.sdk") + + +def prompt( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + # TODO: Template can be a list of objects? 
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore +): + def decorator(func: Callable): + @wraps(func) + def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: + span: Span + with opentelemetry_tracer.start_as_current_span("humanloop.prompt") as span: # type: ignore + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt") + + if prompt_kernel: + write_to_opentelemetry_span( + span=span, + key=f"{HUMANLOOP_FILE_KEY}.prompt", + value={ + **prompt_kernel, # type: ignore + "attributes": prompt_kernel.get("attributes") or None, # type: ignore + }, # type: ignore + ) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = str(e) + + prompt_log = { + "inputs": args_to_inputs(func, args, kwargs), + "output": output_stringified, + "error": error, + } + + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=prompt_log, # type: ignore + ) + + # Return the output of the decorated function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="prompt", + version={**prompt_kernel}, # type: ignore + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/utilities/tool.py b/src/humanloop/utilities/tool.py new file mode 100644 index 00000000..c17903d1 --- /dev/null +++ b/src/humanloop/utilities/tool.py @@ -0,0 +1,505 @@ +import builtins +import inspect +import logging +import sys +import textwrap +import typing +from dataclasses import dataclass +from functools import wraps +from inspect import Parameter +from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union + +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.utilities.helpers import args_to_inputs +from humanloop.eval_utils import File +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.requests.tool_function import ToolFunctionParams +from humanloop.requests.tool_kernel_request import ToolKernelRequestParams + +if sys.version_info >= (3, 10): + import types + +logger = logging.getLogger("humanloop.sdk") + + +def tool( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore +): + def decorator(func: Callable): + enhanced_tool_kernel = _build_tool_kernel( + func=func, + attributes=tool_kernel.get("attributes"), + setup_values=tool_kernel.get("setup_values"), + strict=True, + ) + + # Mypy complains about adding attribute on function, but it's nice UX + func.json_schema = enhanced_tool_kernel["function"] # type: ignore + + @wraps(func) + def wrapper(*args, **kwargs): + with opentelemetry_tracer.start_as_current_span("humanloop.tool") as span: + # Write the Tool Kernel to the Span on HL_FILE_OT_KEY + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "tool") + if enhanced_tool_kernel: + write_to_opentelemetry_span( + span=span, + 
key=f"{HUMANLOOP_FILE_KEY}.tool", + value=enhanced_tool_kernel, + ) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = str(e) + + # Populate known Tool Log attributes + tool_log = { + "inputs": args_to_inputs(func, args, kwargs), + "output": output_stringified, + "error": error, + } + + # Write the Tool Log to the Span on HL_LOG_OT_KEY + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=tool_log, + ) + + # Return the output of the decorated function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="tool", + version=enhanced_tool_kernel, + callable=wrapper, + ) + + return wrapper + + return decorator + + +def _build_tool_kernel( + func: Callable, + attributes: Optional[dict[str, Optional[Any]]], + setup_values: Optional[dict[str, Optional[Any]]], + strict: bool, +) -> ToolKernelRequestParams: + """Build ToolKernelRequest object from decorated function.""" + try: + source_code = textwrap.dedent(inspect.getsource(func)) + except TypeError as e: + raise TypeError( + f"Cannot extract source code for function {func.__name__}. " + "Try decorating a plain function instead of a partial for example." + ) from e + # Remove decorator from source code by finding first 'def' + # This makes the source_code extraction idempotent whether + # the decorator is applied directly or used as a higher-order + # function + source_code = source_code[source_code.find("def") :] + kernel = ToolKernelRequestParams( + source_code=source_code, + function=_build_function_property( + func=func, + strict=strict, + ), + ) + if attributes: + kernel["attributes"] = attributes + if setup_values: + kernel["setup_values"] = setup_values + return kernel + + +def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams: + """Build `function` property inside ToolKernelRequest.""" + tool_name = func.__name__ + description = func.__doc__ + if description is None: + description = "" + return ToolFunctionParams( + name=tool_name, + description=description, + parameters=_build_function_parameters_property(func), # type: ignore + strict=strict, + ) + + +class _JSONSchemaFunctionParameters(TypedDict): + type: str + properties: dict[str, typing.Union[dict, list]] + required: list[str] + additionalProperties: Literal[False] + + +def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters: + """Build `function.parameters` property inside ToolKernelRequest.""" + properties: dict[str, Any] = {} + required: list[str] = [] + signature = inspect.signature(func) + + for parameter in signature.parameters.values(): + if parameter.kind in ( + inspect.Parameter.VAR_POSITIONAL, + inspect.Parameter.VAR_KEYWORD, + ): + raise ValueError(f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator") + + for parameter in signature.parameters.values(): + try: + parameter_signature = _parse_annotation(parameter.annotation) + except ValueError as e: + raise ValueError(f"Error parsing signature of @tool annotated function {func.__name__}: {e}") from e + param_json_schema = _annotation_parse_to_json_schema(parameter_signature) + properties[parameter.name] = param_json_schema + if not _parameter_is_optional(parameter): + 
required.append(parameter.name) + + if len(properties) == 0 and len(required) == 0: + # Edge case: function with no parameters + return _JSONSchemaFunctionParameters( + type="object", + properties={}, + required=[], + additionalProperties=False, + ) + return _JSONSchemaFunctionParameters( + type="object", + # False positive, expected tuple[str] but got tuple[str, ...] + required=tuple(required), # type: ignore + properties=properties, + additionalProperties=False, + ) + + +if sys.version_info >= (3, 11): + _PRIMITIVE_TYPES = Union[ + str, + int, + float, + bool, + Parameter.empty, # type: ignore + Ellipsis, # type: ignore + ] +else: + # Ellipsis not supported as type before Python 3.11 + _PRIMITIVE_TYPES = Union[ + str, + int, + float, + bool, + Parameter.empty, # type: ignore + ] + + +@dataclass +class _ParsedAnnotation: + def no_type_hint(self) -> bool: + """Check if the annotation has no type hint. + + Examples: + str -> False + list -> True + list[str] -> False + """ + raise NotImplementedError + + +@dataclass +class _ParsedPrimitiveAnnotation(_ParsedAnnotation): + annotation: _PRIMITIVE_TYPES + + def no_type_hint(self) -> bool: + return self.annotation is Parameter.empty or self.annotation is Ellipsis + + +@dataclass +class _ParsedDictAnnotation(_ParsedAnnotation): + # Both are null if no type hint e.g. dict vs dict[str, int] + key_annotation: Optional[_ParsedAnnotation] + value_annotation: Optional[_ParsedAnnotation] + + def no_type_hint(self) -> bool: + return self.key_annotation is None and self.value_annotation is None + + +@dataclass +class _ParsedTupleAnnotation(_ParsedAnnotation): + # Null if no type hint e.g. tuple vs tuple[str, int] + annotation: Optional[list[_ParsedAnnotation]] + + def no_type_hint(self) -> bool: + return self.annotation is None + + +@dataclass +class _ParsedUnionAnnotation(_ParsedAnnotation): + annotation: list[_ParsedAnnotation] + + +@dataclass +class _ParsedListAnnotation(_ParsedAnnotation): + # Null if no type hint e.g. list vs list[str] + annotation: Optional[_ParsedAnnotation] + + +@dataclass +class _ParsedOptionalAnnotation(_ParsedAnnotation): + annotation: _ParsedAnnotation + + +def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation: + """Parse constituent parts of a potentially nested type hint. + + Custom types are not supported, only built-in types and typing module types. 
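+
+    Illustrative examples (derived from the parsing rules below):
+        _parse_annotation(int) -> _ParsedPrimitiveAnnotation(annotation=int)
+        _parse_annotation(list[str]) -> _ParsedListAnnotation(annotation=_ParsedPrimitiveAnnotation(annotation=str))
+        _parse_annotation(Optional[int]) -> _ParsedOptionalAnnotation(annotation=_ParsedPrimitiveAnnotation(annotation=int))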
+ + """ + origin = typing.get_origin(annotation) + if origin is None: + # Either not a nested type or no type hint + # Parameter.empty is used for parameters without type hints + # Ellipsis is interpreted as Any + if annotation not in ( + str, + int, + float, + bool, + Parameter.empty, + Ellipsis, + dict, + list, + tuple, + ): + raise ValueError(f"Unsupported type hint: {annotation}") + + # Check if it's a complex type with no inner type + if annotation == builtins.dict: + return _ParsedDictAnnotation( + value_annotation=None, + key_annotation=None, + ) + if annotation == builtins.list: + return _ParsedListAnnotation( + annotation=None, + ) + if annotation == builtins.tuple: + return _ParsedTupleAnnotation( + annotation=None, + ) + + # Is a primitive type + return _ParsedPrimitiveAnnotation( + annotation=annotation, + ) + + if origin is list: + inner_annotation = _parse_annotation(typing.get_args(annotation)[0]) + return _ParsedListAnnotation( + annotation=inner_annotation, + ) + + if origin is dict: + key_type = _parse_annotation(typing.get_args(annotation)[0]) + value_type = _parse_annotation(typing.get_args(annotation)[1]) + return _ParsedDictAnnotation( + key_annotation=key_type, + value_annotation=value_type, + ) + + if origin is tuple: + return _ParsedTupleAnnotation( + annotation=[_parse_annotation(arg) for arg in typing.get_args(annotation)], + ) + + if origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType): + sub_types = typing.get_args(annotation) + if sub_types[-1] is type(None): + # type(None) in sub_types indicates Optional type + if len(sub_types) == 2: + # Union is an Optional type only + return _ParsedOptionalAnnotation( + annotation=_parse_annotation(sub_types[0]), + ) + # Union has sub_types and is Optional + return _ParsedOptionalAnnotation( + annotation=_ParsedUnionAnnotation( + annotation=[_parse_annotation(sub_type) for sub_type in sub_types[:-1]], + ) + ) + # Union type that is not Optional + return _ParsedUnionAnnotation( + annotation=[_parse_annotation(sub_type) for sub_type in sub_types], + ) + + raise ValueError(f"Unsupported origin: {origin}") + + +_JSON_SCHEMA_ANY = ["string", "integer", "number", "boolean", "object", "array", "null"] + + +def _annotation_parse_to_json_schema( + arg: _ParsedAnnotation, +) -> Mapping[str, Union[str, Mapping, Sequence]]: + """ + Convert parse result from _parse_annotation to JSON Schema for a parameter. + + The function recursively converts the nested type hints to a JSON Schema. + + Note that 'any' is not supported by JSON Schema, so we allow any type as a workaround. 
+ """ + arg_type: Mapping[str, Union[str, Mapping, Sequence]] + + if isinstance(arg, _ParsedOptionalAnnotation): + is_optional = True + arg = arg.annotation + else: + is_optional = False + + if isinstance(arg, _ParsedUnionAnnotation): + arg_type = { + "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation], + } + + elif isinstance(arg, _ParsedTupleAnnotation): + if arg.annotation is None: + # tuple annotation with no type hints + # This is equivalent with a list, since the + # number of elements is not specified + arg_type = { + "type": "array", + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation], + } + + elif isinstance(arg, _ParsedListAnnotation): + if arg.annotation is None: + # list annotation with no type hints + if is_optional: + arg_type = { + "type": ["array", "null"], + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": _annotation_parse_to_json_schema(arg.annotation), + } + + elif isinstance(arg, _ParsedDictAnnotation): + if arg.key_annotation is None and arg.value_annotation is None: + # dict annotation with no type hints + if is_optional: + arg_type = { + "type": ["object", "null"], + "properties": { + "key": {"type": _JSON_SCHEMA_ANY}, + "value": {"type": _JSON_SCHEMA_ANY}, + }, + } + else: + arg_type = { + "type": "object", + "properties": { + "key": {"type": _JSON_SCHEMA_ANY}, + "value": {"type": _JSON_SCHEMA_ANY}, + }, + } + else: + arg_type = { + "type": "object", + "properties": { + "key": _annotation_parse_to_json_schema(arg.key_annotation), # type: ignore + "value": _annotation_parse_to_json_schema(arg.value_annotation), # type: ignore + }, + } + + elif isinstance(arg, _ParsedPrimitiveAnnotation): + if arg.annotation is builtins.str: + arg_type = {"type": "string"} + if arg.annotation is builtins.int: + arg_type = {"type": "integer"} + if arg.annotation is builtins.float: + arg_type = {"type": "number"} + if arg.annotation is builtins.bool: + arg_type = {"type": "boolean"} + if arg.annotation is Parameter.empty or arg.annotation is Ellipsis: + # JSON Schema dropped support for 'any' type, we allow any type as a workaround + arg_type = {"type": _JSON_SCHEMA_ANY} + + else: + raise ValueError(f"Unsupported annotation type: {arg}") + + if is_optional: + if isinstance(arg, _ParsedUnionAnnotation): + for type_option in arg_type["anyOf"]: + if ( + isinstance(type_option["type"], list) # type: ignore + and "null" not in type_option["type"] # type: ignore + ): # type: ignore + type_option["type"] = [*type_option["type"], "null"] # type: ignore + elif not isinstance(type_option["type"], list): # type: ignore + type_option["type"] = [type_option["type"], "null"] # type: ignore + else: + if isinstance(arg_type["type"], list) and "null" not in arg_type["type"]: # type: ignore + arg_type = {**arg_type, "type": [*arg_type["type"], "null"]} # type: ignore + elif not isinstance(arg_type["type"], list): # type: ignore + arg_type = {**arg_type, "type": [arg_type["type"], "null"]} # type: ignore + + return arg_type + + +def _parameter_is_optional( + parameter: inspect.Parameter, +) -> bool: + """Check if tool parameter is mandatory. 
+ + Examples: + Optional[T] -> True + T | None -> True + T -> False + """ + # Check if the parameter can be None, either via Optional[T] or T | None type hint + origin = typing.get_origin(parameter.annotation) + # sub_types refers to T inside the annotation + sub_types = typing.get_args(parameter.annotation) + return ( + (origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType)) + and len(sub_types) > 0 + and sub_types[-1] is type(None) + ) diff --git a/src/humanloop/utilities/types.py b/src/humanloop/utilities/types.py new file mode 100644 index 00000000..f52f0178 --- /dev/null +++ b/src/humanloop/utilities/types.py @@ -0,0 +1,12 @@ +from typing_extensions import NotRequired + +from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams + + +class DecoratorPromptKernelRequestParams(PromptKernelRequestParams): + """See :class:`PromptKernelRequestParams` for more information. + + Allows the `model` field to be optional for Prompt decorator. + """ + + model: NotRequired[str] # type: ignore diff --git a/tests/utilities/__init__.py b/tests/utilities/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/utilities/test_flow_decorator.py b/tests/utilities/test_flow_decorator.py new file mode 100644 index 00000000..da895ee0 --- /dev/null +++ b/tests/utilities/test_flow_decorator.py @@ -0,0 +1,287 @@ +import os +import random +import string +import time + +from unittest.mock import patch +import pytest +from openai import OpenAI +from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam +from opentelemetry.sdk.trace import Tracer +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.sdk.trace import ReadableSpan + +from humanloop.utilities.flow import flow +from humanloop.utilities.prompt import prompt +from humanloop.utilities.tool import tool +from humanloop.otel.constants import HUMANLOOP_FILE_KEY +from humanloop.otel.exporter import HumanloopSpanExporter +from humanloop.otel.helpers import read_from_opentelemetry_span + + +def _test_scenario( + opentelemetry_tracer: Tracer, +): + @tool(opentelemetry_tracer=opentelemetry_tracer) + def _random_string() -> str: + """Return a random string.""" + return "".join( + random.choices( + string.ascii_letters + string.digits, + k=10, + ) + ) + + @prompt( # type: ignore + opentelemetry_tracer=opentelemetry_tracer, + path=None, + template="You are an assistant on the following topics: {topics}.", + ) + def _call_llm(messages: list[ChatCompletionMessageParam]) -> str: + client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + return ( + client.chat.completions.create( + model="gpt-4o", + messages=messages, + temperature=0.8, + ) + .choices[0] + .message.content + ) + _random_string() + + @flow( + opentelemetry_tracer=opentelemetry_tracer, + attributes={"foo": "bar", "baz": 7}, + ) + def _agent_call(messages: list[dict]) -> str: + return _call_llm(messages=messages) + + @flow( # type: ignore + opentelemetry_tracer=opentelemetry_tracer, + ) + def _flow_over_flow(messages: list[dict]) -> str: + return _agent_call(messages=messages) + + return _random_string, _call_llm, _agent_call, _flow_over_flow + + +def test_decorators_without_flow( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_hl_test_configuration + + _call_llm = _test_scenario(tracer)[1] + + # GIVEN a call to @prompt annotated function that calls a @tool + _call_llm( + [ + { + "role": 
"system", + "content": "You are an assistant on the following topics: greetings in foreign languages.", + }, + { + "role": "user", + "content": "Hello, how are you?", + }, + ] + ) + # WHEN exporting the spans + # Wait for the prompt span to be exported; It was waiting + # on the OpenAI call span to finish first + time.sleep(1) + spans = exporter.get_finished_spans() + + # THEN 3 spans arrive at the exporter + assert len(spans) == 3 + + for i in range(3): + if spans[i].name == "humanloop.tool": + tool_span = spans[i] + elif spans[i].name == "humanloop.prompt": + prompt_span = spans[i] + + assert read_from_opentelemetry_span( + span=tool_span, + key=HUMANLOOP_FILE_KEY, + )["tool"] + assert read_from_opentelemetry_span( + span=prompt_span, + key=HUMANLOOP_FILE_KEY, + )["prompt"] + + +def test_decorators_with_flow_decorator( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN a @flow entrypoint to an instrumented application + tracer, exporter = opentelemetry_hl_test_configuration + + _agent_call = _test_scenario(tracer)[2] + + # WHEN calling the Flow + _agent_call( + [ + { + "role": "system", + "content": "You are an assistant on the following topics: greetings in foreign languages.", + }, + { + "role": "user", + "content": "Hello, how are you?", + }, + ] + ) + + # THEN 4 spans arrive at the exporter + spans = exporter.get_finished_spans() + assert len(spans) == 4 + + for i in range(4): + if spans[i].name == "humanloop.flow": + flow_span = spans[i] + elif spans[i].name == "humanloop.prompt": + prompt_span = spans[i] + elif spans[i].name == "humanloop.tool": + tool_span = spans[i] + + # THEN the span are returned bottom to top + assert read_from_opentelemetry_span(span=tool_span, key=HUMANLOOP_FILE_KEY)["tool"] + assert read_from_opentelemetry_span(span=prompt_span, key=HUMANLOOP_FILE_KEY)["prompt"] + assert read_from_opentelemetry_span(span=flow_span, key=HUMANLOOP_FILE_KEY)["flow"] + + +def test_flow_decorator_flow_in_flow( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], + call_llm_messages: list[dict], +): + # GIVEN A configured OpenTelemetry tracer and exporter + tracer, exporter = opentelemetry_hl_test_configuration + + _flow_over_flow = _test_scenario(tracer)[3] + + # WHEN Calling the _test_flow_in_flow function with specific messages + _flow_over_flow(call_llm_messages) + + # Wait for the Prompt span to be exported; It was asynchronously waiting + # on the OpenAI call span to finish first + time.sleep(1) + + # THEN 5 spans arrive at the exporter + spans = exporter.get_finished_spans() + assert len(spans) == 5 + + for i in range(5): + if spans[i].name == "humanloop.flow" and spans[i].parent is None: + flow_span = spans[i] + elif spans[i].name == "humanloop.flow" and spans[i].parent: + nested_flow_span = spans[i] + elif spans[i].name == "humanloop.prompt": + prompt_span = spans[i] + elif spans[i].name == "humanloop.tool": + tool_span = spans[i] + + assert read_from_opentelemetry_span(span=tool_span, key=HUMANLOOP_FILE_KEY)["tool"] + assert read_from_opentelemetry_span(span=prompt_span, key=HUMANLOOP_FILE_KEY)["prompt"] + assert read_from_opentelemetry_span(span=nested_flow_span, key=HUMANLOOP_FILE_KEY)["flow"] != {} + with pytest.raises(KeyError): + read_from_opentelemetry_span(span=flow_span, key=HUMANLOOP_FILE_KEY)["flow"] != {} + + +def test_flow_decorator_with_hl_exporter( + call_llm_messages: list[dict], + opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter], +): + # NOTE: type ignore 
+
+
+def test_flow_decorator_with_hl_exporter(
+    call_llm_messages: list[dict],
+    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # NOTE: type ignore comments are caused by the MagicMock used to mock _client
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+    _agent_call = _test_scenario(tracer)[2]
+
+    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+        # WHEN calling the @flow-decorated function
+        _agent_call(call_llm_messages)
+
+        # Exporter is threaded; wait for its threads to shut down
+        time.sleep(3)
+
+    assert len(mock_export_method.call_args_list) == 4
+
+    for i in range(4):
+        span = mock_export_method.call_args_list[i][0][0][0]
+        if span.name == "humanloop.flow":
+            flow_span = span
+        elif span.name == "humanloop.prompt":
+            prompt_span = span
+        elif span.name == "humanloop.tool":
+            tool_span = span
+
+    # THEN the Flow span carries the attributes passed to the decorator
+    assert read_from_opentelemetry_span(
+        span=flow_span,
+        key=HUMANLOOP_FILE_KEY,
+    )["flow"]["attributes"] == {  # type: ignore[index,call-overload]
+        "foo": "bar",
+        "baz": 7,
+    }
+    # THEN the Prompt span carries a prompt kernel
+    assert "prompt" in read_from_opentelemetry_span(
+        span=prompt_span,
+        key=HUMANLOOP_FILE_KEY,
+    )
+    # THEN the Tool span carries a tool kernel
+    assert "tool" in read_from_opentelemetry_span(
+        span=tool_span,
+        key=HUMANLOOP_FILE_KEY,
+    )
+
+    # THEN the first Log uploaded is the Flow
+    first_log = exporter._client.flows.log.call_args_list[0][1]  # type: ignore
+    assert "flow" in first_log
+    exporter._client.flows.log.assert_called_once()  # type: ignore
+    flow_log_call_args = exporter._client.flows.log.call_args_list[0]  # type: ignore
+    assert flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
+    flow_log_id = exporter._client.flows.log.return_value.id  # type: ignore
+
+    # THEN the second Log uploaded is the Prompt
+    exporter._client.prompts.log.assert_called_once()  # type: ignore
+    prompt_log_call_args = exporter._client.prompts.log.call_args_list[0]  # type: ignore
+    assert prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
+    assert prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
+    prompt_log_id = exporter._client.prompts.log.return_value.id  # type: ignore
+
+    # THEN the final Log uploaded is the Tool
+    exporter._client.tools.log.assert_called_once()  # type: ignore
+    tool_log_call_args = exporter._client.tools.log.call_args_list[0]  # type: ignore
+    assert tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
+
+
+def test_flow_decorator_hl_exporter_flow_inside_flow(
+    call_llm_messages: list[dict],
+    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+    _flow_over_flow = _test_scenario(tracer)[3]
+
+    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+        # WHEN calling the @flow-decorated function
+        _flow_over_flow(call_llm_messages)
+
+        # Exporter is threaded; wait for its threads to shut down
+        time.sleep(3)
+
+    # THEN 5 spans arrive at the exporter
+    assert len(mock_export_method.call_args_list) == 5
+
+    # THEN one of the flows is nested inside the other
+    spans: list[ReadableSpan] = [mock_export_method.call_args_list[i][0][0][0] for i in range(1, 5)]
+    counter = 0
+    for span in spans:
+        if span.name == "humanloop.flow":
+            counter += 1
+            if span.parent:
+                nested_flow_span = span
+            else:
+                flow_span = span
+    # We are certain span_id exists for these 2 spans
+    assert nested_flow_span.parent.span_id == flow_span.context.span_id  # type: ignore
diff --git a/tests/utilities/test_prompt_decorator.py b/tests/utilities/test_prompt_decorator.py
new file mode 100644
index 00000000..96bffeda
--- /dev/null
+++ b/tests/utilities/test_prompt_decorator.py
@@ -0,0 +1,321 @@
+import os
+import time
+from typing import Optional
+
+import cohere
+import pytest
+
+# replicate has no typing stubs
+import replicate  # type: ignore
+from anthropic import Anthropic
+from anthropic.types.message_param import MessageParam
+from dotenv import load_dotenv
+from groq import Groq
+from groq import NotFoundError as GroqNotFoundError
+from humanloop.utilities.prompt import prompt
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY
+from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request import PromptKernelRequest
+from openai import OpenAI
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+# replicate has no typing stubs, ruff wants this import placed here
+from replicate.exceptions import ModelError as ReplicateModelError  # type: ignore
+
+_PROVIDER_AND_MODEL = [
+    ("openai", "gpt-4o"),
+    ("groq", "llama3-8b-8192"),
+    ("cohere", "command"),
+    ("replicate", "meta/meta-llama-3-8b-instruct"),
+    ("anthropic", "claude-3-opus-latest"),
+]
+
+
+def _test_scenario(opentelemetry_tracer: Tracer, **kwargs):
+    """
+    Set up the function decorated with @prompt.
+
+    Normally the opentelemetry_tracer would be passed in by the Humanloop client.
+    In a test environment, the Tracer is obtained from a fixture and the tests
+    call this function to set up the decorated function under test.
+    """
+
+    @prompt(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
+    def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
+        load_dotenv()
+        if provider == "openai":
+            # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
+            # provider calls. Could not find a way to intercept calls coming from a Mock.
+            client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))  # type: ignore
+            return (
+                client.chat.completions.create(
+                    model=model,
+                    messages=messages,  # type: ignore
+                    temperature=0.8,
+                )
+                .choices[0]
+                .message.content
+            )
+        if provider == "anthropic":
+            client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))  # type: ignore
+            messages_anthropic_format = [
+                MessageParam(
+                    content=message["content"],
+                    role="user" if message["role"] in ("user", "system") else "assistant",
+                )
+                for message in messages
+            ]
+            return (
+                client.messages.create(  # type: ignore
+                    model=model,
+                    messages=messages_anthropic_format,
+                    max_tokens=200,
+                    temperature=0.8,
+                )
+                .content[0]
+                .text
+            )
+        if provider == "groq":
+            try:
+                client = Groq(  # type: ignore
+                    # This is the default and can be omitted
+                    api_key=os.environ.get("GROQ_API_KEY"),
+                )
+                return (
+                    client.chat.completions.create(
+                        messages=messages,  # type: ignore
+                        model=model,
+                        temperature=0.8,
+                    )
+                    .choices[0]
+                    .message.content
+                )
+            except GroqNotFoundError:
+                # NOTE: Tests in this file are integration tests that rely on live LLM
+                # provider clients, so a failure might just be provider flakiness. If
+                # this happens, consider adding a skip mechanism similar to this Groq one.
+                pytest.skip("GROQ not available")
+        if provider == "cohere":
+            client = cohere.Client(api_key=os.getenv("COHERE_API_KEY"))  # type: ignore
+            messages_cohere_format: list[cohere.Message] = []
+            for message in messages:
+                if message["role"] == "system":
+                    messages_cohere_format.append(cohere.SystemMessage(message=message["content"]))
+                elif message["role"] == "user":
+                    messages_cohere_format.append(cohere.UserMessage(message=message["content"]))
+                elif message["role"] == "assistant":
+                    messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"]))
+            return client.chat(  # type: ignore
+                chat_history=messages_cohere_format,
+                model=model,
+                max_tokens=200,
+                message=messages[-1]["content"],
+                temperature=0.8,
+            ).text
+        if provider == "replicate":
+            # TODO: The instrumentor only picks up methods at module level, not at
+            # client level. This should be documented somewhere or changed.
+            replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
+            try:
+                output = ""
+                for event in replicate.run(
+                    model,
+                    input={
+                        "prompt": messages[0]["content"] + " " + messages[-1]["content"],
+                        "temperature": 0.8,
+                    },
+                ):
+                    output += str(event)
+            except ReplicateModelError:
+                pytest.skip("Replicate not available")
+            if not output:
+                pytest.skip("Replicate not available")
+            return output
+        raise ValueError(f"Unknown provider: {provider}")
+
+    return _call_llm_base
+
+
+# LLM provider might not be available, retry the test
+@pytest.mark.flaky(retries=3, delay=60)
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator(
+    provider_model: tuple[str, str],
+    opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[ChatCompletionMessageParam],
+):
+    provider, model = provider_model
+    # GIVEN an OpenTelemetry configuration without HumanloopSpanProcessor
+    tracer, exporter = opentelemetry_test_configuration
+    # WHEN using the Prompt decorator
+
+    call_llm = _test_scenario(tracer)
+
+    call_llm(
+        provider=provider,
+        model=model,
+        messages=call_llm_messages,
+    )
+
+    # Wait for the Prompt span to be exported; it waits
+    # asynchronously for the LLM provider call span to finish
+    time.sleep(1)
+
+    # THEN two spans are created: one for the LLM provider call and one for the Prompt
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 2
+    assert not is_humanloop_span(span=spans[0])
+    assert is_humanloop_span(span=spans[1])
+    # THEN without the HumanloopSpanProcessor, no prompt information is added to the Prompt span
+    assert spans[1].attributes.get("prompt") is None  # type: ignore
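+
+
+# NOTE (sketch): the tests below exercise the HumanloopSpanProcessor, which is
+# expected to merge values intercepted from the LLM provider call into the
+# Prompt span's kernel, roughly:
+#   provider call(model=..., temperature=0.8)
+#       -> prompt kernel {"model": ..., "provider": ..., "temperature": 0.8}
+# with decorator defaults (e.g. temperature, top_p) taking precedence over
+# intercepted values.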
+
+
+# LLM provider might not be available, retry the test
+@pytest.mark.flaky(retries=3, delay=60)
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_hl_processor(
+    provider_model: tuple[str, str],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[ChatCompletionMessageParam],
+):
+    provider, model = provider_model
+    # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
+    tracer, exporter = opentelemetry_hl_test_configuration
+    # WHEN using the Prompt decorator
+
+    call_llm = _test_scenario(opentelemetry_tracer=tracer)
+
+    call_llm(
+        provider=provider,
+        model=model,
+        messages=call_llm_messages,
+    )
+
+    # THEN two spans are created: one for the LLM provider call and one for the Prompt
+
+    # Wait for the Prompt span to be exported; it waits
+    # asynchronously for the LLM provider call span to finish
+    time.sleep(1)
+
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 2
+    assert not is_humanloop_span(span=spans[0])
+    assert is_humanloop_span(span=spans[1])
+    # THEN the Prompt span is enhanced with information and forms a correct PromptKernel
+    prompt_kernel = PromptKernelRequest.model_validate(
+        read_from_opentelemetry_span(
+            span=spans[1],
+            key=HUMANLOOP_FILE_KEY,
+        )["prompt"]  # type: ignore
+    )
+    # THEN the temperature is intercepted from the LLM provider call
+    assert prompt_kernel.temperature == 0.8
+    # THEN the provider is intercepted from the LLM provider call
+    assert prompt_kernel.provider == provider
+    # THEN the model is intercepted from the LLM provider call
+    assert prompt_kernel.model == model
+    # THEN top_p is not present since it is absent from the LLM provider call
+    assert prompt_kernel.top_p is None
+
+
+# LLM provider might not be available, retry the test
+@pytest.mark.flaky(retries=3, delay=60)
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_defaults(
+    provider_model: tuple[str, str],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[ChatCompletionMessageParam],
+):
+    provider, model = provider_model
+    # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
+    tracer, exporter = opentelemetry_hl_test_configuration
+    # WHEN using the Prompt decorator with default values
+
+    call_llm = _test_scenario(
+        opentelemetry_tracer=tracer,
+        temperature=0.9,
+        top_p=0.1,
+        template="You are an assistant on the following topics: {topics}.",
+        path=None,
+    )
+
+    call_llm(
+        provider=provider,
+        model=model,
+        messages=call_llm_messages,
+    )
+
+    # Wait for the Prompt span to be exported; it waits
+    # asynchronously for the LLM provider call span to finish
+    time.sleep(1)
+
+    spans = exporter.get_finished_spans()
+    # THEN the Prompt span is enhanced with information and forms a correct PromptKernel
+    prompt = PromptKernelRequest.model_validate(
+        read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["prompt"]  # type: ignore
+    )
+    # THEN the temperature intercepted from the LLM provider call is overridden by the default value
+    assert prompt.temperature == 0.9
+    # THEN top_p is taken from the decorator default value
+    assert prompt.top_p == 0.1
+    # THEN the model intercepted from the LLM provider call is kept
+    assert prompt.model == model
+
+
+# LLM provider might not be available, retry the test
+@pytest.mark.flaky(retries=3, delay=60)
+@pytest.mark.parametrize(
+    "attributes_test_expected",
+    [
+        (
+            {"foo": "bar"},
+            {"foo": "bar"},
+        ),
+        (
+            {},
+            None,
+        ),
+        (
+            None,
+            None,
+        ),
+    ],
+)
+def test_prompt_attributes(
+    attributes_test_expected: tuple[dict[str, str], dict[str, str]],
+    call_llm_messages: list[ChatCompletionMessageParam],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+    test_attributes, expected_attributes = attributes_test_expected
+    tracer, exporter = opentelemetry_hl_test_configuration
+
+    call_llm = _test_scenario(
+        opentelemetry_tracer=tracer,
+        path=None,
+        attributes=test_attributes,
+    )
+
+    call_llm(
+        provider="openai",
+        model="gpt-4o",
+        messages=call_llm_messages,
+    )
+
+    # Wait for the Prompt span to be exported; it waits
+    # asynchronously for the LLM provider call span to finish
+    time.sleep(1)
+
+    assert len(exporter.get_finished_spans()) == 2
+
+    prompt_kernel = PromptKernelRequest.model_validate(
+        read_from_opentelemetry_span(
+            span=exporter.get_finished_spans()[1],
+            key=HUMANLOOP_FILE_KEY,
+        )["prompt"]  # type: ignore
+    )
+    assert prompt_kernel.attributes == expected_attributes
diff --git a/tests/utilities/test_tool_decorator.py b/tests/utilities/test_tool_decorator.py
new file mode 100644
index 00000000..983c93f6
--- /dev/null
+++ b/tests/utilities/test_tool_decorator.py
@@ -0,0 +1,567 @@
+import sys
+import time
+from typing import Any, Optional, TypedDict, Union
+
+import pytest
+from humanloop.utilities.tool import tool
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY
+from humanloop.otel.helpers import read_from_opentelemetry_span
+from jsonschema.protocols import Validator
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+
+def test_calculator_decorator(
+    opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+    # GIVEN a test OpenTelemetry configuration
+    tracer, exporter = opentelemetry_test_configuration
+
+    @tool(opentelemetry_tracer=tracer)
+    def calculator(operation: str, num1: float, num2: float) -> float:
+        """Do arithmetic operations on two numbers."""
+        if operation == "add":
+            return num1 + num2
+        elif operation == "subtract":
+            return num1 - num2
+        elif operation == "multiply":
+            return num1 * num2
+        elif operation == "divide":
+            return num1 / num2
+        else:
+            raise ValueError(f"Invalid operation: {operation}")
+
+    # WHEN calling the @tool decorated function
+    result = calculator(operation="add", num1=1, num2=2)
+    assert result == 3
+    # THEN a single span is created and the log and file attributes are correctly set
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 1
+    hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_FILE_KEY)
+    hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_LOG_KEY)
+    assert hl_log["output"] == str(result) == "3"
+    assert hl_log["inputs"] == {
+        "operation": "add",
+        "num1": 1,
+        "num2": 2,
+    }
+    assert hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers."
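+    # For reference, the full schema attached to the span is expected to look
+    # roughly like the following (a sketch; field order may vary):
+    # {
+    #     "name": "calculator",
+    #     "description": "Do arithmetic operations on two numbers.",
+    #     "strict": True,
+    #     "parameters": {
+    #         "type": "object",
+    #         "properties": {
+    #             "operation": {"type": "string"},
+    #             "num1": {"type": "number"},
+    #             "num2": {"type": "number"},
+    #         },
+    #         "required": ("operation", "num1", "num2"),
+    #         "additionalProperties": False,
+    #     },
+    # }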
+ # TODO: pydantic is inconsistent by dumping either tuple or list + assert calculator.json_schema == hl_file["tool"]["function"] + + Validator.check_schema(calculator.json_schema) + + +def test_union_type(opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter]): + tracer, _ = opentelemetry_test_configuration + + @tool(opentelemetry_tracer=tracer) + def foo(a: Union[int, float], b: float) -> float: + return a + b + + assert foo.json_schema["parameters"]["properties"]["a"] == { + "anyOf": [ + {"type": "integer"}, + {"type": "number"}, + ] + } + assert foo.json_schema["parameters"]["properties"]["b"] == {"type": "number"} + assert foo.json_schema["parameters"]["required"] == ("a", "b") + + Validator.check_schema(foo.json_schema) + + +def test_not_required_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_test_configuration + + @tool(opentelemetry_tracer=tracer) + def test_calculator(a: Optional[float], b: float) -> float: + if a is None: + a = 0 + return a + b + + assert test_calculator(3, 4) == 7 + assert len(exporter.get_finished_spans()) == 1 + assert test_calculator.json_schema["parameters"]["properties"]["a"] == { + "type": ["number", "null"], + } + + Validator.check_schema(test_calculator.json_schema) + + +def test_no_annotation_on_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b) -> float: + if a is None: + a = 0 + return a + b + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` is of `any` type + # NOTE: JSONSchema dropped support for 'any' type, we include all types + # as a workaround + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_dict_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: dict) -> float: + if a is None: + a = 0 + return a + b["c"] + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + # on both keys and values + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": "object", + "properties": { + "key": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + "value": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_list_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an 
OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: Optional[list]) -> float: + if a is None: + a = 0 + sum = a + if b is None: + return sum + for val in b: + sum += val + return sum + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": ["array", "null"], + "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + "required": (), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + +def test_tuple_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: Optional[tuple]) -> float: + if a is None: + a = 0 + sum = a + if b is None: + return sum + for val in b: + sum += val + return sum + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": ["array", "null"], + "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + "required": (), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + +def test_function_without_return_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on the return value + # WHEN building the Tool kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: Optional[float], b: float) -> float: + """Add two numbers.""" + if a is None: + a = 0 + return a + b + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_list_annotation_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, exporter = opentelemetry_test_configuration + + # WHEN defining a tool with a list parameter + @tool(opentelemetry_tracer=tracer) + def foo(to_join: list[str]) -> str: + return " ".join(to_join) + + assert "a b c" == foo(to_join=["a", "b", "c"]) + + # THEN the function call results in a Span + assert len(exporter.get_finished_spans()) == 1 + # THEN the argument is correctly described in the JSON schema + assert foo.json_schema["parameters"]["properties"]["to_join"] == { # type: ignore + "type": "array", + "items": {"type": "string"}, + } + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_list_in_list_parameter_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a list of lists parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def nested_plain_join(to_join: 
list[list[str]]): + return " ".join([val for sub_list in to_join for val in sub_list]) + + # THEN the JSON schema is correctly built and parameter is correctly described + assert nested_plain_join.json_schema["parameters"]["properties"]["to_join"] == { + "type": "array", + "items": { + "type": "array", + "items": {"type": "string"}, + }, + } + + # THEN the JSONSchema is valid + Validator.check_schema(nested_plain_join.json_schema) + + +def test_complex_dict_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a dictionary parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: dict[Union[int, str], list[str]]): + return a + + # THEN the parameter is correctly described + assert foo.json_schema["parameters"]["properties"]["a"] == { + "type": "object", + "properties": { + "key": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "value": {"type": "array", "items": {"type": "string"}}, + }, + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_tuple_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a tuple parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: Optional[tuple[int, Optional[str], float]]): + return a + + # THEN the parameter is correctly described + assert foo.json_schema["parameters"]["properties"]["a"] == { + "type": ["array", "null"], + "items": [ + {"type": "integer"}, + {"type": ["string", "null"]}, + {"type": "number"}, + ], + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_tool_no_args( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition without arguments + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(): + return 42 + + # THEN the JSON schema is correctly built + assert foo.json_schema == { + "description": "", + "name": "foo", + "parameters": { + "properties": {}, + "required": [], + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_custom_types_throws( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a user-defined type + class Foo(TypedDict): + a: int # type: ignore + b: int # type: ignore + + # WHEN defining a tool with a parameter of that type + with pytest.raises(ValueError) as exc: + + @tool(opentelemetry_tracer=tracer) + def foo_bar(foo: Foo): + return foo.a + foo.b # type: ignore + + # THEN a ValueError is raised + assert exc.value.args[0].startswith("Error parsing signature of @tool annotated function foo_bar") + + +def test_tool_as_higher_order_function( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_hl_test_configuration + + def calculator(operation: str, num1: float, num2: float) -> float: + """Do arithmetic operations on two numbers.""" + if operation == "add": + return num1 + num2 + elif operation == "subtract": + return num1 - 
num2 + elif operation == "multiply": + return num1 * num2 + elif operation == "divide": + return num1 / num2 + else: + raise ValueError(f"Invalid operation: {operation}") + + higher_order_fn_tool = tool(opentelemetry_tracer=tracer)(calculator) + + @tool(opentelemetry_tracer=tracer) # type: ignore + def calculator(operation: str, num1: float, num2: float) -> float: + """Do arithmetic operations on two numbers.""" + if operation == "add": + return num1 + num2 + elif operation == "subtract": + return num1 - num2 + elif operation == "multiply": + return num1 * num2 + elif operation == "divide": + return num1 / num2 + else: + raise ValueError(f"Invalid operation: {operation}") + + higher_order_fn_tool(operation="add", num1=1, num2=2) + calculator(operation="add", num1=1, num2=2) + + # Processor handles HL spans asynchronously, wait for them + time.sleep(1) + + assert len(spans := exporter.get_finished_spans()) == 2 + + hl_file_higher_order_fn = read_from_opentelemetry_span( + span=spans[0], + key=HUMANLOOP_FILE_KEY, + ) + hl_file_decorated_fn = read_from_opentelemetry_span( + span=spans[1], + key=HUMANLOOP_FILE_KEY, + ) + assert hl_file_higher_order_fn["tool"]["source_code"] == hl_file_decorated_fn["tool"]["source_code"] # type: ignore + + +if sys.version_info >= (3, 10): + # Testing that function parsing for Tool decorator + # works with Python 3.10 and above syntax + + def test_python310_syntax( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], + ): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `|` for Optional + @tool(opentelemetry_tracer=tracer) + def calculator(a: float, b: float | None = None) -> float: + # NOTE: dummy function, only testing its signature not correctness + if a is None: + a = 0 + return a + b # type: ignore + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": "number"}, + "b": {"type": ["number", "null"]}, + }, + "required": ("a",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + def test_python310_union_syntax( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], + ): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `|` for Union + @tool(opentelemetry_tracer=tracer) + def calculator(a: float, b: float | int | str) -> float: + # NOTE: dummy function, only testing its signature not correctness + return a + b # type: ignore + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": "number"}, + "b": {"anyOf": [{"type": "number"}, {"type": "integer"}, {"type": "string"}]}, + }, + "required": ("a", "b"), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + def test_python_list_ellipsis( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], + ): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `...` + @tool(opentelemetry_tracer=tracer) + def calculator(b: ...) 
-> float | None: # type: ignore + # NOTE: dummy function, only testing its signature not correctness + if isinstance(b, list): + return sum(b) + return None + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + # THEN b is of any type + "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + }