diff --git a/.fernignore b/.fernignore
index 3a38428d..0dd0039a 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,7 +1,12 @@
 # Specify files that shouldn't be modified by Fern
-src/humanloop/eval_utils.py
+src/humanloop/eval_utils/*
 src/humanloop/prompt_utils.py
 src/humanloop/client.py
 mypy.ini
 README.md
+
+# Directories used by SDK decorators
+
+src/humanloop/decorators/*
+src/humanloop/otel/*
 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fa41c1f2..3d587f63 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.12
       - name: Bootstrap poetry
         run: |
           curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -26,7 +26,7 @@ jobs:
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.9
       - name: Bootstrap poetry
         run: |
           curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -35,6 +35,38 @@ jobs:
 
       - name: Test
         run: poetry run pytest -rP .
+        env:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
+          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
+  test_3_12:
+    # Run the test suite with Python 3.12 too
+    # Some tool decorator tests assert the ability to parse the signature
+    # of functions that use typing features introduced in Python 3.10 e.g. '|'
+    runs-on: ubuntu-20.04
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+      - name: Set up python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.12
+      - name: Bootstrap poetry
+        run: |
+          curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+      - name: Install dependencies
+        run: poetry install
+
+      - name: Test
+        run: poetry run pytest -rP .
+        env:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+          REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
+          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
 
   publish:
     needs: [compile, test]
@@ -46,7 +78,7 @@ jobs:
       - name: Set up python
         uses: actions/setup-python@v4
         with:
-          python-version: 3.8
+          python-version: 3.9
      - name: Bootstrap poetry
        run: |
          curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
diff --git a/.gitignore b/.gitignore
index 0da665fe..2f7c4926 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,7 @@ dist/
 __pycache__/
 poetry.toml
 .ruff_cache/
+.idea
+.vscode
+.DS_Store
+.env
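Note on the `test_3_12` job added above: per its comment, some tool decorator tests parse the signatures of functions annotated with PEP 604 union syntax (`X | Y`), which only evaluates on Python 3.10+. A minimal sketch of the kind of signature such a test exercises (the function and names below are illustrative assumptions, not code from this repository):

```python
import inspect


# Hypothetical tool function, NOT from the SDK: the 'int | None'
# annotation uses PEP 604 syntax from Python 3.10, so importing a
# module with this definition raises TypeError on Python 3.9.
def lookup_user(user_id: str, retries: int | None = None) -> dict:
    return {"user_id": user_id, "retries": retries}


# A decorator that builds a tool schema would read the annotations
# roughly like this, which is why a separate 3.12 job is needed:
for name, param in inspect.signature(lookup_user).parameters.items():
    print(name, param.annotation)
```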
diff --git a/poetry.lock b/poetry.lock
index 9eff35b0..c8c2b217 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"
@@ -11,18 +11,39 @@ files = [
     {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
 ]
 
+[[package]]
+name = "anthropic"
+version = "0.39.0"
+description = "The official Python library for the anthropic API"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "anthropic-0.39.0-py3-none-any.whl", hash = "sha256:ea17093ae0ce0e1768b0c46501d6086b5bcd74ff39d68cd2d6396374e9de7c09"},
+    {file = "anthropic-0.39.0.tar.gz", hash = "sha256:94671cc80765f9ce693f76d63a97ee9bef4c2d6063c044e983d21a2e262f63ba"},
+]
+
 [package.dependencies]
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.4.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+typing-extensions = ">=4.7,<5"
+
+[package.extras]
+bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
+vertex = ["google-auth (>=2,<3)"]
 
 [[package]]
 name = "anyio"
-version = "4.5.2"
+version = "4.6.2.post1"
 description = "High level compatibility layer for multiple asynchronous event loop implementations"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 files = [
-    {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
-    {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
+    {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
+    {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
 ]
 
 [package.dependencies]
@@ -36,6 +57,25 @@ doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)",
 test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
 trio = ["trio (>=0.26.1)"]
 
+[[package]]
+name = "attrs"
+version = "24.2.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+    {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
+]
+
+[package.extras]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+
 [[package]]
 name = "certifi"
 version = "2024.8.30"
@@ -47,6 +87,146 @@ files = [
     {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
 ]
 
+[[package]]
+name = "charset-normalizer"
+version = "3.4.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+    {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
+    {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
+    {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
+    {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
+    {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
+    {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
+    {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
+    {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
+    {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
+    {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
+]
+
+[[package]]
+name = "cohere"
+version = "5.11.3"
+description = ""
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+    {file = "cohere-5.11.3-py3-none-any.whl", hash = "sha256:96a0414af083337610e2f6de18f53ffaf5cb3f7aee763605d493c95ff981ad9f"},
+    {file = "cohere-5.11.3.tar.gz", hash = "sha256:a6587e7ef66ab377f37fdc13e5679375c4a45aef9d2047662a3e7737df7c6599"},
+]
+
+[package.dependencies]
+fastavro = ">=1.9.4,<2.0.0"
+httpx = ">=0.21.2"
+httpx-sse = "0.4.0"
+parameterized = ">=0.9.0,<0.10.0"
+pydantic = ">=1.9.2"
+pydantic-core = ">=2.18.2,<3.0.0"
+requests = ">=2.0.0,<3.0.0"
+tokenizers = ">=0.15,<1"
+types-requests = ">=2.0.0,<3.0.0"
+typing_extensions = ">=4.0.0"
+
+[package.extras]
+aws = ["boto3 (>=1.34.0,<2.0.0)", "sagemaker (>=2.232.1,<3.0.0)"]
+
 [[package]]
 name = "colorama"
 version = "0.4.6"
@@ -58,6 +238,34 @@ files = [
     {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
 ]
 
+[[package]]
+name = "deprecated"
+version = "1.2.14"
+description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+    {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
+    {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
+]
+
+[package.dependencies]
+wrapt = ">=1.10,<2"
+
+[package.extras]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+
+[[package]]
+name = "distro"
+version = "1.9.0"
+description = "Distro - an OS platform information API"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
+    {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
+]
+
 [[package]]
 name = "exceptiongroup"
 version = "1.2.2"
@@ -72,6 +280,126 @@ files = [
 [package.extras]
 test = ["pytest (>=6)"]
 
+[[package]]
+name = "fastavro"
+version = "1.9.7"
+description = "Fast read/write of AVRO files"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "fastavro-1.9.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc811fb4f7b5ae95f969cda910241ceacf82e53014c7c7224df6f6e0ca97f52f"},
+    {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb8749e419a85f251bf1ac87d463311874972554d25d4a0b19f6bdc56036d7cf"},
+    {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f9bafa167cb4d1c3dd17565cb5bf3d8c0759e42620280d1760f1e778e07fc"},
+    {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e87d04b235b29f7774d226b120da2ca4e60b9e6fdf6747daef7f13f218b3517a"},
+    {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b525c363e267ed11810aaad8fbdbd1c3bd8837d05f7360977d72a65ab8c6e1fa"},
+    {file = "fastavro-1.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:6312fa99deecc319820216b5e1b1bd2d7ebb7d6f221373c74acfddaee64e8e60"},
+    {file = "fastavro-1.9.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ec8499dc276c2d2ef0a68c0f1ad11782b2b956a921790a36bf4c18df2b8d4020"},
+    {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d9d96f98052615ab465c63ba8b76ed59baf2e3341b7b169058db104cbe2aa0"},
+    {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919f3549e07a8a8645a2146f23905955c35264ac809f6c2ac18142bc5b9b6022"},
+    {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9de1fa832a4d9016724cd6facab8034dc90d820b71a5d57c7e9830ffe90f31e4"},
+    {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1d09227d1f48f13281bd5ceac958650805aef9a4ef4f95810128c1f9be1df736"},
+    {file = "fastavro-1.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:2db993ae6cdc63e25eadf9f93c9e8036f9b097a3e61d19dca42536dcc5c4d8b3"},
+    {file = "fastavro-1.9.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4e1289b731214a7315884c74b2ec058b6e84380ce9b18b8af5d387e64b18fc44"},
+    {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eac69666270a76a3a1d0444f39752061195e79e146271a568777048ffbd91a27"},
+    {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9be089be8c00f68e343bbc64ca6d9a13e5e5b0ba8aa52bcb231a762484fb270e"},
+    {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d576eccfd60a18ffa028259500df67d338b93562c6700e10ef68bbd88e499731"},
+    {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee9bf23c157bd7dcc91ea2c700fa3bd924d9ec198bb428ff0b47fa37fe160659"},
+    {file = "fastavro-1.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:b6b2ccdc78f6afc18c52e403ee68c00478da12142815c1bd8a00973138a166d0"},
+    {file = "fastavro-1.9.7-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7313def3aea3dacface0a8b83f6d66e49a311149aa925c89184a06c1ef99785d"},
+    {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f5644737ad21d18af97d909dba099b9e7118c237be7e4bd087c7abde7e4f0"},
+    {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2af559f30383b79cf7d020a6b644c42ffaed3595f775fe8f3d7f80b1c43dfdc5"},
+    {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:edc28ab305e3c424de5ac5eb87b48d1e07eddb6aa08ef5948fcda33cc4d995ce"},
+    {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ec2e96bdabd58427fe683329b3d79f42c7b4f4ff6b3644664a345a655ac2c0a1"},
+    {file = "fastavro-1.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:3b683693c8a85ede496ebebe115be5d7870c150986e34a0442a20d88d7771224"},
+    {file = "fastavro-1.9.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:58f76a5c9a312fbd37b84e49d08eb23094d36e10d43bc5df5187bc04af463feb"},
+    {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56304401d2f4f69f5b498bdd1552c13ef9a644d522d5de0dc1d789cf82f47f73"},
+    {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fcce036c6aa06269fc6a0428050fcb6255189997f5e1a728fc461e8b9d3e26b"},
+    {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:17de68aae8c2525f5631d80f2b447a53395cdc49134f51b0329a5497277fc2d2"},
+    {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7c911366c625d0a997eafe0aa83ffbc6fd00d8fd4543cb39a97c6f3b8120ea87"},
+    {file = "fastavro-1.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:912283ed48578a103f523817fdf0c19b1755cea9b4a6387b73c79ecb8f8f84fc"},
+    {file = "fastavro-1.9.7.tar.gz", hash = "sha256:13e11c6cb28626da85290933027cd419ce3f9ab8e45410ef24ce6b89d20a1f6c"},
+]
+
+[package.extras]
+codecs = ["cramjam", "lz4", "zstandard"]
+lz4 = ["lz4"]
+snappy = ["cramjam"]
+zstandard = ["zstandard"]
+
+[[package]]
+name = "filelock"
+version = "3.16.1"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
+    {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
+]
+
+[package.extras]
+docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
+typing = ["typing-extensions (>=4.12.2)"]
+
+[[package]]
+name = "fsspec"
+version = "2024.10.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"},
+    {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+dev = ["pre-commit", "ruff"]
+doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"]
+test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"]
+test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "groq"
+version = "0.11.0"
+description = "The official Python library for the groq API"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "groq-0.11.0-py3-none-any.whl", hash = "sha256:e328531c979542e563668c62260aec13b43a6ee0ca9e2fb22dff1d26f8c8ce54"},
+    {file = "groq-0.11.0.tar.gz", hash = "sha256:dbb9aefedf388ddd4801ec7bf3eba7f5edb67948fec0cd2829d97244059f42a7"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+typing-extensions = ">=4.7,<5"
+
 [[package]]
 name = "h11"
 version = "0.14.0"
@@ -140,6 +468,40 @@ files = [
     {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
 ]
 
+[[package]]
+name = "huggingface-hub"
+version = "0.26.2"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+    {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"},
+    {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = ">=2023.5.0"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+hf-transfer = ["hf-transfer (>=0.1.4)"]
+inference = ["aiohttp"]
+quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+tensorflow-testing = ["keras (<3.0)", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
 [[package]]
 name = "idna"
 version = "3.10"
@@ -154,6 +516,25 @@ files = [
 [package.extras]
 all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
 
+[[package]]
+name = "importlib-metadata"
+version = "8.4.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
+    {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
+
 [[package]]
 name = "iniconfig"
 version = "2.0.0"
@@ -165,50 +546,174 @@ files = [
     {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
 ]
 
+[[package]]
+name = "jiter"
+version = "0.7.1"
+description = "Fast iterable JSON parser."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"},
+    {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935f10b802bc1ce2b2f61843e498c7720aa7f4e4bb7797aa8121eab017293c3d"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9cd3cccccabf5064e4bb3099c87bf67db94f805c1e62d1aefd2b7476e90e0ee2"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aa919ebfc5f7b027cc368fe3964c0015e1963b92e1db382419dadb098a05192"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ae2d01e82c94491ce4d6f461a837f63b6c4e6dd5bb082553a70c509034ff3d4"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f9568cd66dbbdab67ae1b4c99f3f7da1228c5682d65913e3f5f95586b3cb9a9"},
+    {file = "jiter-0.7.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ecbf4e20ec2c26512736284dc1a3f8ed79b6ca7188e3b99032757ad48db97dc"},
+    {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1a0508fddc70ce00b872e463b387d49308ef02b0787992ca471c8d4ba1c0fa1"},
+    {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f84c9996664c460f24213ff1e5881530abd8fafd82058d39af3682d5fd2d6316"},
+    {file = "jiter-0.7.1-cp310-none-win32.whl", hash = "sha256:c915e1a1960976ba4dfe06551ea87063b2d5b4d30759012210099e712a414d9f"},
+    {file = "jiter-0.7.1-cp310-none-win_amd64.whl", hash = "sha256:75bf3b7fdc5c0faa6ffffcf8028a1f974d126bac86d96490d1b51b3210aa0f3f"},
+    {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"},
+    {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"},
+    {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"},
+    {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"},
+    {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"},
+    {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"},
+    {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"},
+    {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"},
+    {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"},
+    {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"},
+    {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"},
+    {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"},
+    {file = "jiter-0.7.1-cp312-none-win32.whl", hash = "sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"},
+    {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"},
+    {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"},
+    {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"},
+    {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"},
+    {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"},
+    {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"},
+    {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"},
+    {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"},
+    {file = "jiter-0.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c65a3ce72b679958b79d556473f192a4dfc5895e8cc1030c9f4e434690906076"},
+    {file = "jiter-0.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e80052d3db39f9bb8eb86d207a1be3d9ecee5e05fdec31380817f9609ad38e60"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a497859c4f3f7acd71c8bd89a6f9cf753ebacacf5e3e799138b8e1843084e3"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c1288bc22b9e36854a0536ba83666c3b1fb066b811019d7b682c9cf0269cdf9f"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b096ca72dd38ef35675e1d3b01785874315182243ef7aea9752cb62266ad516f"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbd52c50b605af13dbee1a08373c520e6fcc6b5d32f17738875847fea4e2cd"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af29c5c6eb2517e71ffa15c7ae9509fa5e833ec2a99319ac88cc271eca865519"},
+    {file = "jiter-0.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f114a4df1e40c03c0efbf974b376ed57756a1141eb27d04baee0680c5af3d424"},
+    {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:191fbaee7cf46a9dd9b817547bf556facde50f83199d07fc48ebeff4082f9df4"},
+    {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e2b445e5ee627fb4ee6bbceeb486251e60a0c881a8e12398dfdff47c56f0723"},
+    {file = "jiter-0.7.1-cp38-none-win32.whl", hash = "sha256:47ac4c3cf8135c83e64755b7276339b26cd3c7ddadf9e67306ace4832b283edf"},
+    {file = "jiter-0.7.1-cp38-none-win_amd64.whl", hash = "sha256:60b49c245cd90cde4794f5c30f123ee06ccf42fb8730a019a2870cd005653ebd"},
+    {file = "jiter-0.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8f212eeacc7203256f526f550d105d8efa24605828382cd7d296b703181ff11d"},
+    {file = "jiter-0.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9e247079d88c00e75e297e6cb3a18a039ebcd79fefc43be9ba4eb7fb43eb726"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0aacaa56360139c53dcf352992b0331f4057a0373bbffd43f64ba0c32d2d155"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc1b55314ca97dbb6c48d9144323896e9c1a25d41c65bcb9550b3e0c270ca560"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f281aae41b47e90deb70e7386558e877a8e62e1693e0086f37d015fa1c102289"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93c20d2730a84d43f7c0b6fb2579dc54335db742a59cf9776d0b80e99d587382"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81ccccd8069110e150613496deafa10da2f6ff322a707cbec2b0d52a87b9671"},
+    {file = "jiter-0.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a7d5e85766eff4c9be481d77e2226b4c259999cb6862ccac5ef6621d3c8dcce"},
+    {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f52ce5799df5b6975439ecb16b1e879d7655e1685b6e3758c9b1b97696313bfb"},
+    {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0c91a0304373fdf97d56f88356a010bba442e6d995eb7773cbe32885b71cdd8"},
+    {file = "jiter-0.7.1-cp39-none-win32.whl", hash = "sha256:5c08adf93e41ce2755970e8aa95262298afe2bf58897fb9653c47cd93c3c6cdc"},
+    {file = "jiter-0.7.1-cp39-none-win_amd64.whl", hash = "sha256:6592f4067c74176e5f369228fb2995ed01400c9e8e1225fb73417183a5e635f0"},
+    {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"},
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.23.0"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
+    {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+jsonschema-specifications = ">=2023.03.6"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2024.10.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"},
+    {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"},
+]
+
+[package.dependencies]
+referencing = ">=0.31.0"
+
 [[package]]
 name = "mypy"
-version = "1.0.1"
+version = "1.13.0"
 description = "Optional static typing for Python"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
-    {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
-    {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
-    {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
-    {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
-    {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
-    {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
-    {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
-    {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
-    {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
-    {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
-    {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
-    {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
-    {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
-    {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
-    {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
-    {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
-    {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
-    {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
-    {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
-    {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
-    {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
-    {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
-    {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
-    {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
-    {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
-]
-
-[package.dependencies]
-mypy-extensions = ">=0.4.3"
+    {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
+    {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
+    {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
+    {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
+    {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
+    {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
+    {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
+    {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
+    {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
+    {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
+    {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
+    {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
+    {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
+    {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
+    {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
+    {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
+    {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
+    {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
+    {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
+    {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
+    {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
+    {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
+    {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
+    {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
+    {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
+    {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
+-]
+-
+-[package.dependencies]
+-mypy-extensions = ">=0.4.3"
+    {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
+    {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
+    {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
+    {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
+    {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
+    {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
+    {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
+    {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
+    {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
+    {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
+    {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
+    {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
+    {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
+    {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
+    {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
+    {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
+    {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
+    {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
+    {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
+    {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
+    {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
+    {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
+    {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
+    {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
+    {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
+    {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
+    {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
+    {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
+    {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
+    {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
+    {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
+    {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=1.0.0"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.6.0"
 
 [package.extras]
 dmypy = ["psutil (>=4.0)"]
+faster-cache = ["orjson"]
 install-types = ["pip"]
-python2 = ["typed-ast (>=1.4.0,<2)"]
+mypyc = ["setuptools (>=50)"]
 reports = ["lxml"]
 
 [[package]]
@@ -222,6 +727,207 @@ files = [
     {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
 ]
 
+[[package]]
+name = "openai"
+version = "1.54.3"
+description = "The official Python library for the openai API"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"},
+    {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.4.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tqdm = ">4"
+typing-extensions = ">=4.11,<5"
+
+[package.extras]
+datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+
+[[package]]
+name = "opentelemetry-api"
+version = "1.27.0"
+description = "OpenTelemetry Python API"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
+    {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
+]
+
+[package.dependencies]
+deprecated = ">=1.2.6"
+importlib-metadata = ">=6.0,<=8.4.0"
+
+[[package]]
+name = "opentelemetry-instrumentation"
+version = "0.48b0"
+description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"},
+    {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.4,<2.0"
+setuptools = ">=16.0"
+wrapt = ">=1.0.0,<2.0.0"
+
+[[package]]
+name = "opentelemetry-instrumentation-anthropic"
+version = "0.33.9"
+description = "OpenTelemetry Anthropic instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_anthropic-0.33.9-py3-none-any.whl", hash = "sha256:443fc46d7de9d95a86efebb4de1119672ba86f6da113cc7e1bb8129ce9978439"},
+    {file = "opentelemetry_instrumentation_anthropic-0.33.9.tar.gz", hash = "sha256:1866e832a777cfd407f83b3782f0788e702a9ede02eaaf7b6680d32f0c03d1e2"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+
+[[package]]
+name = "opentelemetry-instrumentation-bedrock"
+version = "0.33.9"
+description = "OpenTelemetry Bedrock instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_bedrock-0.33.9-py3-none-any.whl", hash = "sha256:b6e1ac590b3c0c5bb1df0266feb9d6e349df396d4b3d1a0da5377cb8e6e16816"},
+    {file = "opentelemetry_instrumentation_bedrock-0.33.9.tar.gz", hash = "sha256:4441e5f2093edb1cbcd05298a39d180ea88d6efeb1bbe355886a97a57f6b542e"},
+]
+
+[package.dependencies]
+anthropic = ">=0.17.0"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+
+[[package]]
+name = "opentelemetry-instrumentation-cohere"
+version = "0.33.9"
+description = "OpenTelemetry Cohere instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_cohere-0.33.9-py3-none-any.whl", hash = "sha256:a94ab72d0c438a154236f9907acee1a07f581408dbd8b06f0cb9301ef29b656b"},
+    {file = "opentelemetry_instrumentation_cohere-0.33.9.tar.gz", hash = "sha256:931f24768337026a933cb7dd4850530e0545772f08abaf37f4664f1e768b73db"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+
+[[package]]
+name = "opentelemetry-instrumentation-groq"
+version = "0.33.9"
+description = "OpenTelemetry Groq instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_groq-0.33.9-py3-none-any.whl", hash = "sha256:52256832c06f9d1ba8c11efce0854f012e7900c313e410a02c8feb85b0e35407"},
+    {file = "opentelemetry_instrumentation_groq-0.33.9.tar.gz", hash = "sha256:d83201c516a760fdc478413b855c6d9fb1aed48eb8d4166fa2dc7c762058f6b1"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+
+[[package]]
+name = "opentelemetry-instrumentation-openai"
+version = "0.33.9"
+description = "OpenTelemetry OpenAI instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_openai-0.33.9-py3-none-any.whl", hash = "sha256:9a54ec31a66c212cd42b7f02701beecea4068effdf227b11c96fecfbc6544f40"},
+    {file = "opentelemetry_instrumentation_openai-0.33.9.tar.gz", hash = "sha256:5989a6049e63a09a6e9d699c077f7bbc932c0bda5a08f9ec0f4e88fd0c38d8b7"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+tiktoken = ">=0.6.0,<1"
+
+[[package]]
+name = "opentelemetry-instrumentation-replicate"
+version = "0.33.9"
+description = "OpenTelemetry Replicate instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+    {file = "opentelemetry_instrumentation_replicate-0.33.9-py3-none-any.whl", hash = "sha256:cf2a0b83dfd150cb7a6827d405b088ed0a46beec7f652bfcc4acb5ffd3d2044a"},
+    {file = "opentelemetry_instrumentation_replicate-0.33.9.tar.gz", hash = "sha256:e18f2ce224ae1efc2158263aaec6c7b487d7498da9a08d1a594df484e86fce88"},
+]
+
+[package.dependencies] +opentelemetry-api = ">=1.27.0,<2.0.0" +opentelemetry-instrumentation = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions = ">=0.48b0,<0.49" +opentelemetry-semantic-conventions-ai = "0.4.2" + +[[package]] +name = "opentelemetry-sdk" +version = "1.27.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, +] + +[package.dependencies] +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.48b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.27.0" + +[[package]] +name = "opentelemetry-semantic-conventions-ai" +version = "0.4.2" +description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"}, + {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"}, +] + [[package]] name = "packaging" version = "24.2" @@ -233,6 +939,51 @@ files = [ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] +[[package]] +name = "parameterized" +version = "0.9.0" +description = "Parameterized testing with any Python test framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, + {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, +] + +[package.extras] +dev = ["jinja2"] + +[[package]] +name = "parse" +version = "1.20.2" +description = "parse() is the opposite of format()" +optional = false +python-versions = "*" +files = [ + {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"}, + {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"}, +] + +[[package]] +name = "parse-type" +version = "0.6.4" +description = "Simplifies to build parse types based on the parse module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,>=2.7" +files = [ + {file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"}, + {file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"}, +] + +[package.dependencies] +parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} +six = ">=1.15" + 
+[package.extras] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"] +docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] +testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] + [[package]] name = "pluggy" version = "1.5.0" @@ -426,6 +1177,337 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2024.11.6" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = 
"regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, +] + +[[package]] +name = "replicate" +version = "1.0.3" +description = "Python client for Replicate" +optional = false +python-versions = ">=3.8" +files = [ + {file = "replicate-1.0.3-py3-none-any.whl", hash = "sha256:8c49d63444b7ea9ac1d6af99eb23a01efb5b7f079cc8a020d6f52b38843db1da"}, + {file = "replicate-1.0.3.tar.gz", hash = "sha256:0fd9ca5230fe67c42e4508dd96a5b1414b3fefa5342f8921dbb63c74266cb130"}, +] + +[package.dependencies] +httpx = ">=0.21.0,<1" +packaging = "*" +pydantic = ">1.10.7" +typing-extensions = ">=4.5.0" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rpds-py" +version = "0.21.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664"}, + {file = "rpds_py-0.21.0-cp310-none-win32.whl", hash = "sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682"}, + {file = "rpds_py-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8"}, + {file = "rpds_py-0.21.0-cp311-none-win32.whl", hash = "sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a"}, + {file = "rpds_py-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11"}, + {file = "rpds_py-0.21.0-cp312-none-win32.whl", hash = "sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952"}, + {file = "rpds_py-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976"}, + {file = "rpds_py-0.21.0-cp313-none-win32.whl", hash = "sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202"}, + {file = "rpds_py-0.21.0-cp313-none-win_amd64.whl", hash = "sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed"}, + {file = "rpds_py-0.21.0-cp39-none-win32.whl", hash = "sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8"}, + {file = "rpds_py-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89"}, + {file = "rpds_py-0.21.0.tar.gz", hash = "sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db"}, +] + [[package]] name = "ruff" version = "0.5.7" @@ -453,6 +1535,26 @@ files = [ {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, ] +[[package]] +name = "setuptools" +version = "75.4.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +files = [ + {file = "setuptools-75.4.0-py3-none-any.whl", hash = "sha256:b3c5d862f98500b06ffdf7cc4499b48c46c317d8d56cb30b5c8bce4d88f5c216"}, + {file = "setuptools-75.4.0.tar.gz", hash = "sha256:1dc484f5cf56fd3fe7216d7b8df820802e7246cfb534a1db2aa64f14fcb9cdcb"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.7.0)"] +core = ["importlib-metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = 
["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12,<1.14)", "pytest-mypy"] + [[package]] name = "six" version = "1.16.0" @@ -475,17 +1577,228 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +[[package]] +name = "tiktoken" +version = "0.8.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, + {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, + {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, + {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, + {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, + {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, + {file = 
"tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, + {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, + {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, + {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, + {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, + {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tokenizers" +version = "0.20.3" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"}, + {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118"}, + {file = 
"tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d"}, + {file = "tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f"}, + {file = "tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"}, + {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"}, + {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"}, + {file = 
"tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"}, + {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"}, + {file = "tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"}, + {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"}, + {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:9adda1ff5fb9dcdf899ceca672a4e2ce9e797adb512a6467305ca3d8bfcfbdd0"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:6dde2cae6004ba7a3badff4a11911cae03ebf23e97eebfc0e71fef2530e5074f"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4a7fd678b35614fca708579eb95b7587a5e8a6d328171bd2488fd9f27d82be4"}, + {file = 
"tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b80e3c7283a01a356bd2210f53d1a4a5d32b269c2024389ed0173137708d50e"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8cc0e8176b762973758a77f0d9c4467d310e33165fb74173418ca3734944da4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5634b2e2f5f3d2b4439d2d74066e22eb4b1f04f3fea05cb2a3c12d89b5a3bcd"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4ba635165bc1ea46f2da8e5d80b5f70f6ec42161e38d96dbef33bb39df73964"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e4c7c64172e7789bd8b07aa3087ea87c4c4de7e90937a2aa036b5d92332536"}, + {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1f74909ef7675c26d4095a817ec3393d67f3158ca4836c233212e5613ef640c4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9b81321a1e05b16487d312b4264984513f8b4a7556229cafac6e88c2036b09"}, + {file = "tokenizers-0.20.3-cp37-none-win32.whl", hash = "sha256:ab48184cd58b4a03022a2ec75b54c9f600ffea9a733612c02325ed636f353729"}, + {file = "tokenizers-0.20.3-cp37-none-win_amd64.whl", hash = "sha256:60ac483cebee1c12c71878523e768df02fa17e4c54412966cb3ac862c91b36c1"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3229ef103c89583d10b9378afa5d601b91e6337530a0988e17ca8d635329a996"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac52cc24bad3de865c7e65b1c4e7b70d00938a8ae09a92a453b8f676e714ad5"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04627b7b502fa6a2a005e1bd446fa4247d89abcb1afaa1b81eb90e21aba9a60f"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c27ceb887f0e81a3c377eb4605dca7a95a81262761c0fba308d627b2abb98f2b"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65ab780194da4e1fcf5670523a2f377c4838ebf5249efe41fa1eddd2a84fb49d"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d343134f47159e81f7f242264b0eb222e6b802f37173c8d7d7b64d5c9d1388"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2475bb004ab2009d29aff13b5047bfdb3d4b474f0aa9d4faa13a7f34dbbbb43"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b6583a65c01db1197c1eb36857ceba8ec329d53afadd268b42a6b04f4965724"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d00ba208358c037eeab7bfc00a905adc67b2d31b68ab40ed09d75881e114ea"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0fc7a39e5bedc817bda395a798dfe2d9c5f7c71153c90d381b5135a0328d9520"}, + {file = "tokenizers-0.20.3-cp38-none-win32.whl", hash = "sha256:84d40ee0f8550d64d3ea92dd7d24a8557a9172165bdb986c9fb2503b4fe4e3b6"}, + {file = "tokenizers-0.20.3-cp38-none-win_amd64.whl", hash = "sha256:205a45246ed7f1718cf3785cff88450ba603352412aaf220ace026384aa3f1c0"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921"}, + {file = "tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064"}, + {file = "tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f3558a7ae6a6d38a77dfce12172a1e2e1bf3e8871e744a1861cd7591ea9ebe24"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d53029fe44bc70c3ff14ef512460a0cf583495a0f8e2f4b70e26eb9438e38a9"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a2a56397b2bec5a629b516b23f0f8a3e4f978c7488d4a299980f8375954b85"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e5bfaae740ef9ece000f8a07e78ac0e2b085c5ce9648f8593ddf0243c9f76d"}, + {file = 
"tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fbaf3ea28fedfb2283da60e710aff25492e795a7397cad8a50f1e079b65a5a70"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c47c037116310dc976eb96b008e41b9cfaba002ed8005848d4d632ee0b7ba9ae"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c31751f0721f58f5e19bb27c1acc259aeff860d8629c4e1a900b26a1979ada8e"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c697cbd3be7a79ea250ea5f380d6f12e534c543cfb137d5c734966b3ee4f34cc"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48971b88ef9130bf35b41b35fd857c3c4dae4a9cd7990ebc7fc03e59cc92438"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e615de179bbe060ab33773f0d98a8a8572b5883dd7dac66c1de8c056c7e748c"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da1ec842035ed9999c62e45fbe0ff14b7e8a7e02bb97688cc6313cf65e5cd755"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6ee4954c1dd23aadc27958dad759006e71659d497dcb0ef0c7c87ea992c16ebd"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3eda46ca402751ec82553a321bf35a617b76bbed7586e768c02ccacbdda94d6d"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb"}, + {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + [[package]] name = "tomli" -version = "2.0.2" +version = "2.1.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = 
"sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, +] + +[[package]] +name = "tqdm" +version = "4.67.0" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "types-jsonschema" +version = "4.23.0.20240813" +description = "Typing stubs for jsonschema" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"}, + {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"}, ] +[package.dependencies] +referencing = "*" + [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -497,6 +1810,20 @@ files = [ {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] +[[package]] +name = "types-requests" +version = "2.32.0.20241016" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, +] + +[package.dependencies] +urllib3 = ">=2" + [[package]] name = "typing-extensions" version = "4.12.2" @@ -508,7 +1835,122 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = "urllib3" +version = "2.2.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + [metadata] lock-version = "2.0" -python-versions = "^3.8" -content-hash = "2432f04327a2d8503e175bf13ddf16c3c5b9992b344c9b1e1faf3e444e388903" +python-versions = ">=3.9,<4" +content-hash = "26f6c5843461d01e9766383cef4b4f4febb0b95ea43db90bbedbe905793a0cfd" diff --git a/pyproject.toml b/pyproject.toml index b5eda9b1..8012d868 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,6 @@ classifiers = [ "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -27,24 +26,43 @@ packages = [ { include = "humanloop", from = "src"} ] + [project.urls] Repository = 'https://github.com/humanloop/humanloop-python' [tool.poetry.dependencies] -python = "^3.8" +python = ">=3.9,<4" httpx = ">=0.21.2" httpx-sse = "0.4.0" pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" -typing_extensions = ">= 4.0.0" +typing_extensions = ">=4.0.0" +parse = ">=1" +opentelemetry-sdk = "<=1.27.0" +opentelemetry-api = "<=1.27.0" +opentelemetry-instrumentation-openai = ">=0.20" +opentelemetry-instrumentation-cohere = ">=0.20" +opentelemetry-instrumentation-anthropic = ">=0.20" +opentelemetry-instrumentation-replicate = ">=0.20" +opentelemetry-instrumentation-groq = ">=0.29" +opentelemetry-instrumentation-bedrock = ">=0.15" -[tool.poetry.dev-dependencies] -mypy = "1.0.1" +[tool.poetry.group.dev.dependencies] +parse-type = ">=0.6.4" +anthropic = ">=0.37.1" +groq = ">=0.11.0" +cohere = ">=3.0" +replicate = ">=1.0.3" +jsonschema = "^4.23.0" +types-jsonschema = "^4.23.0.20240813" +mypy = "^1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" -python-dateutil = "^2.9.0" +python-dateutil = "^2.8.2" types-python-dateutil = "^2.9.0.20240316" ruff = "^0.5.6" +python-dotenv = "^1.0.1" +openai = "^1.52.2" [tool.pytest.ini_options] testpaths = [ "tests" ] diff --git a/reference.md b/reference.md index 948512c8..c4bba671 100644 --- a/reference.md +++ b/reference.md @@ -1,5 +1,7 @@ # Reference + ## Prompts +
client.prompts.log(...)
@@ -21,6 +23,7 @@ Instead of targeting an existing version explicitly, you can pass in Prompt details in the request body. In this case, we will check if the details correspond to an existing version of the Prompt. If they do not, we will create a new version. This is helpful when you are storing or deriving your Prompt details in code. +
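As a concrete illustration, a minimal sketch of logging against inline Prompt details rather than a pinned version might look like the following. The `api_key` value is a placeholder, and the kernel fields `model` and `template` are assumptions about `PromptKernelRequestParams`, not a definitive schema:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Passing `prompt` details inline: Humanloop checks whether these details
# match an existing version and creates a new version only if they are new.
client.prompts.log(
    path="folder/name",  # locates the Prompt in the Humanloop filesystem
    prompt={
        "model": "gpt-4o",  # assumed kernel fields, shown for illustration
        "template": [{"role": "system", "content": "You are a helpful assistant."}],
    },
    messages=[{"role": "user", "content": "Hello!"}],
    output="Hi there! How can I help?",
)
```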
@@ -71,6 +74,7 @@ client.prompts.log( ) ``` + @@ -85,7 +89,7 @@ client.prompts.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to. - +
@@ -93,7 +97,7 @@ client.prompts.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -101,7 +105,7 @@ client.prompts.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to. - +
@@ -109,7 +113,7 @@ client.prompts.log(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -117,7 +121,7 @@ client.prompts.log(
**id:** `typing.Optional[str]` — ID for an existing Prompt. - +
@@ -125,7 +129,7 @@ client.prompts.log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider. - +
@@ -133,7 +137,7 @@ client.prompts.log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output. - +
@@ -141,7 +145,7 @@ client.prompts.log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model. - +
@@ -149,7 +153,7 @@ client.prompts.log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt. - +
@@ -157,7 +161,7 @@ client.prompts.log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output. - +
@@ -165,7 +169,7 @@ client.prompts.log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished. - +
@@ -173,21 +177,22 @@ client.prompts.log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. - +
-**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]` +**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]` + +Controls how the model uses tools. The following options are supported: -Controls how the model uses tools. The following options are supported: -- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. -- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model must call one or more of the provided tools. +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - +
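For example, forcing the model to use a specific function might look like this sketch; the tool name `get_weather` is hypothetical and would need to match a tool defined on your Prompt:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Record a call in which the model was forced to use one named tool.
# "get_weather" is a hypothetical tool name used only for illustration.
client.prompts.log(
    path="folder/name",
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
)
```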
@@ -195,7 +200,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new. - +
@@ -203,7 +208,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -211,7 +216,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -219,15 +224,15 @@ Controls how the model uses tools. The following options are supported:
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - +
-**created_at:** `typing.Optional[dt.datetime]` — User-defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User-defined timestamp for when the log was created. +
@@ -235,7 +240,7 @@ Controls how the model uses tools. The following options are supported:
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -243,7 +248,7 @@ Controls how the model uses tools. The following options are supported:
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -251,7 +256,7 @@ Controls how the model uses tools. The following options are supported:
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -259,7 +264,7 @@ Controls how the model uses tools. The following options are supported:
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. - +
@@ -267,7 +272,7 @@ Controls how the model uses tools. The following options are supported:
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. - +
@@ -275,7 +280,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -283,7 +288,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -291,7 +296,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -299,7 +304,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -307,7 +312,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -315,7 +320,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -323,7 +328,7 @@ Controls how the model uses tools. The following options are supported:
**prompt_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. - +
@@ -331,7 +336,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -339,13 +344,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
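Because `output` can be `None` when logging an error, a failed generation can be recorded with `error` plus timing details instead; all values below are illustrative:

```python
import datetime as dt

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Log a failed generation: omit `output` and record `error` instead.
client.prompts.log(
    path="folder/name",
    error="Provider timed out after 30s",  # illustrative error message
    start_time=dt.datetime(2024, 11, 1, 12, 0, 0),
    end_time=dt.datetime(2024, 11, 1, 12, 0, 30),
    metadata={"region": "us-east-1"},  # any additional metadata to record
)
```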
@@ -365,6 +369,7 @@ Controls how the model uses tools. The following options are supported: Update a Log. Update the details of a Log with the given ID. + @@ -390,6 +395,7 @@ client.prompts.update_log( ) ``` + @@ -404,7 +410,7 @@ client.prompts.update_log(
**id:** `str` — Unique identifier for the Prompt. - +
@@ -412,7 +418,7 @@ client.prompts.update_log(
**log_id:** `str` — Unique identifier for the Log. - +
@@ -420,7 +426,7 @@ client.prompts.update_log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider. - +
@@ -428,7 +434,7 @@ client.prompts.update_log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output. - +
@@ -436,7 +442,7 @@ client.prompts.update_log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model. - +
@@ -444,7 +450,7 @@ client.prompts.update_log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt. - +
@@ -452,7 +458,7 @@ client.prompts.update_log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output. - +
@@ -460,7 +466,7 @@ client.prompts.update_log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished. - +
@@ -468,21 +474,22 @@ client.prompts.update_log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. - +
-**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]` +**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]` -Controls how the model uses tools. The following options are supported: -- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. -- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model must call one or more of the provided tools. +Controls how the model uses tools. The following options are supported: + +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - +
@@ -490,15 +497,15 @@ Controls how the model uses tools. The following options are supported:
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - +
-**created_at:** `typing.Optional[dt.datetime]` — User-defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User-defined timestamp for when the log was created. +
@@ -506,7 +513,7 @@ Controls how the model uses tools. The following options are supported:
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -514,7 +521,7 @@ Controls how the model uses tools. The following options are supported:
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -522,7 +529,7 @@ Controls how the model uses tools. The following options are supported:
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -530,7 +537,7 @@ Controls how the model uses tools. The following options are supported:
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. - +
@@ -538,7 +545,7 @@ Controls how the model uses tools. The following options are supported:
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. - +
@@ -546,7 +553,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -554,7 +561,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -562,7 +569,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -570,7 +577,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -578,7 +585,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -586,13 +593,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
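Combining the two required identifiers, a minimal update sketch might look like this; both IDs are placeholders:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Attach the provider's final message to an existing Log.
client.prompts.update_log(
    id="pr_...",       # placeholder Prompt ID
    log_id="log_...",  # placeholder Log ID
    output_message={"role": "assistant", "content": "Final answer."},
    finish_reason="stop",
)
```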
- @@ -621,6 +627,7 @@ Instead of targeting an existing version explicitly, you can pass in Prompt details in the request body. In this case, we will check if the details correspond to an existing version of the Prompt. If they do not, we will create a new version. This is helpful when you are storing or deriving your Prompt details in code. + @@ -696,6 +703,7 @@ for chunk in response: yield chunk ``` + @@ -710,7 +718,7 @@ for chunk in response:
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to. - +
@@ -718,7 +726,7 @@ for chunk in response:
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -726,7 +734,7 @@ for chunk in response:
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -734,7 +742,7 @@ for chunk in response:
**id:** `typing.Optional[str]` — ID for an existing Prompt. - +
@@ -742,21 +750,22 @@ for chunk in response:
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. - +
-**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]` +**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]` + +Controls how the model uses tools. The following options are supported: -Controls how the model uses tools. The following options are supported: -- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. -- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model must call one or more of the provided tools. +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - +
@@ -764,7 +773,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new. - +
@@ -772,7 +781,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -780,7 +789,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -788,7 +797,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -796,7 +805,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -804,7 +813,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -812,7 +821,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -820,7 +829,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -828,7 +837,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -836,7 +845,7 @@ Controls how the model uses tools. The following options are supported:
**prompts_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. - +
@@ -844,7 +853,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -852,7 +861,7 @@ Controls how the model uses tools. The following options are supported:
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - +
@@ -860,7 +869,7 @@ Controls how the model uses tools. The following options are supported:
**num_samples:** `typing.Optional[int]` — The number of generations. - +
@@ -868,7 +877,7 @@ Controls how the model uses tools. The following options are supported:
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - +
@@ -876,7 +885,7 @@ Controls how the model uses tools. The following options are supported:
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response. - +
@@ -884,7 +893,7 @@ Controls how the model uses tools. The following options are supported:
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - +
@@ -892,13 +901,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
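Building on the `for chunk in response` snippet above, a streaming sketch might look like this; the method name `call_stream` follows the generated SDK's naming conventions and should be treated as an assumption:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Stream a Prompt call chunk by chunk instead of waiting for the full response.
response = client.prompts.call_stream(
    path="folder/name",
    messages=[{"role": "user", "content": "Tell me a story."}],
    environment="production",  # illustrative deployed Environment name
)
for chunk in response:
    print(chunk)  # each chunk is a partial response object
```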
- @@ -927,6 +935,7 @@ Instead of targeting an existing version explicitly, you can pass in Prompt details in the request body. In this case, we will check if the details correspond to an existing version of the Prompt. If they do not, we will create a new version. This is helpful when you are storing or deriving your Prompt details in code. + @@ -954,6 +963,7 @@ client.prompts.call( ) ``` + @@ -968,7 +978,7 @@ client.prompts.call(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to. - +
@@ -976,7 +986,7 @@ client.prompts.call(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -984,7 +994,7 @@ client.prompts.call(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -992,7 +1002,7 @@ client.prompts.call(
**id:** `typing.Optional[str]` — ID for an existing Prompt. - +
@@ -1000,21 +1010,22 @@ client.prompts.call(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. - +
-**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]` +**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]` -Controls how the model uses tools. The following options are supported: -- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. -- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. -- `'required'` means the model must call one or more of the provided tools. +Controls how the model uses tools. The following options are supported: + +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. +- `'required'` means the model must call one or more of the provided tools. - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. - +
@@ -1022,7 +1033,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new. - +
@@ -1030,7 +1041,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -1038,7 +1049,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -1046,7 +1057,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -1054,7 +1065,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -1062,7 +1073,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -1070,7 +1081,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -1078,7 +1089,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -1086,7 +1097,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -1094,7 +1105,7 @@ Controls how the model uses tools. The following options are supported:
**prompts_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. - +
@@ -1102,7 +1113,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -1110,7 +1121,7 @@ Controls how the model uses tools. The following options are supported:
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. - +
@@ -1118,7 +1129,7 @@ Controls how the model uses tools. The following options are supported:
**num_samples:** `typing.Optional[int]` — The number of generations. - +
@@ -1126,7 +1137,7 @@ Controls how the model uses tools. The following options are supported:
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. - +
@@ -1134,7 +1145,7 @@ Controls how the model uses tools. The following options are supported:
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response - +
@@ -1142,7 +1153,7 @@ Controls how the model uses tools. The following options are supported:
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. - +
@@ -1150,13 +1161,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1174,6 +1184,7 @@ Controls how the model uses tools. The following options are supported:
Get a list of all Prompts. +
@@ -1203,6 +1214,7 @@ for page in response.iter_pages(): yield page ``` + @@ -1217,7 +1229,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -1225,7 +1237,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Prompts to fetch. - +
@@ -1233,7 +1245,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Prompt name. - +
@@ -1241,7 +1253,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users. - +
@@ -1249,7 +1261,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by - +
@@ -1257,7 +1269,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -1265,13 +1277,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
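As a usage sketch for the pagination parameters above: the filter values are hypothetical, and the pager is assumed to yield Prompts directly when iterated, alongside the `iter_pages()` pattern shown earlier.

```python
# Hypothetical filters; iterate items directly instead of page by page.
response = client.prompts.list(size=10, name="assistant")
for prompt in response:  # assuming the pager yields Prompts one by one
    print(prompt.id, prompt.path)
```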
- @@ -1295,6 +1306,7 @@ Prompts are identified by the `ID` or their `path`. The parameters (i.e. the pro If you provide a commit message, then the new version will be committed; otherwise it will be uncommitted. If you try to commit an already committed version, an exception will be raised. + @@ -1337,6 +1349,7 @@ client.prompts.upsert( ) ``` + @@ -1351,7 +1364,7 @@ client.prompts.upsert(
**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) - +
@@ -1359,7 +1372,7 @@ client.prompts.upsert(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -1367,7 +1380,7 @@ client.prompts.upsert(
**id:** `typing.Optional[str]` — ID for an existing Prompt. - +
@@ -1375,22 +1388,22 @@ client.prompts.upsert(
**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used. - +
-**template:** `typing.Optional[PromptRequestTemplateParams]` +**template:** `typing.Optional[PromptRequestTemplateParams]` -The template contains the main structure and instructions for the model, including input variables for dynamic values. +The template contains the main structure and instructions for the model, including input variables for dynamic values. For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. -For completion models, provide a prompt template as a string. +For completion models, provide a prompt template as a string. Input variables should be specified with double curly bracket syntax: `{{input_name}}`. - +
@@ -1398,7 +1411,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service. - +
@@ -1406,7 +1419,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt - +
@@ -1414,7 +1427,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative. - +
@@ -1422,7 +1435,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - +
@@ -1430,7 +1443,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**stop:** `typing.Optional[PromptRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. - +
@@ -1438,7 +1451,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. - +
@@ -1446,7 +1459,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. - +
@@ -1454,7 +1467,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call. - +
@@ -1462,7 +1475,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**seed:** `typing.Optional[int]` — If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. - +
@@ -1470,7 +1483,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - +
@@ -1478,7 +1491,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**tools:** `typing.Optional[typing.Sequence[ToolFunctionParams]]` — The tool specification that the model can choose to call if Tool calling is supported. - +
@@ -1486,7 +1499,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**linked_tools:** `typing.Optional[typing.Sequence[str]]` — The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. - +
@@ -1494,7 +1507,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - +
@@ -1502,7 +1515,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**commit_message:** `typing.Optional[str]` — Message describing the changes made. - +
@@ -1510,13 +1523,12 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1537,6 +1549,7 @@ Retrieve the Prompt with the given ID. By default, the deployed version of the Prompt is returned. Use the query parameters `version_id` or `environment` to target a specific version of the Prompt. + @@ -1561,6 +1574,7 @@ client.prompts.get( ) ``` + @@ -1575,7 +1589,7 @@ client.prompts.get(
**id:** `str` — Unique identifier for Prompt. - +
@@ -1583,7 +1597,7 @@ client.prompts.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve. - +
@@ -1591,7 +1605,7 @@ client.prompts.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -1599,13 +1613,12 @@ client.prompts.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
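For example, to pin a read to a specific deployment rather than the default (the ID and Environment name below are placeholders):

```python
# Default: the deployed version of the Prompt.
prompt = client.prompts.get(id="pr_1234")
# Target the version deployed to a named Environment instead.
staging_prompt = client.prompts.get(id="pr_1234", environment="staging")
```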
- @@ -1623,6 +1636,7 @@ client.prompts.get(
Delete the Prompt with the given ID. +
@@ -1647,6 +1661,7 @@ client.prompts.delete( ) ``` + @@ -1661,7 +1676,7 @@ client.prompts.delete(
**id:** `str` — Unique identifier for Prompt. - +
@@ -1669,13 +1684,12 @@ client.prompts.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1693,6 +1707,7 @@ client.prompts.delete(
Move the Prompt to a different path or change the name. +
@@ -1718,6 +1733,7 @@ client.prompts.move( ) ``` + @@ -1732,7 +1748,7 @@ client.prompts.move(
**id:** `str` — Unique identifier for Prompt. - +
@@ -1740,7 +1756,7 @@ client.prompts.move(
**path:** `typing.Optional[str]` — Path of the Prompt including the Prompt name, which is used as a unique identifier. - +
@@ -1748,7 +1764,7 @@ client.prompts.move(
**name:** `typing.Optional[str]` — Name of the Prompt. - +
@@ -1756,13 +1772,12 @@ client.prompts.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1780,6 +1795,7 @@ client.prompts.move(
Get a list of all the versions of a Prompt. +
@@ -1805,6 +1821,7 @@ client.prompts.list_versions( ) ``` + @@ -1819,7 +1836,7 @@ client.prompts.list_versions(
**id:** `str` — Unique identifier for Prompt. - +
@@ -1827,7 +1844,7 @@ client.prompts.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - +
@@ -1835,7 +1852,7 @@ client.prompts.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response - +
@@ -1843,13 +1860,12 @@ client.prompts.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1869,6 +1885,7 @@ client.prompts.list_versions( Commit a version of the Prompt with a commit message. If the version is already committed, an exception will be raised. + @@ -1895,6 +1912,7 @@ client.prompts.commit( ) ``` + @@ -1909,7 +1927,7 @@ client.prompts.commit(
**id:** `str` — Unique identifier for Prompt. - +
@@ -1917,7 +1935,7 @@ client.prompts.commit(
**version_id:** `str` — Unique identifier for the specific version of the Prompt. - +
@@ -1925,7 +1943,7 @@ client.prompts.commit(
**commit_message:** `str` — Message describing the changes made. - +
@@ -1933,13 +1951,12 @@ client.prompts.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -1960,6 +1977,7 @@ Deploy Prompt to an Environment. Set the deployed version for the specified Environment. This Prompt will be used for calls made to the Prompt in this Environment. + @@ -1986,6 +2004,7 @@ client.prompts.set_deployment( ) ``` + @@ -2000,7 +2019,7 @@ client.prompts.set_deployment(
**id:** `str` — Unique identifier for Prompt. - +
@@ -2008,7 +2027,7 @@ client.prompts.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - +
@@ -2016,7 +2035,7 @@ client.prompts.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Prompt. - +
@@ -2024,13 +2043,12 @@ client.prompts.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
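A typical promotion flow chains the two endpoints above: commit an uncommitted version, then deploy it to an Environment. All identifiers below are placeholders.

```python
client.prompts.commit(
    id="pr_1234",           # placeholder Prompt ID
    version_id="prv_5678",  # placeholder version ID
    commit_message="Tighten system message",
)
client.prompts.set_deployment(
    id="pr_1234",
    environment_id="env_prod",  # placeholder Environment ID
    version_id="prv_5678",
)
```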
- @@ -2051,6 +2069,7 @@ Remove deployed Prompt from the Environment. Remove the deployed version for the specified Environment. This Prompt will no longer be used for calls made to the Prompt in this Environment. + @@ -2076,6 +2095,7 @@ client.prompts.remove_deployment( ) ``` + @@ -2090,7 +2110,7 @@ client.prompts.remove_deployment(
**id:** `str` — Unique identifier for Prompt. - +
@@ -2098,7 +2118,7 @@ client.prompts.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. - +
@@ -2106,13 +2126,12 @@ client.prompts.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -2130,6 +2149,7 @@ client.prompts.remove_deployment(
List all Environments and their deployed versions for the Prompt. +
@@ -2154,6 +2174,7 @@ client.prompts.list_environments( ) ``` + @@ -2168,7 +2189,7 @@ client.prompts.list_environments(
**id:** `str` — Unique identifier for Prompt. - +
@@ -2176,13 +2197,12 @@ client.prompts.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -2203,6 +2223,7 @@ Activate and deactivate Evaluators for monitoring the Prompt. An activated Evaluator will automatically be run on all new Logs within the Prompt for monitoring purposes. + @@ -2228,6 +2249,7 @@ client.prompts.update_monitoring( ) ``` + @@ -2241,8 +2263,8 @@ client.prompts.update_monitoring(
-**id:** `str` - +**id:** `str` +
@@ -2252,7 +2274,7 @@ client.prompts.update_monitoring( **activate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] ]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. - + @@ -2262,7 +2284,7 @@ client.prompts.update_monitoring( **deactivate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] ]` — Evaluators to deactivate. These will not be run on new Logs. - + @@ -2270,18 +2292,18 @@ client.prompts.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
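A hedged sketch of toggling monitoring follows; the exact shape of the activate/deactivate items is an assumption here (each entry is taken to reference an Evaluator version), and all IDs are placeholders.

```python
client.prompts.update_monitoring(
    id="pr_1234",  # placeholder Prompt ID
    # Assumed item shape: each entry points at an Evaluator version.
    activate=[{"evaluator_version_id": "evv_abcd"}],
    deactivate=[{"evaluator_version_id": "evv_wxyz"}],
)
```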
- ## Tools +
client.tools.log(...)
@@ -2303,6 +2325,7 @@ Instead of targeting an existing version explicitly, you can instead pass in Tool details in the request body. In this case, we will check if the details correspond to an existing version of the Tool; if not, we will create a new version. This is helpful in the case where you are storing or deriving your Tool details in code. +
@@ -2343,6 +2366,7 @@ client.tools.log( ) ``` + @@ -2357,7 +2381,7 @@ client.tools.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to. - +
@@ -2365,7 +2389,7 @@ client.tools.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -2373,7 +2397,7 @@ client.tools.log(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -2381,7 +2405,7 @@ client.tools.log(
**id:** `typing.Optional[str]` — ID for an existing Tool. - +
@@ -2389,7 +2413,7 @@ client.tools.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -2397,7 +2421,7 @@ client.tools.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -2405,15 +2429,15 @@ client.tools.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - +
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +
@@ -2421,7 +2445,7 @@ client.tools.log(
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -2429,7 +2453,7 @@ client.tools.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -2437,7 +2461,7 @@ client.tools.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -2445,7 +2469,7 @@ client.tools.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. - +
@@ -2453,7 +2477,7 @@ client.tools.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. - +
@@ -2461,7 +2485,7 @@ client.tools.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -2469,7 +2493,7 @@ client.tools.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -2477,7 +2501,7 @@ client.tools.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -2485,7 +2509,7 @@ client.tools.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -2493,7 +2517,7 @@ client.tools.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -2501,7 +2525,7 @@ client.tools.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -2509,7 +2533,7 @@ client.tools.log(
**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. - +
@@ -2517,7 +2541,7 @@ client.tools.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -2525,7 +2549,7 @@ client.tools.log(
**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new. - +
@@ -2533,13 +2557,12 @@ client.tools.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
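Putting the Log fields together, here is a minimal sketch of recording a single Tool invocation; the path, inputs, and output are illustrative.

```python
import datetime as dt

start = dt.datetime.now(dt.timezone.utc)
result = "2"  # stand-in for your tool's real output
client.tools.log(
    path="utils/calculator",  # hypothetical Tool path
    inputs={"expression": "1 + 1"},
    output=result,
    start_time=start,
    end_time=dt.datetime.now(dt.timezone.utc),
)
```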
-
@@ -2559,6 +2582,7 @@ client.tools.log( Update a Log. Update the details of a Log with the given ID. + @@ -2584,6 +2608,7 @@ client.tools.update( ) ``` + @@ -2598,7 +2623,7 @@ client.tools.update(
**id:** `str` — Unique identifier for Tool. - +
@@ -2606,7 +2631,7 @@ client.tools.update(
**log_id:** `str` — Unique identifier for the Log. - +
@@ -2614,15 +2639,15 @@ client.tools.update(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - +
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +
@@ -2630,7 +2655,7 @@ client.tools.update(
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -2638,7 +2663,7 @@ client.tools.update(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -2646,7 +2671,7 @@ client.tools.update(
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -2654,7 +2679,7 @@ client.tools.update(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. - +
@@ -2662,7 +2687,7 @@ client.tools.update(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. - +
@@ -2670,7 +2695,7 @@ client.tools.update(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -2678,7 +2703,7 @@ client.tools.update(
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -2686,7 +2711,7 @@ client.tools.update(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -2694,7 +2719,7 @@ client.tools.update(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -2702,7 +2727,7 @@ client.tools.update(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -2710,13 +2735,12 @@ client.tools.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -2734,6 +2758,7 @@ client.tools.update(
Get a list of all Tools. +
@@ -2763,6 +2788,7 @@ for page in response.iter_pages(): yield page ``` + @@ -2777,7 +2803,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination. - +
@@ -2785,7 +2811,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch. - +
@@ -2793,7 +2819,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name. - +
@@ -2801,7 +2827,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users. - +
@@ -2809,7 +2835,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by - +
@@ -2817,7 +2843,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -2825,13 +2851,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -2855,6 +2880,7 @@ Tools are identified by the `ID` or their `path`. The name, description and para If you provide a commit message, then the new version will be committed; otherwise it will be uncommitted. If you try to commit an already committed version, an exception will be raised. + @@ -2889,6 +2915,7 @@ client.tools.upsert( ) ``` + @@ -2903,7 +2930,7 @@ client.tools.upsert(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -2911,7 +2938,7 @@ client.tools.upsert(
**id:** `typing.Optional[str]` — ID for an existing Tool. - +
@@ -2919,7 +2946,7 @@ client.tools.upsert(
**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling. - +
@@ -2927,7 +2954,7 @@ client.tools.upsert(
**source_code:** `typing.Optional[str]` — Code source of the Tool. - +
@@ -2935,7 +2962,7 @@ client.tools.upsert(
**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ - +
@@ -2943,7 +2970,7 @@ client.tools.upsert(
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. - +
@@ -2951,7 +2978,7 @@ client.tools.upsert(
**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool. - +
@@ -2959,7 +2986,7 @@ client.tools.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made. - +
@@ -2967,13 +2994,12 @@ client.tools.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
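As an illustration of the `function` parameter, here is a hedged sketch that assumes an OpenAI-style function schema (name, description, JSON Schema parameters); the Tool itself is hypothetical.

```python
client.tools.upsert(
    path="utils/calculator",  # hypothetical path
    function={
        # Assumed OpenAI-style function schema.
        "name": "calculator",
        "description": "Evaluate an arithmetic expression.",
        "parameters": {
            "type": "object",
            "properties": {"expression": {"type": "string"}},
            "required": ["expression"],
        },
    },
    commit_message="Add calculator tool",
)
```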
- @@ -2994,6 +3020,7 @@ Retrieve the Tool with the given ID. By default, the deployed version of the Tool is returned. Use the query parameters `version_id` or `environment` to target a specific version of the Tool. + @@ -3018,6 +3045,7 @@ client.tools.get( ) ``` + @@ -3032,7 +3060,7 @@ client.tools.get(
**id:** `str` — Unique identifier for Tool. - +
@@ -3040,7 +3068,7 @@ client.tools.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve. - +
@@ -3048,7 +3076,7 @@ client.tools.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -3056,13 +3084,12 @@ client.tools.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3080,6 +3107,7 @@ client.tools.get(
Delete the Tool with the given ID. +
@@ -3104,6 +3132,7 @@ client.tools.delete( ) ``` + @@ -3118,7 +3147,7 @@ client.tools.delete(
**id:** `str` — Unique identifier for Tool. - +
@@ -3126,13 +3155,12 @@ client.tools.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3150,6 +3178,7 @@ client.tools.delete(
Move the Tool to a different path or change the name. +
@@ -3175,6 +3204,7 @@ client.tools.move( ) ``` + @@ -3189,7 +3219,7 @@ client.tools.move(
**id:** `str` — Unique identifier for Tool. - +
@@ -3197,7 +3227,7 @@ client.tools.move(
**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier. - +
@@ -3205,7 +3235,7 @@ client.tools.move(
**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier. - +
@@ -3213,13 +3243,12 @@ client.tools.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3237,6 +3266,7 @@ client.tools.move(
Get a list of all the versions of a Tool. +
@@ -3262,6 +3292,7 @@ client.tools.list_versions( ) ``` + @@ -3276,7 +3307,7 @@ client.tools.list_versions(
**id:** `str` — Unique identifier for the Tool. - +
@@ -3284,7 +3315,7 @@ client.tools.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - +
@@ -3292,7 +3323,7 @@ client.tools.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response - +
@@ -3300,13 +3331,12 @@ client.tools.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3326,6 +3356,7 @@ client.tools.list_versions( Commit a version of the Tool with a commit message. If the version is already committed, an exception will be raised. + @@ -3352,6 +3383,7 @@ client.tools.commit( ) ``` + @@ -3366,7 +3398,7 @@ client.tools.commit(
**id:** `str` — Unique identifier for Tool. - +
@@ -3374,7 +3406,7 @@ client.tools.commit(
**version_id:** `str` — Unique identifier for the specific version of the Tool. - +
@@ -3382,7 +3414,7 @@ client.tools.commit(
**commit_message:** `str` — Message describing the changes made. - +
@@ -3390,13 +3422,12 @@ client.tools.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3417,6 +3448,7 @@ Deploy Tool to an Environment. Set the deployed version for the specified Environment. This Tool will be used for calls made to the Tool in this Environment. + @@ -3443,6 +3475,7 @@ client.tools.set_deployment( ) ``` + @@ -3457,7 +3490,7 @@ client.tools.set_deployment(
**id:** `str` — Unique identifier for Tool. - +
@@ -3465,7 +3498,7 @@ client.tools.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - +
@@ -3473,7 +3506,7 @@ client.tools.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Tool. - +
@@ -3481,13 +3514,12 @@ client.tools.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3508,6 +3540,7 @@ Remove deployed Tool from the Environment. Remove the deployed version for the specified Environment. This Tool will no longer be used for calls made to the Tool in this Environment. + @@ -3533,6 +3566,7 @@ client.tools.remove_deployment( ) ``` + @@ -3547,7 +3581,7 @@ client.tools.remove_deployment(
**id:** `str` — Unique identifier for Tool. - +
@@ -3555,7 +3589,7 @@ client.tools.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. - +
@@ -3563,13 +3597,12 @@ client.tools.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3587,6 +3620,7 @@ client.tools.remove_deployment(
List all Environments and their deployed versions for the Tool. +
@@ -3611,6 +3645,7 @@ client.tools.list_environments( ) ``` + @@ -3625,7 +3660,7 @@ client.tools.list_environments(
**id:** `str` — Unique identifier for Tool. - +
@@ -3633,13 +3668,12 @@ client.tools.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -3660,6 +3694,7 @@ Activate and deactivate Evaluators for monitoring the Tool. An activated Evaluator will automatically be run on all new Logs within the Tool for monitoring purposes. + @@ -3685,6 +3720,7 @@ client.tools.update_monitoring( ) ``` + @@ -3698,8 +3734,8 @@ client.tools.update_monitoring(
-**id:** `str` - +**id:** `str` +
@@ -3709,7 +3745,7 @@ client.tools.update_monitoring( **activate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] ]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. - + @@ -3719,7 +3755,7 @@ client.tools.update_monitoring( **deactivate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] ]` — Evaluators to deactivate. These will not be run on new Logs. - + @@ -3727,18 +3763,18 @@ client.tools.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- ## Datasets +
client.datasets.list(...)
@@ -3752,6 +3788,7 @@ client.tools.update_monitoring(
List all Datasets. +
@@ -3781,6 +3818,7 @@ for page in response.iter_pages(): yield page ``` + @@ -3795,7 +3833,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination. - +
@@ -3803,7 +3841,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch. - +
@@ -3811,7 +3849,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name. - +
@@ -3819,7 +3857,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. - +
@@ -3827,7 +3865,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by - +
@@ -3835,7 +3873,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -3843,13 +3881,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
@@ -3883,6 +3920,7 @@ an exception will be raised. Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + @@ -3932,6 +3970,7 @@ client.datasets.upsert( ) ``` + @@ -3946,7 +3985,7 @@ client.datasets.upsert(
**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. - +
@@ -3954,7 +3993,7 @@ client.datasets.upsert(
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - +
@@ -3962,7 +4001,7 @@ client.datasets.upsert(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. - +
@@ -3970,7 +4009,7 @@ client.datasets.upsert(
**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -3978,23 +4017,23 @@ client.datasets.upsert(
**id:** `typing.Optional[str]` — ID for an existing Dataset. - +
-**action:** `typing.Optional[UpdateDatesetAction]` +**action:** `typing.Optional[UpdateDatesetAction]` The action to take with the provided Datapoints. - - If `"set"`, the created version will only contain the Datapoints provided in this request. - - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. - - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. +- If `"set"`, the created version will only contain the Datapoints provided in this request. +- If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. +- If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. - +
@@ -4002,7 +4041,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - +
@@ -4010,7 +4049,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**commit_message:** `typing.Optional[str]` — Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. - +
@@ -4018,13 +4057,12 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
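Tying the `action` and deduplication behaviour together, here is a sketch of extending an existing version; the Datapoint shape (`inputs` plus `target`) and all values are assumptions for illustration.

```python
client.datasets.upsert(
    path="qa/golden-set",   # hypothetical Dataset path
    action="add",           # extend the target version rather than replace it
    version_id="dsv_1234",  # placeholder: version the new one is based on
    datapoints=[
        {
            # "_dedupe_id" keeps an otherwise-duplicate Datapoint distinct.
            "inputs": {"question": "What is 2 + 2?", "_dedupe_id": "qa-0001"},
            "target": {"answer": "4"},
        }
    ],
    commit_message="Add arithmetic example",
)
```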
- @@ -4050,6 +4088,7 @@ retrieve Datapoints for a large Dataset. By default, the deployed version of the Dataset is returned. Use the query parameters `version_id` or `environment` to target a specific version of the Dataset. + @@ -4076,6 +4115,7 @@ client.datasets.get( ) ``` + @@ -4090,7 +4130,7 @@ client.datasets.get(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4098,7 +4138,7 @@ client.datasets.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve. - +
@@ -4106,7 +4146,7 @@ client.datasets.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -4114,7 +4154,7 @@ client.datasets.get(
**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. - +
@@ -4122,13 +4162,12 @@ client.datasets.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4146,6 +4185,7 @@ client.datasets.get(
Delete the Dataset with the given ID. +
@@ -4170,6 +4210,7 @@ client.datasets.delete( ) ``` + @@ -4184,7 +4225,7 @@ client.datasets.delete(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4192,13 +4233,12 @@ client.datasets.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4216,6 +4256,7 @@ client.datasets.delete(
Move the Dataset to a different path or change the name. +
@@ -4240,6 +4281,7 @@ client.datasets.move( ) ``` + @@ -4254,7 +4296,7 @@ client.datasets.move(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4262,7 +4304,7 @@ client.datasets.move(
**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier. - +
@@ -4270,7 +4312,7 @@ client.datasets.move(
**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier. - +
@@ -4278,13 +4320,12 @@ client.datasets.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4302,6 +4343,7 @@ client.datasets.move(
List all Datapoints for the Dataset with the given ID. +
@@ -4332,6 +4374,7 @@ for page in response.iter_pages(): yield page ``` + @@ -4346,7 +4389,7 @@ for page in response.iter_pages():
**id:** `str` — Unique identifier for Dataset. - +
@@ -4354,7 +4397,7 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve. - +
@@ -4362,7 +4405,7 @@ for page in response.iter_pages():
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -4370,7 +4413,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -4378,7 +4421,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch. - +
@@ -4386,13 +4429,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4410,6 +4452,7 @@ for page in response.iter_pages():
Get a list of the versions for a Dataset. +
@@ -4435,6 +4478,7 @@ client.datasets.list_versions( ) ``` + @@ -4449,7 +4493,7 @@ client.datasets.list_versions(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4457,7 +4501,7 @@ client.datasets.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - +
@@ -4465,7 +4509,7 @@ client.datasets.list_versions(
**include_datapoints:** `typing.Optional[typing.Literal["latest_committed"]]` — If set to 'latest_committed', include the Datapoints for the latest committed version. Defaults to `None`. - +
@@ -4473,13 +4517,12 @@ client.datasets.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4499,6 +4542,7 @@ client.datasets.list_versions( Commit a version of the Dataset with a commit message. If the version is already committed, an exception will be raised. + @@ -4525,6 +4569,7 @@ client.datasets.commit( ) ``` + @@ -4539,7 +4584,7 @@ client.datasets.commit(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4547,7 +4592,7 @@ client.datasets.commit(
**version_id:** `str` — Unique identifier for the specific version of the Dataset. - +
@@ -4555,7 +4600,7 @@ client.datasets.commit(
**commit_message:** `str` — Message describing the changes made. - +
@@ -4563,13 +4608,12 @@ client.datasets.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4594,6 +4638,7 @@ If either `version_id` or `environment` is provided, the new version will be bas with the Datapoints from the CSV file added to the existing Datapoints in the version. If neither `version_id` nor `environment` is provided, the new version will be based on the version of the Dataset that is deployed to the default Environment. + @@ -4619,6 +4664,7 @@ client.datasets.upload_csv( ) ``` + @@ -4633,17 +4679,17 @@ client.datasets.upload_csv(
**id:** `str` — Unique identifier for the Dataset - +
-**file:** `from __future__ import annotations +**file:** `from __future__ import annotations core.File` — See core.File for more documentation - +
@@ -4651,7 +4697,7 @@ core.File` — See core.File for more documentation
**commit_message:** `str` — Commit message for the new Dataset version. - +
@@ -4659,7 +4705,7 @@ core.File` — See core.File for more documentation
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. - +
@@ -4667,7 +4713,7 @@ core.File` — See core.File for more documentation
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. - +
@@ -4675,13 +4721,12 @@ core.File` — See core.File for more documentation
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
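A sketch of the upload, assuming an open binary file handle satisfies `core.File`; the filename and IDs are placeholders.

```python
with open("datapoints.csv", "rb") as f:  # hypothetical local CSV
    client.datasets.upload_csv(
        id="ds_1234",  # placeholder Dataset ID
        file=f,        # assuming a binary file handle is accepted as core.File
        commit_message="Import datapoints from CSV",
    )
```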
- @@ -4701,6 +4746,7 @@ core.File` — See core.File for more documentation Deploy Dataset to Environment. Set the deployed version for the specified Environment. + @@ -4727,6 +4773,7 @@ client.datasets.set_deployment( ) ``` + @@ -4741,7 +4788,7 @@ client.datasets.set_deployment(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4749,7 +4796,7 @@ client.datasets.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - +
@@ -4757,7 +4804,7 @@ client.datasets.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Dataset. - +
@@ -4765,13 +4812,12 @@ client.datasets.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4791,6 +4837,7 @@ client.datasets.set_deployment( Remove deployed Dataset from Environment. Remove the deployed version for the specified Environment. + @@ -4816,6 +4863,7 @@ client.datasets.remove_deployment( ) ``` + @@ -4830,7 +4878,7 @@ client.datasets.remove_deployment(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4838,7 +4886,7 @@ client.datasets.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. - +
@@ -4846,13 +4894,12 @@ client.datasets.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -4870,6 +4917,7 @@ client.datasets.remove_deployment(
List all Environments and their deployed versions for the Dataset. +
@@ -4894,6 +4942,7 @@ client.datasets.list_environments( ) ``` + @@ -4908,7 +4957,7 @@ client.datasets.list_environments(
**id:** `str` — Unique identifier for Dataset. - +
@@ -4916,18 +4965,18 @@ client.datasets.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- ## Evaluators +
client.evaluators.log(...)
@@ -4943,6 +4992,7 @@ client.datasets.list_environments( Submit Evaluator judgment for an existing Log. Creates a new Log. The evaluated Log will be set as the parent of the created Log. +
@@ -4967,6 +5017,7 @@ client.evaluators.log( ) ``` + @@ -4981,7 +5032,7 @@ client.evaluators.log(
**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent. - +
@@ -4989,7 +5040,7 @@ client.evaluators.log(
**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against. - +
@@ -4997,7 +5048,7 @@ client.evaluators.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -5005,7 +5056,7 @@ client.evaluators.log(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -5013,7 +5064,7 @@ client.evaluators.log(
**id:** `typing.Optional[str]` — ID for an existing Evaluator. - +
@@ -5021,7 +5072,7 @@ client.evaluators.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +
@@ -5029,7 +5080,7 @@ client.evaluators.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
@@ -5037,15 +5088,15 @@ client.evaluators.log(
**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs. - +
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +
@@ -5053,7 +5104,7 @@ client.evaluators.log(
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -5061,7 +5112,7 @@ client.evaluators.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -5069,7 +5120,7 @@ client.evaluators.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -5077,7 +5128,7 @@ client.evaluators.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. Only populated for LLM Evaluator Logs. - +
@@ -5085,7 +5136,7 @@ client.evaluators.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs. - +
@@ -5093,7 +5144,7 @@ client.evaluators.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -5101,7 +5152,7 @@ client.evaluators.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -5109,7 +5160,7 @@ client.evaluators.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -5117,7 +5168,7 @@ client.evaluators.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -5125,7 +5176,7 @@ client.evaluators.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -5133,7 +5184,7 @@ client.evaluators.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -5141,7 +5192,7 @@ client.evaluators.log(
**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. - +
@@ -5149,7 +5200,7 @@ client.evaluators.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -5157,7 +5208,7 @@ client.evaluators.log(
**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log. - +
@@ -5165,15 +5216,26 @@ client.evaluators.log(
**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user. - +
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]` - +**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]` +
@@ -5181,13 +5243,12 @@ client.evaluators.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
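A minimal sketch of submitting a judgment against an existing Log; the boolean judgment value and all identifiers are assumptions for illustration.

```python
client.evaluators.log(
    parent_id="log_1234",     # placeholder: the Log being judged
    path="evals/factuality",  # hypothetical Evaluator path
    judgment=True,            # assuming a boolean judgment for this Evaluator
    marked_completed=True,
)
```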
-
@@ -5205,6 +5266,7 @@ client.evaluators.log(
Get a list of all Evaluators. +
@@ -5234,6 +5296,7 @@ for page in response.iter_pages(): yield page ``` + @@ -5248,7 +5311,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination. - +
@@ -5256,7 +5319,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch. - +
@@ -5264,7 +5327,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name. - +
@@ -5272,7 +5335,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users. - +
@@ -5280,7 +5343,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by - +
@@ -5288,7 +5351,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -5296,13 +5359,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5326,6 +5388,7 @@ Evaluators are identified by the `ID` or their `path`. The spec provided determi If you provide a commit message, then the new version will be committed; otherwise it will be uncommitted. If you try to commit an already committed version, an exception will be raised. + @@ -5357,6 +5420,7 @@ client.evaluators.upsert( ) ``` + @@ -5370,8 +5434,14 @@ client.evaluators.upsert(
-**spec:** `EvaluatorRequestSpecParams` - +**spec:** `EvaluatorRequestSpecParams` +
@@ -5379,7 +5449,7 @@ client.evaluators.upsert(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -5387,7 +5457,7 @@ client.evaluators.upsert(
**id:** `typing.Optional[str]` — ID for an existing Evaluator. - +
@@ -5395,7 +5465,7 @@ client.evaluators.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made. - +
@@ -5403,13 +5473,12 @@ client.evaluators.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5430,6 +5499,7 @@ Retrieve the Evaluator with the given ID. By default, the deployed version of the Evaluator is returned. Use the query parameters `version_id` or `environment` to target a specific version of the Evaluator. + @@ -5454,6 +5524,7 @@ client.evaluators.get( ) ``` + @@ -5468,7 +5539,7 @@ client.evaluators.get(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5476,7 +5547,7 @@ client.evaluators.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve. - +
@@ -5484,7 +5555,7 @@ client.evaluators.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -5492,13 +5563,12 @@ client.evaluators.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5516,6 +5586,7 @@ client.evaluators.get(
Delete the Evaluator with the given ID. +
@@ -5540,6 +5611,7 @@ client.evaluators.delete( ) ``` + @@ -5554,7 +5626,7 @@ client.evaluators.delete(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5562,13 +5634,12 @@ client.evaluators.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5586,6 +5657,7 @@ client.evaluators.delete(
Move the Evaluator to a different path or change the name. +
@@ -5611,6 +5683,7 @@ client.evaluators.move( ) ``` + @@ -5625,7 +5698,7 @@ client.evaluators.move(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5633,7 +5706,7 @@ client.evaluators.move(
**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier. - +
@@ -5641,7 +5714,7 @@ client.evaluators.move(
**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier. - +
@@ -5649,13 +5722,12 @@ client.evaluators.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5673,6 +5745,7 @@ client.evaluators.move(
Get a list of all the versions of an Evaluator. +
@@ -5697,6 +5770,7 @@ client.evaluators.list_versions( ) ``` + @@ -5711,7 +5785,7 @@ client.evaluators.list_versions(
**id:** `str` — Unique identifier for the Evaluator. - +
@@ -5719,7 +5793,7 @@ client.evaluators.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - +
@@ -5727,7 +5801,7 @@ client.evaluators.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response - +
@@ -5735,13 +5809,12 @@ client.evaluators.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5761,6 +5834,7 @@ client.evaluators.list_versions( Commit a version of the Evaluator with a commit message. If the version is already committed, an exception will be raised. + @@ -5787,6 +5861,7 @@ client.evaluators.commit( ) ``` + @@ -5801,7 +5876,7 @@ client.evaluators.commit(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5809,7 +5884,7 @@ client.evaluators.commit(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator. - +
@@ -5817,7 +5892,7 @@ client.evaluators.commit(
**commit_message:** `str` — Message describing the changes made. - +
@@ -5825,13 +5900,12 @@ client.evaluators.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5852,6 +5926,7 @@ Deploy Evaluator to an Environment. Set the deployed version for the specified Environment. This Evaluator will be used for calls made to the Evaluator in this Environment. + @@ -5878,6 +5953,7 @@ client.evaluators.set_deployment( ) ``` + @@ -5892,7 +5968,7 @@ client.evaluators.set_deployment(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5900,7 +5976,7 @@ client.evaluators.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - +
@@ -5908,7 +5984,7 @@ client.evaluators.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator. - +
@@ -5916,13 +5992,12 @@ client.evaluators.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -5943,6 +6018,7 @@ Remove deployed Evaluator from the Environment. Remove the deployed version for the specified Environment. This Evaluator will no longer be used for calls made to the Evaluator in this Environment. + @@ -5968,6 +6044,7 @@ client.evaluators.remove_deployment( ) ``` + @@ -5982,7 +6059,7 @@ client.evaluators.remove_deployment(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -5990,7 +6067,7 @@ client.evaluators.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. - +
@@ -5998,13 +6075,12 @@ client.evaluators.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6022,6 +6098,7 @@ client.evaluators.remove_deployment(
List all Environments and their deployed versions for the Evaluator. +
@@ -6046,6 +6123,7 @@ client.evaluators.list_environments( ) ``` + @@ -6060,7 +6138,7 @@ client.evaluators.list_environments(
**id:** `str` — Unique identifier for Evaluator. - +
@@ -6068,13 +6146,12 @@ client.evaluators.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6095,6 +6172,7 @@ Activate and deactivate Evaluators for monitoring the Evaluator. An activated Evaluator will automatically be run on all new Logs within the Evaluator for monitoring purposes. + @@ -6119,6 +6197,7 @@ client.evaluators.update_monitoring( ) ``` + @@ -6132,8 +6211,8 @@ client.evaluators.update_monitoring(
-**id:** `str` - +**id:** `str` +
@@ -6143,7 +6222,7 @@ client.evaluators.update_monitoring( **activate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] ]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. - + @@ -6153,7 +6232,7 @@ client.evaluators.update_monitoring( **deactivate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] ]` — Evaluators to deactivate. These will not be run on new Logs. - + @@ -6161,18 +6240,18 @@ client.evaluators.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
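Activation and deactivation can be combined in a single call, for example to swap monitoring from an old Evaluator version to a new one. A minimal sketch; the item payload shape is an assumption, so consult `EvaluatorActivationDeactivationRequestActivateItemParams` for the exact fields:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Swap monitoring from an old Evaluator version to a new one in one request.
client.evaluators.update_monitoring(
    id="ev_...",  # placeholder Evaluator ID
    activate=[{"evaluator_version_id": "evv_new"}],  # field name assumed
    deactivate=[{"evaluator_version_id": "evv_old"}],  # field name assumed
)
```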
- ## Flows +
client.flows.log(...)
@@ -6189,6 +6268,7 @@ Log to a Flow. You can use query parameters `version_id`, or `environment`, to target an existing version of the Flow. Otherwise, the default deployed version will be chosen. +
@@ -6241,6 +6321,7 @@ client.flows.log( ) ``` + @@ -6255,7 +6336,7 @@ client.flows.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to. - +
@@ -6263,7 +6344,7 @@ client.flows.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. - +
@@ -6271,7 +6352,7 @@ client.flows.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to. - +
@@ -6279,7 +6360,7 @@ client.flows.log(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -6287,7 +6368,7 @@ client.flows.log(
**id:** `typing.Optional[str]` — ID for an existing Flow. - +
@@ -6295,7 +6376,7 @@ client.flows.log(
**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added. - +
@@ -6303,7 +6384,7 @@ client.flows.log(
**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added. - +
@@ -6311,15 +6392,15 @@ client.flows.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - +
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. - +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +
@@ -6327,7 +6408,7 @@ client.flows.log(
**error:** `typing.Optional[str]` — Error message if the log is an error. - +
@@ -6335,7 +6416,7 @@ client.flows.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +
@@ -6343,7 +6424,7 @@ client.flows.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements. - +
@@ -6351,7 +6432,7 @@ client.flows.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. - +
@@ -6359,7 +6440,7 @@ client.flows.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. - +
@@ -6367,7 +6448,7 @@ client.flows.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. - +
@@ -6375,7 +6456,7 @@ client.flows.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from. - +
@@ -6383,7 +6464,7 @@ client.flows.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. - +
@@ -6391,7 +6472,7 @@ client.flows.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. - +
@@ -6399,7 +6480,7 @@ client.flows.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. - +
@@ -6407,7 +6488,7 @@ client.flows.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log. - +
@@ -6415,7 +6496,7 @@ client.flows.log(
**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. - +
@@ -6423,7 +6504,7 @@ client.flows.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. - +
@@ -6431,7 +6512,7 @@ client.flows.log(
**log_id:** `typing.Optional[str]` — The identifier for the Log. If not specified, a default ID will be generated. This allows additional Logs to be appended to the trace without waiting for Humanloop to return an ID. - +
@@ -6439,7 +6520,7 @@ client.flows.log(
**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace. - +
@@ -6447,7 +6528,7 @@ client.flows.log(
**trace_status:** `typing.Optional[TraceStatus]` — Status of the Trace. When a Trace is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Traces. If you do not intend to add more Logs to the Trace after creation, set this to `complete`. - +
@@ -6455,13 +6536,12 @@ client.flows.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
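Putting the parameters above together: because `log_id` may be client-generated, child Logs can be appended to the Trace without waiting for Humanloop to return an ID. A minimal sketch with a hypothetical path and placeholder IDs; the `log_` prefix is an assumption:

```python
import datetime as dt
import uuid

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# A client-generated log_id lets child Logs reference the Trace immediately.
trace_log_id = f"log_{uuid.uuid4()}"  # ID format is an assumption

client.flows.log(
    path="agents/support-bot",  # hypothetical Flow path
    log_id=trace_log_id,
    inputs={"question": "How do I reset my password?"},
    start_time=dt.datetime.now(dt.timezone.utc),
)

# Child Logs (e.g. Prompt Logs) can now pass trace_parent_id=trace_log_id.
```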
@@ -6482,6 +6562,7 @@ Retrieve the Flow with the given ID. By default, the deployed version of the Flow is returned. Use the query parameters `version_id` or `environment` to target a specific version of the Flow. + @@ -6506,6 +6587,7 @@ client.flows.get( ) ``` + @@ -6520,7 +6602,7 @@ client.flows.get(
**id:** `str` — Unique identifier for Flow. - +
@@ -6528,7 +6610,7 @@ client.flows.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve. - +
@@ -6536,7 +6618,7 @@ client.flows.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. - +
@@ -6544,13 +6626,12 @@ client.flows.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6568,6 +6649,7 @@ client.flows.get(
Delete the Flow with the given ID. +
@@ -6592,6 +6674,7 @@ client.flows.delete( ) ``` + @@ -6606,7 +6689,7 @@ client.flows.delete(
**id:** `str` — Unique identifier for Flow. - +
@@ -6614,13 +6697,12 @@ client.flows.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6638,6 +6720,7 @@ client.flows.delete(
Move the Flow to a different path or change the name. +
@@ -6663,6 +6746,7 @@ client.flows.move( ) ``` + @@ -6677,7 +6761,7 @@ client.flows.move(
**id:** `str` — Unique identifier for Flow. - +
@@ -6685,7 +6769,7 @@ client.flows.move(
**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier. - +
@@ -6693,7 +6777,7 @@ client.flows.move(
**name:** `typing.Optional[str]` — Name of the Flow. - +
@@ -6701,7 +6785,7 @@ client.flows.move(
**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`. - +
@@ -6709,13 +6793,12 @@ client.flows.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6733,6 +6816,7 @@ client.flows.move(
Get a list of Flows. +
@@ -6762,6 +6846,7 @@ for page in response.iter_pages(): yield page ``` + @@ -6776,7 +6861,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -6784,7 +6869,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch. - +
@@ -6792,7 +6877,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name. - +
@@ -6800,7 +6885,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. - +
@@ -6808,7 +6893,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by - +
@@ -6816,7 +6901,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -6824,13 +6909,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -6854,6 +6938,7 @@ Flows can also be identified by the `ID` or their `path`. If you provide a commit message, then the new version will be committed; otherwise it will be uncommitted. If you try to commit an already committed version, an exception will be raised. + @@ -6891,6 +6976,7 @@ client.flows.upsert( ) ``` + @@ -6905,7 +6991,7 @@ client.flows.upsert(
**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version. - +
@@ -6913,7 +6999,7 @@ client.flows.upsert(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. - +
@@ -6921,7 +7007,7 @@ client.flows.upsert(
**id:** `typing.Optional[str]` — ID for an existing Flow. - +
@@ -6929,7 +7015,7 @@ client.flows.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made. - +
@@ -6937,13 +7023,12 @@ client.flows.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
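A minimal sketch of the commit semantics described above, with a hypothetical path; omitting `commit_message` would leave the new version uncommitted:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# With commit_message the new version is committed on creation;
# without it, the version would stay uncommitted.
client.flows.upsert(
    path="agents/support-bot",  # hypothetical path
    attributes={"agent_version": "v2"},  # identifies the Flow Version
    commit_message="Switch planner to v2",
)
```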
- @@ -6964,6 +7049,7 @@ Update the status, inputs, output of a Flow Log. Marking a Flow Log as complete will trigger any monitoring Evaluators to run. Inputs and output (or error) must be provided in order to mark it as complete. + @@ -6993,6 +7079,7 @@ client.flows.update_log( ) ``` + @@ -7007,7 +7094,7 @@ client.flows.update_log(
**log_id:** `str` — Unique identifier of the Flow Log. - +
@@ -7015,7 +7102,7 @@ client.flows.update_log(
**trace_status:** `TraceStatus` — Status of the Trace. When a Trace is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on completed Traces. - +
@@ -7023,7 +7110,7 @@ client.flows.update_log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log. - +
@@ -7031,7 +7118,7 @@ client.flows.update_log(
**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this or `error`. - +
@@ -7039,7 +7126,7 @@ client.flows.update_log(
**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this or `output`. - +
@@ -7047,13 +7134,12 @@ client.flows.update_log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
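A minimal sketch of completing a Flow Log, which triggers any monitoring Evaluators; per the parameter notes above, inputs and output (or error) must be present. IDs are placeholders:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Marking the Trace complete triggers monitoring Evaluators.
client.flows.update_log(
    log_id="log_...",  # placeholder Flow Log ID
    trace_status="complete",
    inputs={"question": "How do I reset my password?"},
    output="Sent the user a password reset link.",
)
```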
- @@ -7071,6 +7157,7 @@ client.flows.update_log(
Get a list of all the versions of a Flow. +
@@ -7096,6 +7183,7 @@ client.flows.list_versions( ) ``` + @@ -7110,7 +7198,7 @@ client.flows.list_versions(
**id:** `str` — Unique identifier for Flow. - +
@@ -7118,7 +7206,7 @@ client.flows.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - +
@@ -7126,7 +7214,7 @@ client.flows.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response - +
@@ -7134,13 +7222,12 @@ client.flows.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7160,6 +7247,7 @@ client.flows.list_versions( Commit a version of the Flow with a commit message. If the version is already committed, an exception will be raised. + @@ -7186,6 +7274,7 @@ client.flows.commit( ) ``` + @@ -7200,7 +7289,7 @@ client.flows.commit(
**id:** `str` — Unique identifier for Flow. - +
@@ -7208,7 +7297,7 @@ client.flows.commit(
**version_id:** `str` — Unique identifier for the specific version of the Flow. - +
@@ -7216,7 +7305,7 @@ client.flows.commit(
**commit_message:** `str` — Message describing the changes made. - +
@@ -7224,13 +7313,12 @@ client.flows.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7251,6 +7339,7 @@ Deploy Flow to an Environment. Set the deployed version for the specified Environment. This Flow will be used for calls made to the Flow in this Environment. + @@ -7277,6 +7366,7 @@ client.flows.set_deployment( ) ``` + @@ -7291,7 +7381,7 @@ client.flows.set_deployment(
**id:** `str` — Unique identifier for Flow. - +
@@ -7299,7 +7389,7 @@ client.flows.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - +
@@ -7307,7 +7397,7 @@ client.flows.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Flow. - +
@@ -7315,13 +7405,12 @@ client.flows.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7342,6 +7431,7 @@ Remove deployed Flow from the Environment. Remove the deployed version for the specified Environment. This Flow will no longer be used for calls made to the Flow in this Environment. + @@ -7367,6 +7457,7 @@ client.flows.remove_deployment( ) ``` + @@ -7381,7 +7472,7 @@ client.flows.remove_deployment(
**id:** `str` — Unique identifier for Flow. - +
@@ -7389,7 +7480,7 @@ client.flows.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. - +
@@ -7397,13 +7488,12 @@ client.flows.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7421,6 +7511,7 @@ client.flows.remove_deployment(
List all Environments and their deployed versions for the Flow. +
@@ -7445,6 +7536,7 @@ client.flows.list_environments( ) ``` + @@ -7459,7 +7551,7 @@ client.flows.list_environments(
**id:** `str` — Unique identifier for Flow. - +
@@ -7467,13 +7559,12 @@ client.flows.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7494,6 +7585,7 @@ Activate and deactivate Evaluators for monitoring the Flow. An activated Evaluator will automatically be run on all new "completed" Logs within the Flow for monitoring purposes. + @@ -7519,6 +7611,7 @@ client.flows.update_monitoring( ) ``` + @@ -7532,8 +7625,8 @@ client.flows.update_monitoring(
-**id:** `str` - +**id:** `str` +
@@ -7543,7 +7636,7 @@ client.flows.update_monitoring( **activate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] ]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. - + @@ -7553,7 +7646,7 @@ client.flows.update_monitoring( **deactivate:** `typing.Optional[ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] ]` — Evaluators to deactivate. These will not be run on new Logs. - + @@ -7561,18 +7654,18 @@ client.flows.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- ## Directories +
client.directories.list()
@@ -7586,6 +7679,7 @@ client.flows.update_monitoring(
Retrieve a list of all Directories. +
@@ -7608,6 +7702,7 @@ client = Humanloop( client.directories.list() ``` + @@ -7622,13 +7717,12 @@ client.directories.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
@@ -7646,6 +7740,7 @@ client.directories.list()
Creates a Directory. +
@@ -7668,6 +7763,7 @@ client = Humanloop( client.directories.create() ``` + @@ -7682,7 +7778,7 @@ client.directories.create()
**name:** `typing.Optional[str]` — Name of the directory to create. - +
@@ -7690,7 +7786,7 @@ client.directories.create()
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Starts with `dir_`. - +
@@ -7698,7 +7794,7 @@ client.directories.create()
**path:** `typing.Optional[str]` — Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. - +
@@ -7706,13 +7802,12 @@ client.directories.create()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
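A minimal sketch of path-based creation as described above; intermediate directories that do not exist are created, and the final segment becomes the directory name:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Missing directories along the path are created automatically;
# the final segment ("evals") becomes the directory name.
client.directories.create(path="projects/support-bot/evals")
```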
- @@ -7730,6 +7825,7 @@ client.directories.create()
Fetches a directory by ID. +
@@ -7754,6 +7850,7 @@ client.directories.get( ) ``` + @@ -7768,7 +7865,7 @@ client.directories.get(
**id:** `str` — String ID of directory. Starts with `dir_`. - +
@@ -7776,13 +7873,12 @@ client.directories.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7802,6 +7898,7 @@ client.directories.get( Delete the Directory with the given ID. The Directory must be empty (i.e. contain no Directories or Files). + @@ -7826,6 +7923,7 @@ client.directories.delete( ) ``` + @@ -7840,7 +7938,7 @@ client.directories.delete(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`. - +
@@ -7848,13 +7946,12 @@ client.directories.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -7872,6 +7969,7 @@ client.directories.delete(
Update the Directory with the given ID. +
@@ -7896,6 +7994,7 @@ client.directories.update( ) ``` + @@ -7910,7 +8009,7 @@ client.directories.update(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`. - +
@@ -7918,7 +8017,7 @@ client.directories.update(
**name:** `typing.Optional[str]` — Name to set for the directory. - +
@@ -7926,7 +8025,7 @@ client.directories.update(
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Specify this to move directories. Starts with `dir_`. - +
@@ -7934,7 +8033,7 @@ client.directories.update(
**path:** `typing.Optional[str]` — Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. - +
@@ -7942,18 +8041,18 @@ client.directories.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- ## Files +
client.files.list(...)
@@ -7967,6 +8066,7 @@ client.directories.update(
Get a paginated list of files. +
@@ -7989,6 +8089,7 @@ client = Humanloop( client.files.list() ``` + @@ -8003,7 +8104,7 @@ client.files.list()
**page:** `typing.Optional[int]` — Page offset for pagination. - +
@@ -8011,7 +8112,7 @@ client.files.list()
**size:** `typing.Optional[int]` — Page size for pagination. Number of files to fetch. - +
@@ -8019,7 +8120,7 @@ client.files.list()
**name:** `typing.Optional[str]` — Case-insensitive filter for file name. - +
@@ -8027,7 +8128,7 @@ client.files.list()
**type:** `typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]]` — List of file types to filter for. - +
@@ -8035,7 +8136,7 @@ client.files.list()
**environment:** `typing.Optional[str]` — Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. - +
@@ -8043,7 +8144,7 @@ client.files.list()
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by - +
@@ -8051,7 +8152,7 @@ client.files.list()
**order:** `typing.Optional[SortOrder]` — Direction to sort by. - +
@@ -8059,18 +8160,18 @@ client.files.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
## Evaluations +
client.evaluations.list(...)
@@ -8084,6 +8185,12 @@ client.files.list()
Retrieve a list of Evaluations for the specified File. +
@@ -8114,6 +8221,7 @@ for page in response.iter_pages(): yield page ``` + @@ -8128,7 +8236,7 @@ for page in response.iter_pages():
**file_id:** `str` — Filter by File ID. Only Evaluations for the specified File will be returned. - +
@@ -8136,7 +8244,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -8144,7 +8252,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluations to fetch. - +
@@ -8152,13 +8260,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
@@ -8180,6 +8287,7 @@ Create an Evaluation. Create a new Evaluation by specifying the File to evaluate, and a name for the Evaluation. You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. + @@ -8204,6 +8312,7 @@ client.evaluations.create( ) ``` + @@ -8218,7 +8327,12 @@ client.evaluations.create(
**evaluators:** `typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]` — The Evaluators used to evaluate. - +
@@ -8226,7 +8340,7 @@ client.evaluations.create(
**file:** `typing.Optional[FileRequestParams]` — The File to associate with the Evaluation. This File contains the Logs you're evaluating. - +
@@ -8234,7 +8348,7 @@ client.evaluations.create(
**name:** `typing.Optional[str]` — Name of the Evaluation to help identify it. Must be unique within the associated File. - +
@@ -8242,13 +8356,12 @@ client.evaluations.create(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
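A minimal sketch of creating an Evaluation and keeping its ID for the runs endpoints below; the `file` and `evaluators` payload shapes are assumptions, so consult `FileRequestParams` and `CreateEvaluationRequestEvaluatorsItemParams` for the exact fields:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

evaluation = client.evaluations.create(
    name="Prompt regression suite",
    file={"id": "pr_..."},  # payload shape assumed; see FileRequestParams
    evaluators=[{"version_id": "evv_..."}],  # shape assumed; see CreateEvaluationRequestEvaluatorsItemParams
)
print(evaluation.id)  # attribute name assumed; used by the runs endpoints below
```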
- @@ -8268,6 +8381,12 @@ client.evaluations.create( Add Evaluators to an Evaluation. The Evaluators will be run on the Logs generated for the Evaluation. + @@ -8293,6 +8412,7 @@ client.evaluations.add_evaluators( ) ``` + @@ -8307,7 +8427,7 @@ client.evaluations.add_evaluators(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8315,7 +8435,12 @@ client.evaluations.add_evaluators(
**evaluators:** `typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]` — The Evaluators to add to this Evaluation. - +
@@ -8323,13 +8448,12 @@ client.evaluations.add_evaluators(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8349,6 +8473,12 @@ client.evaluations.add_evaluators( Remove an Evaluator from an Evaluation. The Evaluator will no longer be run on the Logs in the Evaluation. + @@ -8374,6 +8504,7 @@ client.evaluations.remove_evaluator( ) ``` + @@ -8388,7 +8519,7 @@ client.evaluations.remove_evaluator(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8396,7 +8527,7 @@ client.evaluations.remove_evaluator(
**evaluator_version_id:** `str` — Unique identifier for Evaluator Version. - +
@@ -8404,13 +8535,12 @@ client.evaluations.remove_evaluator(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8434,6 +8564,12 @@ such as its name. To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + @@ -8458,6 +8594,7 @@ client.evaluations.get( ) ``` + @@ -8472,7 +8609,7 @@ client.evaluations.get(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8480,13 +8617,12 @@ client.evaluations.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8506,6 +8642,12 @@ client.evaluations.get( Delete an Evaluation. The Runs and Evaluators in the Evaluation will not be deleted. + @@ -8530,6 +8672,7 @@ client.evaluations.delete( ) ``` + @@ -8544,7 +8687,7 @@ client.evaluations.delete(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8552,13 +8695,12 @@ client.evaluations.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8576,6 +8718,7 @@ client.evaluations.delete(
List all Runs for an Evaluation. +
@@ -8600,6 +8743,7 @@ client.evaluations.list_runs_for_evaluation( ) ``` + @@ -8614,7 +8758,7 @@ client.evaluations.list_runs_for_evaluation(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8622,13 +8766,12 @@ client.evaluations.list_runs_for_evaluation(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8659,6 +8802,7 @@ referencing a datapoint in the specified Dataset will be associated with the Run To keep updated on the progress of the Run, you can poll the Run using the `GET /evaluations/{id}/runs` endpoint and check its status. + @@ -8683,6 +8827,7 @@ client.evaluations.create_run( ) ``` + @@ -8697,7 +8842,7 @@ client.evaluations.create_run(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8705,7 +8850,12 @@ client.evaluations.create_run(
**dataset:** `typing.Optional[CreateRunRequestDatasetParams]` — Dataset to use in this Run. - +
@@ -8713,7 +8863,12 @@ client.evaluations.create_run(
**version:** `typing.Optional[CreateRunRequestVersionParams]` — Version to use in this Run. - +
@@ -8721,7 +8876,7 @@ client.evaluations.create_run(
**orchestrated:** `typing.Optional[bool]` — Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. - +
@@ -8729,7 +8884,12 @@ client.evaluations.create_run(
**use_existing_logs:** `typing.Optional[bool]` — If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. - +
@@ -8737,13 +8897,12 @@ client.evaluations.create_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
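The description above suggests polling the Runs endpoint to track progress. A minimal sketch under stated assumptions: the `dataset`/`version` payload shapes and the `.runs`, `.id`, and `.status` response attributes are illustrative, not confirmed by this reference:

```python
import time

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

run = client.evaluations.create_run(
    id="ev_...",  # placeholder Evaluation ID
    dataset={"version_id": "dsv_..."},  # shape assumed; see CreateRunRequestDatasetParams
    version={"version_id": "prv_..."},  # shape assumed; see CreateRunRequestVersionParams
    orchestrated=True,  # Humanloop generates the Logs for the Run
)

# Poll until the Run leaves the pending/running states.
while True:
    runs = client.evaluations.list_runs_for_evaluation(id="ev_...")
    status = next(r.status for r in runs.runs if r.id == run.id)
    if status not in ("pending", "running"):
        break
    time.sleep(5)
```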
- @@ -8764,6 +8923,12 @@ Add an existing Run to the specified Evaluation. This is useful if you want to compare the Runs in this Evaluation with an existing Run from another Evaluation. + @@ -8789,6 +8954,7 @@ client.evaluations.add_existing_run( ) ``` + @@ -8803,7 +8969,7 @@ client.evaluations.add_existing_run(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8811,7 +8977,7 @@ client.evaluations.add_existing_run(
**run_id:** `str` — Unique identifier for Run. - +
@@ -8819,13 +8985,12 @@ client.evaluations.add_existing_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8846,6 +9011,7 @@ Remove a Run from an Evaluation. The Logs and Versions used in the Run will not be deleted. If this Run is used in any other Evaluations, it will still be available in those Evaluations. + @@ -8871,6 +9037,7 @@ client.evaluations.remove_run( ) ``` + @@ -8885,7 +9052,7 @@ client.evaluations.remove_run(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8893,7 +9060,7 @@ client.evaluations.remove_run(
**run_id:** `str` — Unique identifier for Run. - +
@@ -8901,13 +9068,12 @@ client.evaluations.remove_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -8928,6 +9094,12 @@ Update an Evaluation Run. Specify `control=true` to use this Run as the control Run for the Evaluation. You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + @@ -8953,6 +9125,7 @@ client.evaluations.update_evaluation_run( ) ``` + @@ -8967,7 +9140,7 @@ client.evaluations.update_evaluation_run(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -8975,7 +9148,7 @@ client.evaluations.update_evaluation_run(
**run_id:** `str` — Unique identifier for Run. - +
@@ -8983,7 +9156,12 @@ client.evaluations.update_evaluation_run(
**control:** `typing.Optional[bool]` — If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. - +
@@ -8991,7 +9169,12 @@ client.evaluations.update_evaluation_run(
**status:** `typing.Optional[EvaluationStatus]` — Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. - +
@@ -8999,13 +9182,12 @@ client.evaluations.update_evaluation_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
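A minimal sketch of the two updates described above: promoting a Run to control, and cancelling a Run that is still pending or running. IDs are placeholders:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Promote a Run to control: other Runs' stats are compared against it.
client.evaluations.update_evaluation_run(
    id="ev_...",  # placeholder Evaluation ID
    run_id="run_...",  # placeholder Run ID; prefix assumed
    control=True,
)

# Cancel a Run that is still pending or running.
client.evaluations.update_evaluation_run(
    id="ev_...",
    run_id="run_...",
    status="cancelled",
)
```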
- @@ -9023,6 +9205,12 @@ client.evaluations.update_evaluation_run(
Add the specified Logs to a Run. +
@@ -9049,6 +9237,7 @@ client.evaluations.add_logs_to_run( ) ``` + @@ -9063,7 +9252,7 @@ client.evaluations.add_logs_to_run(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -9071,7 +9260,7 @@ client.evaluations.add_logs_to_run(
**run_id:** `str` — Unique identifier for Run. - +
@@ -9079,7 +9268,7 @@ client.evaluations.add_logs_to_run(
**log_ids:** `typing.Sequence[str]` — The IDs of the Logs to add to the Run. - +
@@ -9087,13 +9276,12 @@ client.evaluations.add_logs_to_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -9114,6 +9302,7 @@ Get Evaluation Stats. Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the corresponding Evaluator statistics (such as the mean and percentiles). + @@ -9138,6 +9327,7 @@ client.evaluations.get_stats( ) ``` + @@ -9152,7 +9342,7 @@ client.evaluations.get_stats(
**id:** `str` — Unique identifier for Evaluation. - +
@@ -9160,13 +9350,12 @@ client.evaluations.get_stats(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -9186,6 +9375,7 @@ client.evaluations.get_stats( Get the Logs associated to a specific Evaluation. This returns the Logs associated to all Runs within the Evaluation. + @@ -9210,6 +9400,7 @@ client.evaluations.get_logs( ) ``` + @@ -9224,7 +9415,7 @@ client.evaluations.get_logs(
**id:** `str` — String ID of evaluation. Starts with `ev_` or `evr_`. - +
@@ -9232,7 +9423,7 @@ client.evaluations.get_logs(
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -9240,7 +9431,7 @@ client.evaluations.get_logs(
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch. - +
@@ -9248,18 +9439,18 @@ client.evaluations.get_logs(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- ## Logs +
client.logs.list(...)
@@ -9273,6 +9464,7 @@ client.evaluations.get_logs(
List all Logs for the given filter criteria. +
@@ -9303,6 +9495,7 @@ for page in response.iter_pages(): yield page ``` + @@ -9317,7 +9510,7 @@ for page in response.iter_pages():
**file_id:** `str` — Unique identifier for the File to list Logs for. - +
@@ -9325,7 +9518,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination. - +
@@ -9333,7 +9526,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch. - +
@@ -9341,7 +9534,7 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — If provided, only Logs belonging to the specified Version will be returned. - +
@@ -9349,7 +9542,7 @@ for page in response.iter_pages():
**version_status:** `typing.Optional[VersionStatus]` — If provided, only Logs belonging to Versions with the specified status will be returned. - +
@@ -9357,7 +9550,7 @@ for page in response.iter_pages():
**search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in their inputs and output will be returned. - +
@@ -9365,7 +9558,7 @@ for page in response.iter_pages():
**metadata_search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in their metadata will be returned. - +
@@ -9373,7 +9566,7 @@ for page in response.iter_pages():
**start_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created after the specified date will be returned. - +
@@ -9381,7 +9574,7 @@ for page in response.iter_pages():
**end_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created before the specified date will be returned. - +
@@ -9389,7 +9582,7 @@ for page in response.iter_pages():
**include_parent:** `typing.Optional[bool]` — If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs. - +
@@ -9397,7 +9590,15 @@ for page in response.iter_pages():
**in_trace_filter:** `typing.Optional[typing.Union[bool, typing.Sequence[bool]]]` — If true, return Logs that are associated to a Trace. If false, return Logs that are not associated to a Trace. - + +
+ + +
+
+ +**sample_n:** `typing.Optional[int]` — If provided, only a random sample of approximately N Logs will be returned. +
@@ -9405,13 +9606,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
-
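A minimal sketch combining the filters above with the new `sample_n` parameter, using the pager pattern shown in the earlier examples; the `pr_` File ID prefix and the `.id` attribute are assumptions:

```python
import datetime as dt

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

response = client.logs.list(
    file_id="pr_...",  # placeholder File ID
    start_date=dt.datetime(2024, 1, 1),
    sample_n=100,  # approximately 100 randomly sampled Logs
    size=50,
)
for page in response.iter_pages():
    for log in page:
        print(log.id)  # attribute name assumed
```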
@@ -9429,6 +9629,7 @@ for page in response.iter_pages():
Delete Logs with the given IDs. +
@@ -9453,6 +9654,7 @@ client.logs.delete( ) ``` + @@ -9467,7 +9669,7 @@ client.logs.delete(
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Unique identifiers for the Logs to delete. - +
@@ -9475,13 +9677,12 @@ client.logs.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- @@ -9499,6 +9700,7 @@ client.logs.delete(
Retrieve the Log with the given ID. +
@@ -9523,6 +9725,7 @@ client.logs.get( ) ``` + @@ -9537,7 +9740,7 @@ client.logs.get(
**id:** `str` — Unique identifier for Log. - +
@@ -9545,14 +9748,12 @@ client.logs.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - +
- - diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py index 4ce861b0..cba542c9 100644 --- a/src/humanloop/__init__.py +++ b/src/humanloop/__init__.py @@ -22,7 +22,6 @@ DatapointResponse, DatapointResponseTargetValue, DatasetResponse, - DatasetsRequest, DirectoryResponse, DirectoryWithParentsAndChildrenResponse, DirectoryWithParentsAndChildrenResponseFilesItem, @@ -31,10 +30,7 @@ EvaluateeRequest, EvaluateeResponse, EvaluationEvaluatorResponse, - EvaluationLogResponse, EvaluationResponse, - EvaluationRunResponse, - EvaluationRunsResponse, EvaluationStats, EvaluationStatus, EvaluationsDatasetRequest, @@ -45,8 +41,6 @@ EvaluatorAggregate, EvaluatorArgumentsType, EvaluatorConfigResponse, - EvaluatorFileId, - EvaluatorFilePath, EvaluatorJudgmentNumberLimit, EvaluatorJudgmentOptionResponse, EvaluatorLogResponse, @@ -54,14 +48,10 @@ EvaluatorResponse, EvaluatorResponseSpec, EvaluatorReturnTypeEnum, - EvaluatorVersionId, - EvaluatorsRequest, ExternalEvaluatorRequest, FeedbackType, FileEnvironmentResponse, FileEnvironmentResponseFile, - FileId, - FilePath, FileRequest, FileType, FilesToolType, @@ -94,7 +84,6 @@ NumericEvaluatorStatsResponse, ObservabilityStatus, OverallStats, - PaginatedDataEvaluationLogResponse, PaginatedDataEvaluatorResponse, PaginatedDataFlowResponse, PaginatedDataLogResponse, @@ -124,9 +113,6 @@ ProviderApiKeys, ResponseFormat, ResponseFormatType, - RunStatsResponse, - RunStatsResponseEvaluatorStatsItem, - RunVersionResponse, SelectEvaluatorStatsResponse, SortOrder, TextChatContent, @@ -147,7 +133,6 @@ ValidationErrorLocItem, VersionDeploymentResponse, VersionDeploymentResponseFile, - VersionId, VersionIdResponse, VersionIdResponseVersion, VersionReferenceResponse, @@ -159,16 +144,6 @@ from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools from .client import AsyncHumanloop, Humanloop from .environment import HumanloopEnvironment -from .evaluations import ( - AddEvaluatorsRequestEvaluatorsItem, - AddEvaluatorsRequestEvaluatorsItemParams, - CreateEvaluationRequestEvaluatorsItem, - CreateEvaluationRequestEvaluatorsItemParams, - CreateRunRequestDataset, - CreateRunRequestDatasetParams, - CreateRunRequestVersion, - CreateRunRequestVersionParams, -) from .evaluators import ( CreateEvaluatorLogRequestJudgment, CreateEvaluatorLogRequestJudgmentParams, @@ -218,8 +193,6 @@ EvaluationEvaluatorResponseParams, EvaluationLogResponseParams, EvaluationResponseParams, - EvaluationRunResponseParams, - EvaluationRunsResponseParams, EvaluationStatsParams, EvaluatorActivationDeactivationRequestActivateItemParams, EvaluatorActivationDeactivationRequestDeactivateItemParams, diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 17e8e41d..66e7d7cc 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -1,19 +1,48 @@ -import typing -from typing import Optional, List, Sequence +from contextvars import ContextVar import os +import typing +from typing import List, Optional, Sequence +from typing_extensions import Unpack + import httpx +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.trace import Tracer + +from humanloop.core.client_wrapper import SyncClientWrapper +from humanloop.decorators.types import DecoratorPromptKernelRequestParams +from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext + +from humanloop.eval_utils import log_with_evaluation_context, run_eval +from humanloop.eval_utils.types import 
Dataset, Evaluator, EvaluatorCheck, File -from .base_client import BaseHumanloop, AsyncBaseHumanloop -from .environment import HumanloopEnvironment -from .eval_utils import _run_eval, Dataset, File, Evaluator, EvaluatorCheck -from .prompts.client import PromptsClient -from .evaluations.client import EvaluationsClient -from .prompt_utils import populate_template +from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop +from humanloop.decorators.flow import flow as flow_decorator_factory +from humanloop.decorators.prompt import prompt as prompt_decorator_factory +from humanloop.decorators.tool import tool as tool_decorator_factory +from humanloop.environment import HumanloopEnvironment +from humanloop.evaluations.client import EvaluationsClient +from humanloop.otel import instrument_provider +from humanloop.otel.exporter import HumanloopSpanExporter +from humanloop.otel.processor import HumanloopSpanProcessor +from humanloop.prompt_utils import populate_template +from humanloop.prompts.client import PromptsClient +from humanloop.requests.flow_kernel_request import FlowKernelRequestParams +from humanloop.requests.tool_kernel_request import ToolKernelRequestParams class ExtendedEvalsClient(EvaluationsClient): client: BaseHumanloop + def __init__( + self, + *, + client_wrapper: SyncClientWrapper, + evaluation_context_variable: ContextVar[Optional[EvaluationContext]], + ): + super().__init__(client_wrapper=client_wrapper) + self._evaluation_context_variable = evaluation_context_variable + def run( self, file: File, @@ -35,13 +64,14 @@ def run( if self.client is None: raise ValueError("Need Humanloop client defined to run evals") - return _run_eval( + return run_eval( client=self.client, file=file, name=name, dataset=dataset, evaluators=evaluators, workers=workers, + evaluation_context_variable=self._evaluation_context_variable, ) @@ -51,9 +81,10 @@ class ExtendedPromptsClient(PromptsClient): class Humanloop(BaseHumanloop): """ - See docstring of BaseHumanloop. + See docstring of :class:`BaseHumanloop`. - This class extends the base client that contains the auto generated SDK functionality with custom evaluation utilities. + This class extends the base client with custom evaluation utilities + and decorators for declaring Files in code. """ def __init__( @@ -65,10 +96,19 @@ def __init__( timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, + opentelemetry_tracer_provider: Optional[TracerProvider] = None, + opentelemetry_tracer: Optional[Tracer] = None, ): - """See docstring of BaseHumanloop.__init__(...) + """See docstring of :func:`BaseHumanloop.__init__(...)` - This method extends the base client with evaluation utilities. + This class extends the base client with custom evaluation utilities + and decorators for declaring Files in code. + + The Humanloop SDK File decorators use OpenTelemetry internally. You can provide a + TracerProvider and a Tracer if you'd like to integrate them with your existing + telemetry system. Otherwise, an internal TracerProvider will be used. + If you provide only the `TracerProvider`, the SDK will log under a Tracer + named `humanloop.sdk`.
""" super().__init__( base_url=base_url, @@ -78,11 +118,284 @@ def __init__( follow_redirects=follow_redirects, httpx_client=httpx_client, ) - eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper) + + self.evaluation_context_variable: ContextVar[Optional[EvaluationContext]] = ContextVar( + EVALUATION_CONTEXT_VARIABLE_NAME + ) + + eval_client = ExtendedEvalsClient( + client_wrapper=self._client_wrapper, + evaluation_context_variable=self.evaluation_context_variable, + ) eval_client.client = self self.evaluations = eval_client self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper) + # Overload the .log method of the clients to be aware of Evaluation Context + # TODO: Overload the log for Evaluators and Tools once run_id is added + # to them. + self.prompts = log_with_evaluation_context( + client=self.prompts, + evaluation_context_variable=self.evaluation_context_variable, + ) + # self.evaluators = log_with_evaluation_context(client=self.evaluators) + # self.tools = log_with_evaluation_context(client=self.tools) + self.flows = log_with_evaluation_context( + client=self.flows, + evaluation_context_variable=self.evaluation_context_variable, + ) + + if opentelemetry_tracer_provider is not None: + self._tracer_provider = opentelemetry_tracer_provider + else: + self._tracer_provider = TracerProvider( + resource=Resource( + attributes={ + "instrumentor": "humanloop.sdk", + } + ), + ) + instrument_provider(provider=self._tracer_provider) + self._tracer_provider.add_span_processor( + HumanloopSpanProcessor( + exporter=HumanloopSpanExporter( + client=self, + ) + ), + ) + + if opentelemetry_tracer is None: + self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk") + else: + self._opentelemetry_tracer = opentelemetry_tracer + + def prompt( + self, + *, + path: Optional[str] = None, + **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore + ): + """Decorator for declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code. + + The decorator intercepts calls to LLM provider APIs and creates + a new Prompt file based on the hyperparameters used in the call. + If a hyperparameter is specified in the `@prompt` decorator, then + they override any value intercepted from the LLM provider call. + + If the (Prompt)[https://humanloop.com/docs/explanation/prompts] already exists + on the specified path, a new version will be upserted when any of the above change. + + Here's an example of declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code: + + ```python + @prompt(template="You are an assistant on the following topics: {{topics}}.") + def call_llm(messages): + client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + return client.chat.completions.create( + model="gpt-4o", + temperature=0.8, + frequency_penalty=0.5, + max_tokens=200, + messages=messages, + ).choices[0].message.content + ``` + + This will create a (Prompt)[https://humanloop.com/docs/explanation/prompts] with the following attributes: + + ```python + { + model: "gpt-4o", + endpoint: "chat", + template: "You are an assistant on the following topics: {{topics}}.", + provider: "openai", + max_tokens: 200, + temperature: 0.8, + frequency_penalty: 0.5, + } + + Every call to the decorated function will create a Log against the Prompt. 
For example: + + ```python + call_llm(messages=[ + {"role": "system", "content": "You are an assistant on the following topics: finance."}, + {"role": "user", "content": "What can you do?"}, + ]) + ``` + + The Prompt Log will be created with the following inputs: + ```python + { + "inputs": { + "topics": "finance" + }, + "messages": [ + {"role": "system", "content": "You are an assistant on the following topics: finance."}, + {"role": "user", "content": "What can you do?"}, + ], + "output": "Hello, I'm an assistant that can help you with anything related to finance." + } + ``` + + The decorated function should return a string or the output should be JSON serializable. If + the output cannot be serialized, TypeError will be raised. + + If the function raises an exception, the log created by the function will have the output + field set to None and the error field set to the string representation of the exception. + + :param path: The path where the Prompt is created. If not + provided, the function name is used as the path and the File + is created in the root of your Humanloop organization workspace. + + :param prompt_kernel: Attributes that define the Prompt. See :class:`DecoratorPromptKernelRequestParams` + """ + return prompt_decorator_factory( + opentelemetry_tracer=self._opentelemetry_tracer, + path=path, + **prompt_kernel, + ) + + def tool( + self, + *, + path: Optional[str] = None, + **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore + ): + """Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code. + + The decorator inspects the wrapped function's source code, name, + argument type hints and docstring to infer the values that define + the [Tool](https://humanloop.com/docs/explanation/tools). + + If the [Tool](https://humanloop.com/docs/explanation/tools) already exists + on the specified path, a new version will be upserted when any of the + above change. + + Here's an example of declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code: + + ```python + @tool + def calculator(a: int, b: Optional[int]) -> int: + \"\"\"Add two numbers together.\"\"\" + return a + b + ``` + + This will create a [Tool](https://humanloop.com/docs/explanation/tools) with the following attributes: + ```python + { + "strict": True, + "function": { + "name": "calculator", + "description": "Add two numbers together.", + "parameters": { + "type": "object", + "properties": { + "a": {"type": "integer"}, + "b": {"type": "integer"} + }, + "required": ["a"], + }, + } + } + ``` + + Every call to the decorated function will create a Log against the Tool. For example: + + ```python + calculator(a=1, b=2) + ``` + + Will create the following Log: + + ```python + { + "inputs": { + "a": 1, + "b": 2 + }, + "output": 3 + } + ``` + + The decorated function should return a string or the output should be JSON serializable. If + the output cannot be serialized, TypeError will be raised. + + If the function raises an exception, the log created by the function will have the output + field set to None and the error field set to the string representation of the exception. + + :param path: The path to the Tool. If not provided, the function name + will be used as the path and the File will be created in the root + of your organization's workspace. + + :param tool_kernel: Attributes that define the Tool.
See :class:`ToolKernelRequestParams` + """ + return tool_decorator_factory( + opentelemetry_tracer=self._opentelemetry_tracer, + path=path, + **tool_kernel, + ) + + def flow( + self, + *, + path: Optional[str] = None, + **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore + ): + """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code. + + A [Flow](https://humanloop.com/docs/explanation/flows) decorator should be added + at the entrypoint of your LLM feature. Call other functions decorated with + Humanloop SDK decorators to create a Trace of Logs on Humanloop. + + Here's an example of declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code: + ```python + @prompt(template="You are an assistant on the following topics: {{topics}}.") + def call_llm(messages): + client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + return client.chat.completions.create( + model="gpt-4o", + temperature=0.8, + frequency_penalty=0.5, + max_tokens=200, + messages=messages, + ).choices[0].message.content + + @flow(attributes={"version": "v1"}) + def entrypoint(): + messages = [] + while True: + user_input = input("You: ") + if user_input == "exit": + break + messages.append({"role": "user", "content": user_input}) + response = call_llm(messages) + messages.append({"role": "assistant", "content": response}) + print(f"Assistant: {response}") + ``` + + In this example, the Flow instruments a conversational agent where the + Prompt defined in `call_llm` is called multiple times in a loop. Calling + `entrypoint` will create a Flow Trace under which multiple Prompt Logs + will be nested, allowing you to track the whole conversation session + between the user and the assistant. + + The decorated function should return a string or the output should be JSON serializable. If + the output cannot be serialized, TypeError will be raised. + + If the function raises an exception, the log created by the function will have the output + field set to None and the error field set to the string representation of the exception. + + :param path: The path to the Flow. If not provided, the function name + will be used as the path and the File will be created in the root + of your organization's workspace. + + :param flow_kernel: Attributes that define the Flow.
See :class:`FlowKernelRequestParams` + """ + return flow_decorator_factory( + opentelemetry_tracer=self._opentelemetry_tracer, + path=path, + **flow_kernel, + ) + class AsyncHumanloop(AsyncBaseHumanloop): """ diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index d1349af7..2fb335f8 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -30,7 +30,12 @@ def get_timeout(self) -> typing.Optional[float]: class SyncClientWrapper(BaseClientWrapper): def __init__( - self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: httpx.Client, ): super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = HttpClient( @@ -43,7 +48,12 @@ def __init__( class AsyncClientWrapper(BaseClientWrapper): def __init__( - self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient + self, + *, + api_key: str, + base_url: str, + timeout: typing.Optional[float] = None, + httpx_client: httpx.AsyncClient, ): super().__init__(api_key=api_key, base_url=base_url, timeout=timeout) self.httpx_client = AsyncHttpClient( diff --git a/src/humanloop/decorators/__init__.py b/src/humanloop/decorators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py new file mode 100644 index 00000000..aa78c82d --- /dev/null +++ b/src/humanloop/decorators/flow.py @@ -0,0 +1,110 @@ +import logging +from functools import wraps +from typing import Any, Callable, Mapping, Optional, Sequence + +from opentelemetry.sdk.trace import Span +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.decorators.helpers import args_to_inputs +from humanloop.eval_utils.types import File +from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext +from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY +from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests.flow_kernel_request import FlowKernelRequestParams + +logger = logging.getLogger("humanloop.sdk") + + +def flow( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore +): + flow_kernel["attributes"] = {k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None} + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: + span: Span + with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span: # type: ignore + span_id = span.get_span_context().span_id + if span.parent: + span_parent_id = span.parent.span_id + parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id) + if parent_trace_metadata: + TRACE_FLOW_CONTEXT[span_id] = FlowContext( + trace_id=span_id, + trace_parent_id=span_parent_id, + is_flow_log=True, + ) + + else: + # The Flow Log is not nested under another Flow Log + # Set the trace_id to the current span_id + TRACE_FLOW_CONTEXT[span_id] = FlowContext( + trace_id=span_id, + trace_parent_id=None, + is_flow_log=True, + ) + + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) +
span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "flow") + if flow_kernel: + write_to_opentelemetry_span( + span=span, + key=f"{HUMANLOOP_FILE_KEY}.flow", + value=flow_kernel, # type: ignore + ) + + inputs = args_to_inputs(func, args, kwargs) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=None, + ) + error = str(e) + + flow_log = { + "inputs": inputs, + "output": output_stringified, + "error": error, + } + + # Write the Flow Log to the Span on HL_LOG_OT_KEY + if flow_log: + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=flow_log, # type: ignore + ) + + # Return the output of the decorated function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="flow", + version=FlowDict(**flow_kernel), # type: ignore + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/decorators/helpers.py b/src/humanloop/decorators/helpers.py new file mode 100644 index 00000000..d501f800 --- /dev/null +++ b/src/humanloop/decorators/helpers.py @@ -0,0 +1,21 @@ +import inspect +from typing import Any, Callable + + +def args_to_inputs(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]: + """Maps arguments to their corresponding parameter names in the function signature. + + For example: + ```python + def foo(a, b=2, c=3): + pass + + assert args_to_inputs(foo, (1, 2), {}) == {'a': 1, 'b': 2, 'c': 3} + assert args_to_inputs(foo, (1,), {'b': 8}) == {'a': 1, 'b': 8, 'c': 3} + assert args_to_inputs(foo, (1,), {}) == {'a': 1, 'b': 2, 'c': 3} + ``` + """ + signature = inspect.signature(func) + bound_args = signature.bind(*args, **kwargs) + bound_args.apply_defaults() + return dict(bound_args.arguments) diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py new file mode 100644 index 00000000..c1f68a77 --- /dev/null +++ b/src/humanloop/decorators/prompt.py @@ -0,0 +1,94 @@ +import logging +from functools import wraps +from typing import Any, Callable, Mapping, Optional, Sequence + +from opentelemetry.sdk.trace import Span +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.decorators.helpers import args_to_inputs +from humanloop.decorators.types import DecoratorPromptKernelRequestParams +from humanloop.eval_utils import File +from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext +from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY +from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span + +logger = logging.getLogger("humanloop.sdk") + + +def prompt( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + # TODO: Template can be a list of objects?
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore +): + def decorator(func: Callable): + @wraps(func) + def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any: + span: Span + with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span: # type: ignore + span_id = span.get_span_context().span_id + if span.parent: + span_parent_id = span.parent.span_id + parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id, {}) + if parent_trace_metadata: + TRACE_FLOW_CONTEXT[span_id] = FlowContext( + trace_id=parent_trace_metadata["trace_id"], + trace_parent_id=span_parent_id, + is_flow_log=False, + ) + + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt") + + if prompt_kernel: + write_to_opentelemetry_span( + span=span, + key=f"{HUMANLOOP_FILE_KEY}.prompt", + value={ + **prompt_kernel, # type: ignore + "attributes": prompt_kernel.get("attributes") or None, # type: ignore + }, # type: ignore + ) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = str(e) + + prompt_log = { + "inputs": args_to_inputs(func, args, kwargs), + "output": output_stringified, + "error": error, + } + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=prompt_log, # type: ignore + ) + + # Return the output of the decorated function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="prompt", + version={**prompt_kernel}, # type: ignore + callable=wrapper, + ) + + return wrapper + + return decorator diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py new file mode 100644 index 00000000..0662041b --- /dev/null +++ b/src/humanloop/decorators/tool.py @@ -0,0 +1,519 @@ +import builtins +import inspect +import logging +import sys +import textwrap +import typing +from dataclasses import dataclass +from functools import wraps +from inspect import Parameter +from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union + +from opentelemetry.trace import Tracer +from typing_extensions import Unpack + +from humanloop.decorators.helpers import args_to_inputs +from humanloop.eval_utils import File +from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span +from humanloop.requests.tool_function import ToolFunctionParams +from humanloop.requests.tool_kernel_request import ToolKernelRequestParams + +if sys.version_info >= (3, 10): + import types + +logger = logging.getLogger("humanloop.sdk") + + +def tool( + opentelemetry_tracer: Tracer, + path: Optional[str] = None, + **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore +): + def decorator(func: Callable): + enhanced_tool_kernel = _build_tool_kernel( + func=func, + attributes=tool_kernel.get("attributes"), + setup_values=tool_kernel.get("setup_values"), + strict=True, + ) + + # Mypy complains about adding attribute on function, but it's nice UX + func.json_schema = 
enhanced_tool_kernel["function"] # type: ignore + + @wraps(func) + def wrapper(*args, **kwargs): + with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span: + span_id = span.get_span_context().span_id + if span.parent: + span_parent_id = span.parent.span_id + else: + span_parent_id = None + parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id) + if parent_trace_metadata: + TRACE_FLOW_CONTEXT[span_id] = FlowContext( + span_id=span_id, + trace_parent_id=span_parent_id, + is_flow_log=False, + ) + + # Write the Tool Kernel to the Span on HL_FILE_OT_KEY + span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__) + span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "tool") + if enhanced_tool_kernel: + write_to_opentelemetry_span( + span=span, + key=f"{HUMANLOOP_FILE_KEY}.tool", + value=enhanced_tool_kernel, + ) + + # Call the decorated function + try: + output = func(*args, **kwargs) + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = None + except Exception as e: + logger.error(f"Error calling {func.__name__}: {e}") + output = None + output_stringified = jsonify_if_not_string( + func=func, + output=output, + ) + error = str(e) + + # Populate known Tool Log attributes + tool_log = { + "inputs": args_to_inputs(func, args, kwargs), + "output": output_stringified, + "error": error, + } + + # Write the Tool Log to the Span on HL_LOG_OT_KEY + write_to_opentelemetry_span( + span=span, + key=HUMANLOOP_LOG_KEY, + value=tool_log, + ) + + # Return the output of the decorated function + return output + + wrapper.file = File( # type: ignore + path=path if path else func.__name__, + type="tool", + version=enhanced_tool_kernel, + callable=wrapper, + ) + + return wrapper + + return decorator + + +def _build_tool_kernel( + func: Callable, + attributes: Optional[dict[str, Optional[Any]]], + setup_values: Optional[dict[str, Optional[Any]]], + strict: bool, +) -> ToolKernelRequestParams: + """Build ToolKernelRequest object from decorated function.""" + try: + source_code = textwrap.dedent(inspect.getsource(func)) + except TypeError as e: + raise TypeError( + f"Cannot extract source code for function {func.__name__}. " + "Try decorating a plain function instead of a partial for example." 
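+            # Note: inspect.getsource raises TypeError for objects without + # retrievable source, e.g. functools.partial instances or builtins.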
+ ) from e + # Remove decorator from source code by finding first 'def' + # This makes the source_code extraction idempotent whether + # the decorator is applied directly or used as a higher-order + # function + source_code = source_code[source_code.find("def") :] + kernel = ToolKernelRequestParams( + source_code=source_code, + function=_build_function_property( + func=func, + strict=strict, + ), + ) + if attributes: + kernel["attributes"] = attributes + if setup_values: + kernel["setup_values"] = setup_values + return kernel + + +def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams: + """Build `function` property inside ToolKernelRequest.""" + tool_name = func.__name__ + description = func.__doc__ + if description is None: + description = "" + return ToolFunctionParams( + name=tool_name, + description=description, + parameters=_build_function_parameters_property(func), # type: ignore + strict=strict, + ) + + +class _JSONSchemaFunctionParameters(TypedDict): + type: str + properties: dict[str, typing.Union[dict, list]] + required: list[str] + additionalProperties: Literal[False] + + +def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters: + """Build `function.parameters` property inside ToolKernelRequest.""" + properties: dict[str, Any] = {} + required: list[str] = [] + signature = inspect.signature(func) + + for parameter in signature.parameters.values(): + if parameter.kind in ( + inspect.Parameter.VAR_POSITIONAL, + inspect.Parameter.VAR_KEYWORD, + ): + raise ValueError(f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator") + + for parameter in signature.parameters.values(): + try: + parameter_signature = _parse_annotation(parameter.annotation) + except ValueError as e: + raise ValueError(f"Error parsing signature of @tool annotated function {func.__name__}: {e}") from e + param_json_schema = _annotation_parse_to_json_schema(parameter_signature) + properties[parameter.name] = param_json_schema + if not _parameter_is_optional(parameter): + required.append(parameter.name) + + if len(properties) == 0 and len(required) == 0: + # Edge case: function with no parameters + return _JSONSchemaFunctionParameters( + type="object", + properties={}, + required=[], + additionalProperties=False, + ) + return _JSONSchemaFunctionParameters( + type="object", + # False positive, expected tuple[str] but got tuple[str, ...] + required=tuple(required), # type: ignore + properties=properties, + additionalProperties=False, + ) + + +if sys.version_info >= (3, 10): + _PRIMITIVE_TYPES = Union[ + str, + int, + float, + bool, + Parameter.empty, # type: ignore + Ellipsis, # type: ignore + ] +else: + # Ellipsis not supported in typing module before Python 3.10 + _PRIMITIVE_TYPES = Union[ + str, + int, + float, + bool, + Parameter.empty, # type: ignore + ] + + +@dataclass +class _ParsedAnnotation: + def no_type_hint(self) -> bool: + """Check if the annotation has no type hint. + + Examples: + str -> False + list -> True + list[str] -> False + """ + raise NotImplementedError + + +@dataclass +class _ParsedPrimitiveAnnotation(_ParsedAnnotation): + annotation: _PRIMITIVE_TYPES + + def no_type_hint(self) -> bool: + return self.annotation is Parameter.empty or self.annotation is Ellipsis + + +@dataclass +class _ParsedDictAnnotation(_ParsedAnnotation): + # Both are null if no type hint e.g. 
dict vs dict[str, int] + key_annotation: Optional[_ParsedAnnotation] + value_annotation: Optional[_ParsedAnnotation] + + def no_type_hint(self) -> bool: + return self.key_annotation is None and self.value_annotation is None + + +@dataclass +class _ParsedTupleAnnotation(_ParsedAnnotation): + # Null if no type hint e.g. tuple vs tuple[str, int] + annotation: Optional[list[_ParsedAnnotation]] + + def no_type_hint(self) -> bool: + return self.annotation is None + + +@dataclass +class _ParsedUnionAnnotation(_ParsedAnnotation): + annotation: list[_ParsedAnnotation] + + +@dataclass +class _ParsedListAnnotation(_ParsedAnnotation): + # Null if no type hint e.g. list vs list[str] + annotation: Optional[_ParsedAnnotation] + + +@dataclass +class _ParsedOptionalAnnotation(_ParsedAnnotation): + annotation: _ParsedAnnotation + + +def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation: + """Parse constituent parts of a potentially nested type hint. + + Custom types are not supported, only built-in types and typing module types. + + """ + origin = typing.get_origin(annotation) + if origin is None: + # Either not a nested type or no type hint + # Parameter.empty is used for parameters without type hints + # Ellipsis is interpreted as Any + if annotation not in ( + str, + int, + float, + bool, + Parameter.empty, + Ellipsis, + dict, + list, + tuple, + ): + raise ValueError(f"Unsupported type hint: {annotation}") + + # Check if it's a complex type with no inner type + if annotation == builtins.dict: + return _ParsedDictAnnotation( + value_annotation=None, + key_annotation=None, + ) + if annotation == builtins.list: + return _ParsedListAnnotation( + annotation=None, + ) + if annotation == builtins.tuple: + return _ParsedTupleAnnotation( + annotation=None, + ) + + # Is a primitive type + return _ParsedPrimitiveAnnotation( + annotation=annotation, + ) + + if origin is list: + inner_annotation = _parse_annotation(typing.get_args(annotation)[0]) + return _ParsedListAnnotation( + annotation=inner_annotation, + ) + + if origin is dict: + key_type = _parse_annotation(typing.get_args(annotation)[0]) + value_type = _parse_annotation(typing.get_args(annotation)[1]) + return _ParsedDictAnnotation( + key_annotation=key_type, + value_annotation=value_type, + ) + + if origin is tuple: + return _ParsedTupleAnnotation( + annotation=[_parse_annotation(arg) for arg in typing.get_args(annotation)], + ) + + if origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType): + sub_types = typing.get_args(annotation) + if sub_types[-1] is type(None): + # type(None) in sub_types indicates Optional type + if len(sub_types) == 2: + # Union is an Optional type only + return _ParsedOptionalAnnotation( + annotation=_parse_annotation(sub_types[0]), + ) + # Union has sub_types and is Optional + return _ParsedOptionalAnnotation( + annotation=_ParsedUnionAnnotation( + annotation=[_parse_annotation(sub_type) for sub_type in sub_types[:-1]], + ) + ) + # Union type that is not Optional + return _ParsedUnionAnnotation( + annotation=[_parse_annotation(sub_type) for sub_type in sub_types], + ) + + raise ValueError(f"Unsupported origin: {origin}") + + +_JSON_SCHEMA_ANY = ["string", "integer", "number", "boolean", "object", "array", "null"] + + +def _annotation_parse_to_json_schema( + arg: _ParsedAnnotation, +) -> Mapping[str, Union[str, Mapping, Sequence]]: + """ + Convert parse result from _parse_annotation to JSON Schema for a parameter. 
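+ + For example (illustrative): list[int] is parsed to + _ParsedListAnnotation(annotation=_ParsedPrimitiveAnnotation(annotation=int)) + and converted here to {"type": "array", "items": {"type": "integer"}}.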
+ + The function recursively converts the nested type hints to a JSON Schema. + + Note that 'any' is not supported by JSON Schema, so we allow any type as a workaround. + """ + arg_type: Mapping[str, Union[str, Mapping, Sequence]] + + if isinstance(arg, _ParsedOptionalAnnotation): + is_optional = True + arg = arg.annotation + else: + is_optional = False + + if isinstance(arg, _ParsedUnionAnnotation): + arg_type = { + "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation], + } + + elif isinstance(arg, _ParsedTupleAnnotation): + if arg.annotation is None: + # tuple annotation with no type hints + # This is equivalent to a list, since the + # number of elements is not specified + arg_type = { + "type": "array", + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation], + } + + elif isinstance(arg, _ParsedListAnnotation): + if arg.annotation is None: + # list annotation with no type hints + if is_optional: + arg_type = { + "type": ["array", "null"], + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": {"type": _JSON_SCHEMA_ANY}, + } + else: + arg_type = { + "type": "array", + "items": _annotation_parse_to_json_schema(arg.annotation), + } + + elif isinstance(arg, _ParsedDictAnnotation): + if arg.key_annotation is None and arg.value_annotation is None: + # dict annotation with no type hints + if is_optional: + arg_type = { + "type": ["object", "null"], + "properties": { + "key": {"type": _JSON_SCHEMA_ANY}, + "value": {"type": _JSON_SCHEMA_ANY}, + }, + } + else: + arg_type = { + "type": "object", + "properties": { + "key": {"type": _JSON_SCHEMA_ANY}, + "value": {"type": _JSON_SCHEMA_ANY}, + }, + } + else: + arg_type = { + "type": "object", + "properties": { + "key": _annotation_parse_to_json_schema(arg.key_annotation), # type: ignore + "value": _annotation_parse_to_json_schema(arg.value_annotation), # type: ignore + }, + } + + elif isinstance(arg, _ParsedPrimitiveAnnotation): + if arg.annotation is builtins.str: + arg_type = {"type": "string"} + elif arg.annotation is builtins.int: + arg_type = {"type": "integer"} + elif arg.annotation is builtins.float: + arg_type = {"type": "number"} + elif arg.annotation is builtins.bool: + arg_type = {"type": "boolean"} + elif arg.annotation is Parameter.empty or arg.annotation is Ellipsis: + # JSON Schema dropped support for 'any' type, we allow any type as a workaround + arg_type = {"type": _JSON_SCHEMA_ANY} + + else: + raise ValueError(f"Unsupported annotation type: {arg}") + + if is_optional: + if isinstance(arg, _ParsedUnionAnnotation): + for type_option in arg_type["anyOf"]: + if ( + isinstance(type_option["type"], list) # type: ignore + and "null" not in type_option["type"] # type: ignore + ): # type: ignore + type_option["type"] = [*type_option["type"], "null"] # type: ignore + elif not isinstance(type_option["type"], list): # type: ignore + type_option["type"] = [type_option["type"], "null"] # type: ignore + else: + if isinstance(arg_type["type"], list) and "null" not in arg_type["type"]: # type: ignore + arg_type = {**arg_type, "type": [*arg_type["type"], "null"]} # type: ignore + elif not isinstance(arg_type["type"], list): # type: ignore + arg_type = {**arg_type, "type": [arg_type["type"], "null"]} # type: ignore + + return arg_type + + +def _parameter_is_optional( + parameter: inspect.Parameter, +) -> bool: + """Check if a tool parameter is optional.
+ + Examples: + Optional[T] -> True + T | None -> True + T -> False + """ + # Check if the parameter can be None, either via Optional[T] or T | None type hint + origin = typing.get_origin(parameter.annotation) + # sub_types refers to T inside the annotation + sub_types = typing.get_args(parameter.annotation) + return ( + (origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType)) + and len(sub_types) > 0 + and sub_types[-1] is type(None) + ) diff --git a/src/humanloop/decorators/types.py b/src/humanloop/decorators/types.py new file mode 100644 index 00000000..f52f0178 --- /dev/null +++ b/src/humanloop/decorators/types.py @@ -0,0 +1,12 @@ +from typing_extensions import NotRequired + +from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams + + +class DecoratorPromptKernelRequestParams(PromptKernelRequestParams): + """See :class:`PromptKernelRequestParams` for more information. + + Allows the `model` field to be optional for Prompt decorator. + """ + + model: NotRequired[str] # type: ignore diff --git a/src/humanloop/eval_utils.py b/src/humanloop/eval_utils.py deleted file mode 100644 index 1e003e97..00000000 --- a/src/humanloop/eval_utils.py +++ /dev/null @@ -1,647 +0,0 @@ -""" -Evaluation utils for the Humanloop SDK. - -This module provides a set of utilities to aid running Eval workflows on Humanloop -where you are managing the runtime of your application in your code. - -Functions in this module should be accessed via the Humanloop client. They should -not be called directly. -""" - -import logging -from datetime import datetime -from functools import partial -import inspect -from logging import INFO -from pydantic import BaseModel, ValidationError -from typing import Callable, Sequence, Literal, Union, Optional, List, Dict, Tuple -from typing_extensions import NotRequired, TypedDict -import time -import sys -import json -from concurrent.futures import ThreadPoolExecutor, as_completed - -from .client import BaseHumanloop -from .core.api_error import ApiError - -# We use TypedDicts for requests, which is consistent with the rest of the SDK -from .requests import FlowKernelRequestParams as FlowDict -from .requests import PromptKernelRequestParams as PromptDict -from .requests import ToolKernelRequestParams as ToolDict -from .requests import CreateDatapointRequestParams as DatapointDict -from .requests import ExternalEvaluatorRequestParams as ExternalEvaluator -from .requests import CodeEvaluatorRequestParams as CodeEvaluatorDict -from .requests import LlmEvaluatorRequestParams as LLMEvaluatorDict -from .requests import HumanEvaluatorRequestParams as HumanEvaluatorDict - - -# Responses are Pydantic models, we leverage them for improved request validation -from .types import FlowKernelRequest as Flow -from .types import PromptKernelRequest as Prompt -from .types import ToolKernelRequest as Tool -from .types import BooleanEvaluatorStatsResponse as BooleanStats -from .types import NumericEvaluatorStatsResponse as NumericStats -from .types import ( - UpdateDatesetAction as UpdateDatasetAction, -) # TODO: fix original type typo -from .types import DatapointResponse as Datapoint -from .types import ( - EvaluationStats, - RunStatsResponse, - EvaluatorArgumentsType, - EvaluatorReturnTypeEnum, - EvaluationResponse, -) - -# Setup logging -logger = logging.getLogger(__name__) -logger.setLevel(level=INFO) -console_handler = logging.StreamHandler() -logger.setLevel(INFO) -formatter = logging.Formatter("%(message)s") 
-console_handler.setFormatter(formatter) -if not logger.hasHandlers(): - logger.addHandler(console_handler) - -EvaluatorDict = Union[ - CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator -] -Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict] -FileType = Literal["flow", "prompt", "tool", "evaluator"] - - -# ANSI escape codes for logging colors -YELLOW = "\033[93m" -CYAN = "\033[96m" -GREEN = "\033[92m" -RED = "\033[91m" -RESET = "\033[0m" - - -class Identifiers(TypedDict): - """Common identifiers for the objects required to run an Evaluation.""" - - id: NotRequired[str] - """The ID of the File on Humanloop.""" - path: NotRequired[str] - """The path of the File on Humanloop.""" - - -class File(Identifiers): - """A File on Humanloop (Flow, Prompt, Tool, Evaluator).""" - - type: NotRequired[FileType] - """The type of File this callable relates to on Humanloop.""" - version: NotRequired[Version] - """The contents uniquely define the version of the File on Humanloop.""" - callable: Callable - """The function being evaluated. - It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`. - If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`. - It should return a string or json serializable output. - """ - - -class Dataset(Identifiers): - datapoints: NotRequired[Sequence[DatapointDict]] - """The datapoints to map your function over to produce the outputs required by the evaluation.""" - action: NotRequired[UpdateDatasetAction] - """How to update the Dataset given the provided Datapoints; - `set` replaces the existing Datapoints and `add` appends to the existing Datapoints.""" - - -class Evaluator(Identifiers): - """The Evaluator to provide judgments for this Evaluation.""" - - args_type: NotRequired[EvaluatorArgumentsType] - """The type of arguments the Evaluator expects - only required for local Evaluators.""" - return_type: NotRequired[EvaluatorReturnTypeEnum] - """The type of return value the Evaluator produces - only required for local Evaluators.""" - callable: NotRequired[Callable] - """The function to run on the logs to produce the judgment - only required for local Evaluators.""" - threshold: NotRequired[float] - """The threshold to check the Evaluator against. 
If the aggregate value of the Evaluator is below this threshold, the check will fail.""" - - -class EvaluatorCheck(BaseModel): - """Summary data for an Evaluator check.""" - - path: str - """The path of the Evaluator used in the check.""" - # TODO: Add number valence and improvement check - # improvement_check: bool - # """Whether the latest version of your function has improved across the Dataset for a specific Evaluator.""" - score: float - """The score of the latest version of your function for a specific Evaluator.""" - delta: float - """The change in score since the previous version of your function for a specific Evaluator.""" - threshold: Optional[float] - """The threshold to check the Evaluator against.""" - threshold_check: Optional[bool] - """Whether the latest version has an average Evaluator result above a threshold.""" - evaluation_id: str - """The ID of the corresponding Evaluation.""" - - -def _run_eval( - client: BaseHumanloop, - file: File, - name: Optional[str], - dataset: Dataset, - evaluators: Optional[Sequence[Evaluator]] = None, - # logs: typing.Sequence[dict] | None = None, - workers: int = 4, -) -> List[EvaluatorCheck]: - """ - Evaluate your function for a given `Dataset` and set of `Evaluators`. - - :param client: the Humanloop API client. - :param file: the Humanloop file being evaluated, including a function to run over the dataset. - :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File. - :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation. - :param evaluators: define how judgments are provided for this Evaluation. - :param workers: the number of threads to process datapoints using your function concurrently. - :return: per Evaluator checks. - """ - - # Get or create the file on Humanloop - version = file.pop("version", {}) - - # Raise error if one of path or id not provided - if not file.get("path") and not file.get("id"): - raise ValueError("You must provide a path or id in your `file`.") - - # Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow` - try: - type_ = file.pop("type") - logger.info( - f"{CYAN}Evaluating your {type_} function corresponding to `{file['path']}` on Humanloop{RESET} \n\n" - ) - except KeyError as _: - type_ = "flow" - logger.warning("No `file` type specified, defaulting to flow.") - - # If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop. - function_ = None - try: - function_ = file.pop("callable") - except KeyError as _: - if type_ == "flow": - raise ValueError( - "You must provide a `callable` for your Flow `file` to run a local eval." - ) - else: - logger.info( - f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop." - ) - - file_dict = {**file, **version} - - if type_ == "flow": - # Be more lenient with Flow versions as they are arbitrary json - try: - Flow.parse_obj(version) - except ValidationError: - flow_version = {"attributes": version} - file_dict = {**file, **flow_version} - hl_file = client.flows.upsert(**file_dict) - - elif type_ == "prompt": - try: - _ = Prompt.parse_obj(version) - except ValidationError as error_: - logger.error( - msg=f"Invalid Prompt `version` in your `file` request. 
\n\nValidation error: \n)" - ) - raise error_ - hl_file = client.prompts.upsert(**file_dict) - - elif type_ == "tool": - try: - _ = Tool.parse_obj(version) - except ValidationError as error_: - logger.error( - msg=f"Invalid Tool `version` in your `file` request. \n\nValidation error: \n)" - ) - raise error_ - hl_file = client.tools.upsert(**file_dict) - - elif type_ == "evaluator": - hl_file = client.evaluators.upsert(**file_dict) - - else: - raise NotImplementedError(f"Unsupported File type: {type_}") - - # Upsert the Dataset - action = dataset.get( - "action", "set" - ) # set is the server default - None not allowed. - if "datapoints" not in dataset: - dataset["datapoints"] = [] - # Use `upsert` to get existing dataset ID if no datapoints provided, given we can't `get` on path. - action = "add" - hl_dataset = client.datasets.upsert(**dataset, action=action) - hl_dataset = client.datasets.get( - id=hl_dataset.id, version_id=hl_dataset.version_id, include_datapoints=True - ) - - # Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id` - local_evaluators: List[Evaluator] = [] - if evaluators: - for evaluator in evaluators: - # If a callable is provided for an Evaluator, we treat it as External - eval_function = evaluator.get("callable") - if eval_function is not None: - # TODO: support the case where `file` logs generated on Humanloop but Evaluator logs generated locally - if function_ is None: - raise ValueError( - f"Local Evaluators are only supported when generating Logs locally using your {type_}'s `callable`. Please provide a `callable` for your file in order to run Evaluators locally." - ) - local_evaluators.append(evaluator) - spec = ExternalEvaluator( - arguments_type=evaluator["args_type"], - return_type=evaluator["return_type"], - attributes={"code": inspect.getsource(eval_function)}, - evaluator_type="external", - ) - _ = client.evaluators.upsert( - id=evaluator.get("id"), path=evaluator.get("path"), spec=spec - ) - - # Validate upfront that the local Evaluators and Dataset fit - requires_target = False - for local_evaluator in local_evaluators: - if local_evaluator["args_type"] == "target_required": - requires_target = True - break - if requires_target: - missing_target = 0 - for datapoint in hl_dataset.datapoints: - if not datapoint.target: - missing_target += 1 - if missing_target > 0: - raise ValueError( - f"{missing_target} Datapoints have no target. A target is required for the Evaluator: {local_evaluator['path']}" - ) - - # Get or create the Evaluation based on the name - evaluation = None - try: - evaluation = client.evaluations.create( - name=name, - evaluators=[{"path": e["path"]} for e in evaluators], - file={"id": hl_file.id}, - ) - except ApiError as error_: - # If the name exists, go and get it # TODO: Update API GET to allow querying by name and file. 
- if error_.status_code == 409: - evals = client.evaluations.list(file_id=hl_file.id, size=50) - for page in evals.iter_pages(): - evaluation = next((e for e in page.items if e.name == name), None) - else: - raise error_ - if not evaluation: - raise ValueError(f"Evaluation with name {name} not found.") - - # Create a new Run - run = client.evaluations.create_run( - id=evaluation.id, - dataset={"version_id": hl_dataset.version_id}, - orchestrated=False, - ) - - # Every Run will generate a new batch of Logs - run_id = run.id - log_func = _get_log_func( - client=client, - type_=type_, - file_id=hl_file.id, - version_id=hl_file.version_id, - run_id=run_id, - ) - - # Define the function to execute your function in parallel and Log to Humanloop - def process_datapoint(datapoint: Datapoint): - start_time = datetime.now() - datapoint_dict = datapoint.dict() - try: - if "messages" in datapoint_dict and datapoint_dict["messages"] is not None: - output = function_( - **datapoint_dict["inputs"], messages=datapoint_dict["messages"] - ) - else: - output = function_(**datapoint_dict["inputs"]) - - if not isinstance(output, str): - try: - output = json.dumps(output) - # throw error if it fails to serialize - except Exception as _: - raise ValueError( - f"Your {type_}'s `callable` must return a string or a JSON serializable object." - ) - log = log_func( - inputs=datapoint.inputs, - output=output, - source_datapoint_id=datapoint.id, - start_time=start_time, - end_time=datetime.now(), - ) - except Exception as e: - log = log_func( - inputs=datapoint.inputs, - error=str(e), - source_datapoint_id=datapoint.id, - start_time=start_time, - end_time=datetime.now(), - ) - logger.warning( - msg=f"\nYour {type_}'s `callable` failed for Datapoint: {datapoint.id}. \n Error: {str(e)}" - ) - - # Apply local Evaluators - for local_evaluator in local_evaluators: - try: - start_time = datetime.now() - eval_function = local_evaluator["callable"] - if local_evaluator["args_type"] == "target_required": - judgment = eval_function(log.dict(), datapoint_dict["target"]) - else: - judgment = eval_function(log.dict()) - - _ = client.evaluators.log( - parent_id=log.id, - id=local_evaluator.get("id"), - path=local_evaluator.get("path"), - judgment=judgment, - start_time=start_time, - end_time=datetime.now(), - ) - except Exception as e: - _ = client.evaluators.log( - parent_id=log.id, - path=local_evaluator.get("path"), - id=local_evaluator.get("id"), - error=str(e), - start_time=start_time, - end_time=datetime.now(), - ) - logger.warning( - f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}" - ) - - # Execute the function and send the logs to Humanloop in parallel - total_datapoints = len(hl_dataset.datapoints) - logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n") - logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}") - logger.info(f"{CYAN}Run ID: {run_id}{RESET}") - - # Generate locally if a file `callable` is provided - if function_: - logger.info( - f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} " - ) - completed_tasks = 0 - with ThreadPoolExecutor(max_workers=workers) as executor: - futures = [ - executor.submit(process_datapoint, datapoint) - for datapoint in hl_dataset.datapoints - ] - for _ in as_completed(futures): - completed_tasks += 1 - _progress_bar(total_datapoints, completed_tasks) - else: - # TODO: trigger run when updated API is available - logger.info( - f"{CYAN}\nRunning '{hl_file.name}' 
over the Dataset '{hl_dataset.name}'{RESET}" - ) - - # Wait for the Evaluation to complete then print the results - complete = False - - while not complete: - stats = client.evaluations.get_stats(id=evaluation.id) - logger.info(f"\r{stats.progress}") - run_stats = next( - (run_stats for run_stats in stats.run_stats if run_stats.run_id == run_id), - None, - ) - complete = run_stats is not None and run_stats.status == "completed" - if not complete: - time.sleep(5) - - # Print Evaluation results - logger.info(stats.report) - - checks: List[EvaluatorCheck] = [] - - # Skip `check_evaluation_improvement` if no thresholds were provided and there is only one run. - # (Or the logs would not be helpful) - if ( - any(evaluator.get("threshold") is not None for evaluator in evaluators) - or len(stats.run_stats) > 1 - ): - for evaluator in evaluators: - _, score, delta = check_evaluation_improvement( - evaluation=evaluation, - stats=stats, - evaluator_path=evaluator["path"], - run_id=run_id, - ) - threshold_check = None - threshold = evaluator.get("threshold") - if threshold is not None: - threshold_check = check_evaluation_threshold( - evaluation=evaluation, - stats=stats, - evaluator_path=evaluator["path"], - threshold=threshold, - run_id=run_id, - ) - checks.append( - EvaluatorCheck( - path=evaluator["path"], - # TODO: Add back in with number valence on Evaluators - # improvement_check=improvement_check, - score=score, - delta=delta, - threshold=threshold, - threshold_check=threshold_check, - evaluation_id=evaluation.id, - ) - ) - - logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n") - return checks - - -def _get_log_func( - client: BaseHumanloop, - type_: FileType, - file_id: str, - version_id: str, - run_id: str, -) -> Callable: - """Returns the appropriate log function pre-filled with common parameters.""" - log_request = { - # TODO: why does the Log `id` field refer to the file ID in the API? - # Why are both `id` and `version_id` needed in the API? 
- "id": file_id, - "version_id": version_id, - "run_id": run_id, - } - if type_ == "flow": - return partial(client.flows.log, **log_request, trace_status="complete") - elif type_ == "prompt": - return partial(client.prompts.log, **log_request) - elif type_ == "evaluator": - return partial(client.evaluators.log, **log_request) - elif type_ == "tool": - return partial(client.tools.log, **log_request) - else: - raise NotImplementedError(f"Unsupported File version: {type_}") - - -def get_score_from_evaluator_stat( - stat: Union[NumericStats, BooleanStats], -) -> Union[float, None]: - """Get the score from an Evaluator Stat.""" - score = None - if isinstance(stat, BooleanStats): - if stat.total_logs: - score = round(stat.num_true / stat.total_logs, 2) - elif isinstance(stat, NumericStats): - score = round(stat.mean, 2) - else: - pass - return score - - -def _progress_bar(total: int, progress: int): - """Simple progress bar for CLI with ETA.""" - - if total <= 0: - total = 1 - - if not hasattr(_progress_bar, "start_time"): - _progress_bar.start_time = time.time() - - bar_length = 40 - block = int(round(bar_length * progress / total)) - bar = "#" * block + "-" * (bar_length - block) - - percentage = (progress / total) * 100 - elapsed_time = time.time() - _progress_bar.start_time - time_per_item = elapsed_time / progress if progress > 0 else 0 - eta = (total - progress) * time_per_item - - progress_display = f"\r[{bar}] {progress}/{total}" - progress_display += f" ({percentage:.2f}%)" - - if progress < total: - progress_display += f" | ETA: {int(eta)}s" - else: - progress_display += " | DONE" - _progress_bar.start_time = None - - sys.stderr.write(progress_display) - - if progress >= total: - sys.stderr.write("\n") - - -def get_evaluator_stats_by_path( - stat: RunStatsResponse, evaluation: EvaluationResponse -) -> Dict[str, Union[NumericStats, BooleanStats]]: - """Get the Evaluator stats by path.""" - # TODO: Update the API so this is not necessary - evaluators_by_id = { - evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators - } - evaluator_stats_by_path = { - evaluators_by_id[ - evaluator_stat.evaluator_version_id - ].version.path: evaluator_stat - for evaluator_stat in stat.evaluator_stats - } - return evaluator_stats_by_path - - -def check_evaluation_threshold( - evaluation: EvaluationResponse, - stats: EvaluationStats, - evaluator_path: str, - threshold: float, - run_id: str, -) -> bool: - """Checks if the latest version has an average Evaluator result above a threshold.""" - # TODO: Update the API so this is not necessary - evaluator_stats_by_path = get_evaluator_stats_by_path( - stat=next((stat for stat in stats.run_stats if stat.run_id == run_id), None), - evaluation=evaluation, - ) - if evaluator_path in evaluator_stats_by_path: - evaluator_stat = evaluator_stats_by_path[evaluator_path] - score = get_score_from_evaluator_stat(stat=evaluator_stat) - if score >= threshold: - logger.info( - f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" - ) - return True - else: - logger.info( - f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" - ) - return False - else: - raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") - - -def check_evaluation_improvement( - evaluation: EvaluationResponse, - evaluator_path: str, - stats: EvaluationStats, - run_id: str, -) -> Tuple[bool, float, float]: - """ - Check the latest version has improved across for a 
specific Evaluator. - - :returns: A tuple of (improvement, latest_score, delta since previous score) - """ - # TODO: Update the API so this is not necessary - - latest_evaluator_stats_by_path = get_evaluator_stats_by_path( - stat=next((stat for stat in stats.run_stats if stat.run_id == run_id), None), - evaluation=evaluation, - ) - if len(stats.run_stats) == 1: - logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}") - return True, 0, 0 - - previous_evaluator_stats_by_path = get_evaluator_stats_by_path( - stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1 - evaluation=evaluation, - ) - if ( - evaluator_path in latest_evaluator_stats_by_path - and evaluator_path in previous_evaluator_stats_by_path - ): - latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path] - previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path] - latest_score = get_score_from_evaluator_stat(stat=latest_evaluator_stat) - previous_score = get_score_from_evaluator_stat(stat=previous_evaluator_stat) - if latest_score is None or previous_score is None: - raise ValueError(f"Could not find score for Evaluator {evaluator_path}.") - diff = round(latest_score - previous_score, 2) - if diff >= 0: - logger.info( - f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}" - ) - return True, latest_score, diff - else: - logger.info( - f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}" - ) - return False, latest_score, diff - else: - raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py new file mode 100644 index 00000000..ac5a5eba --- /dev/null +++ b/src/humanloop/eval_utils/__init__.py @@ -0,0 +1,4 @@ +from .run import log_with_evaluation_context, run_eval +from .types import File + +__all__ = ["run_eval", "log_with_evaluation_context", "File"] diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py new file mode 100644 index 00000000..f05b9585 --- /dev/null +++ b/src/humanloop/eval_utils/context.py @@ -0,0 +1,26 @@ +from typing import Callable, TypedDict + + +class EvaluationContext(TypedDict): + """Context for Logs created while an Evaluation is running. + + Global state that is set when an Evaluation is run. + """ + + source_datapoint_id: str + """Required for associating a Log with the Evaluation Run.""" + + upload_callback: Callable[[dict], None] + """The Exporter calls this so that eval_utils is notified to evaluate an uploaded Log.""" + + file_id: str + """ID of the evaluated File.""" + + path: str + """Path of the evaluated File.""" + + run_id: str + """Required for associating a Log with the Evaluation Run.""" + + +EVALUATION_CONTEXT_VARIABLE_NAME = "__EVALUATION_CONTEXT" diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py new file mode 100644 index 00000000..fad5b280 --- /dev/null +++ b/src/humanloop/eval_utils/run.py @@ -0,0 +1,773 @@ +""" +Evaluation utils for the Humanloop SDK. + +This module provides a set of utilities to aid running Eval workflows on Humanloop +where you are managing the runtime of your application in your code. + +Functions in this module should be accessed via the Humanloop client. They should +not be called directly.
+""" + +import copy +import inspect +import json +import logging +import sys +import threading +import time +import types +import typing +from concurrent.futures import ThreadPoolExecutor +from contextvars import ContextVar +from datetime import datetime +from functools import partial +from logging import INFO +from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union + +from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse +from humanloop.core.api_error import ApiError +from humanloop.eval_utils.context import EvaluationContext +from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File + +# We use TypedDicts for requests, which is consistent with the rest of the SDK +from humanloop.evaluators.client import EvaluatorsClient +from humanloop.flows.client import FlowsClient +from humanloop.prompts.client import PromptsClient +from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict +from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict +from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict +from humanloop.requests import PromptKernelRequestParams as PromptDict +from humanloop.requests import ToolKernelRequestParams as ToolDict +from humanloop.tools.client import ToolsClient +from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats +from humanloop.types import DatapointResponse as Datapoint +from humanloop.types import EvaluationResponse, EvaluationStats + +# Responses are Pydantic models and we leverage them for improved request validation +from humanloop.types import FlowKernelRequest as Flow +from humanloop.types import NumericEvaluatorStatsResponse as NumericStats +from humanloop.types import PromptKernelRequest as Prompt +from humanloop.types import ToolKernelRequest as Tool +from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse +from humanloop.types.create_flow_log_response import CreateFlowLogResponse +from humanloop.types.create_prompt_log_response import CreatePromptLogResponse +from humanloop.types.create_tool_log_response import CreateToolLogResponse +from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue +from humanloop.types.evaluation_run_response import EvaluationRunResponse +from humanloop.types.run_stats_response import RunStatsResponse +from pydantic import ValidationError + +if typing.TYPE_CHECKING: + from humanloop.client import BaseHumanloop + +# Setup logging +logger = logging.getLogger(__name__) +logger.setLevel(level=INFO) +console_handler = logging.StreamHandler() +logger.setLevel(INFO) +formatter = logging.Formatter("%(message)s") +console_handler.setFormatter(formatter) +if not logger.hasHandlers(): + logger.addHandler(console_handler) + +EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator] +Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict] +FileType = Literal["flow", "prompt", "tool", "evaluator"] + + +# ANSI escape codes for logging colors +YELLOW = "\033[93m" +CYAN = "\033[96m" +GREEN = "\033[92m" +RED = "\033[91m" +RESET = "\033[0m" + + +CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient) + + +def log_with_evaluation_context( + client: CLIENT_TYPE, + evaluation_context_variable: 
ContextVar[Optional[EvaluationContext]], +) -> CLIENT_TYPE: + """ + Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT. + + This makes the overloaded log method aware of whether the created Log is + part of an Evaluation (e.g. one started by eval_utils.run_eval). + """ + + def _is_evaluated_file( + evaluation_context: EvaluationContext, + log_args: dict, + ) -> bool: + """Check if the File the Log is created against is part of the current Evaluation. + + The user of the .log API can refer to the File that owns that Log either by + ID or Path. This function matches either of them against the EvaluationContext. + """ + if evaluation_context is None: + return False + return evaluation_context.get("file_id") == log_args.get("id") or evaluation_context.get( + "path" + ) == log_args.get("path") + + # Copy the original log method to a hidden attribute + client._log = client.log # type: ignore + + def _overloaded_log( + self, + **kwargs, + ) -> Union[ + CreatePromptLogResponse, + CreateToolLogResponse, + CreateFlowLogResponse, + CreateEvaluatorLogResponse, + ]: + evaluation_context = evaluation_context_variable.get() + + if _is_evaluated_file( + evaluation_context=evaluation_context, # type: ignore + log_args=kwargs, + ): + # If the .log API user does not provide the source_datapoint_id or run_id, + # override them with the values from the EvaluationContext + evaluation_context = typing.cast( + EvaluationContext, + evaluation_context, + ) + for attribute in ["source_datapoint_id", "run_id"]: + if attribute not in kwargs or kwargs[attribute] is None: + kwargs[attribute] = evaluation_context[attribute] # type: ignore + + # Call the original .log method + logger.debug( + "Logging %s with EvaluationContext %s inside _overloaded_log on Thread %s", + kwargs, + evaluation_context, + threading.get_ident(), + ) + response = self._log(**kwargs) + + # Call the callback so the Evaluation can be updated + if _is_evaluated_file( + evaluation_context=evaluation_context, # type: ignore + log_args=kwargs, + ): + # Notify that the Log has been added to the Evaluation + # evaluation_context cannot be None + evaluation_context = typing.cast( + EvaluationContext, + evaluation_context, + ) + evaluation_context["upload_callback"]( # type: ignore + { + **kwargs, + # ID in kwargs refers to the File ID + # Replace it with the Log ID + "id": response.id, + } + ) + + # Mark the Evaluation Context as consumed + evaluation_context_variable.set(None) + + return response + + # Replace the original log method with the overloaded one + client.log = types.MethodType(_overloaded_log, client) # type: ignore + # Return the client with the overloaded log method + logger.debug("Overloaded the .log method of %s", client) + return client + + +class _SimpleProgressBar: + """Thread-safe progress bar for the console.""" + + def __init__(self, total: int): + if total <= 0: + self._total = 1 + else: + self._total = total + self._progress = 0 + self._lock = threading.Lock() + self._start_time = None + + def increment(self): + """Increment the progress bar by one finished task.""" + with self._lock: + self._progress += 1 + if self._start_time is None: + self._start_time = time.time() + + bar_length = 40 + block = int(round(bar_length * self._progress / self._total)) + bar = "#" * block + "-" * (bar_length - block) + + percentage = (self._progress / self._total) * 100 + elapsed_time = time.time() - self._start_time + time_per_item = elapsed_time / self._progress if self._progress > 0 else 0 + eta = (self._total - self._progress) * time_per_item +
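+            # Renders e.g. [####----] 12/40 (30.00%) | ETA: 95s on stderr (values illustrative).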
+ progress_display = f"\r[{bar}] {self._progress}/{self._total}" + progress_display += f" ({percentage:.2f}%)" + + if self._progress < self._total: + progress_display += f" | ETA: {int(eta)}s" + else: + progress_display += " | DONE" + + sys.stderr.write(progress_display) + + if self._progress >= self._total: + sys.stderr.write("\n") + + +# Module-level so it can be shared by threads. +_PROGRESS_BAR: Optional[_SimpleProgressBar] = None + + +def run_eval( + client: "BaseHumanloop", + file: File, + name: Optional[str], + dataset: Dataset, + evaluation_context_variable: ContextVar[Optional[EvaluationContext]], + evaluators: Optional[Sequence[Evaluator]] = None, + workers: int = 4, +) -> List[EvaluatorCheck]: + """ + Evaluate your function for a given `Dataset` and set of `Evaluators`. + + :param client: the Humanloop API client. + :param file: the Humanloop file being evaluated, including a function to run over the dataset. + :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File. + :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation. + :param evaluation_context_variable: the ContextVar used to propagate the EvaluationContext to the wrapped log methods. + :param evaluators: define how judgments are provided for this Evaluation. + :param workers: the number of threads to process datapoints using your function concurrently. + :return: per Evaluator checks. + """ + global _PROGRESS_BAR + + if hasattr(file.get("callable"), "file"): + # When the callable inside `file` is a decorated function, + # we need to validate that the other parameters of `file` + # match the attributes of the decorator + inner_file: File = file["callable"].file + if "path" in file and inner_file["path"] != file["path"]: + raise ValueError( + "`path` attribute specified in the `file` does not match the File path of the decorated function." + ) + if "version" in file and inner_file["version"] != file["version"]: + raise ValueError( + "`version` attribute in the `file` does not match the File version of the decorated function." + ) + if "type" in file and inner_file["type"] != file["type"]: + raise ValueError( + "`type` attribute of `file` argument does not match the File type of the decorated function." + ) + if "id" in file: + raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.") + # The `file` attribute on the decorated function holds at least + # as much information as the `file` argument + file_ = copy.deepcopy(inner_file) + else: + file_ = file + + # Get or create the file on Humanloop + version = file_.pop("version", {}) + + # Raise error if one of path or id not provided + if not file_.get("path") and not file_.get("id"): + raise ValueError("You must provide a path or id in your `file`.") + + # Determine the `type` of the `file` to Evaluate - if no `type` is provided, default to `flow` + try: + type_ = typing.cast(FileType, file_.pop("type")) + logger.info( + f"{CYAN}Evaluating your {type_} function corresponding to `{file_['path']}` on Humanloop{RESET} \n\n" + ) + except KeyError as _: + type_ = "flow" + logger.warning("No `file` type specified, defaulting to flow.") + + # If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop.
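+    # Illustrative example (names are hypothetical): + # file={"path": "qa/flow", "type": "flow", "callable": my_flow_fn} runs my_flow_fn + # locally over each Datapoint, while the same `file` without a `callable` defers + # Log generation to Humanloop.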
+ function_ = typing.cast(Optional[Callable], file_.pop("callable", None)) + if function_ is None: + if type_ == "flow": + raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.") + else: + logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.") + + file_dict = {**file_, **version} + hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse] + + if type_ == "flow": + # Be more lenient with Flow versions as they are arbitrary json + try: + Flow.model_validate(version) + except ValidationError: + flow_version = {"attributes": version} + file_dict = {**file_, **flow_version} + hl_file = client.flows.upsert(**file_dict) # type: ignore + + elif type_ == "prompt": + try: + Prompt.model_validate(version) + except ValidationError as error_: + logger.error(msg=f"Invalid Prompt `version` in your `file` request.\n\nValidation error:\n{error_}") + raise error_ + hl_file = client.prompts.upsert(**file_dict) # type: ignore + + elif type_ == "tool": + try: + Tool.model_validate(version) + except ValidationError as error_: + logger.error(msg=f"Invalid Tool `version` in your `file` request.\n\nValidation error:\n{error_}") + raise error_ + hl_file = client.tools.upsert(**file_dict) # type: ignore + + elif type_ == "evaluator": + hl_file = client.evaluators.upsert(**file_dict) # type: ignore + + else: + raise NotImplementedError(f"Unsupported File type: {type_}") + + # Upsert the Dataset + if "action" not in dataset: + dataset["action"] = "set" + if "datapoints" not in dataset: + dataset["datapoints"] = [] + # Use `upsert` to get existing dataset ID if no datapoints provided, given we can't `get` on path. + dataset["action"] = "add" + hl_dataset = client.datasets.upsert( + **dataset, + ) + hl_dataset = client.datasets.get( + id=hl_dataset.id, + version_id=hl_dataset.version_id, + include_datapoints=True, + ) + + # Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id` + local_evaluators: List[Evaluator] = [] + if evaluators: + for evaluator in evaluators: + # If a callable is provided for an Evaluator, we treat it as External + eval_function = evaluator.get("callable") + if eval_function is not None: + # TODO: support the case where `file` logs generated on Humanloop but Evaluator logs generated locally + if function_ is None: + raise ValueError( + "Local Evaluators are only supported when generating Logs locally using your " + f"{type_}'s `callable`. Please provide a `callable` for your file in order " + "to run Evaluators locally." + ) + local_evaluators.append(evaluator) + spec = ExternalEvaluator( + arguments_type=evaluator["args_type"], + return_type=evaluator["return_type"], + attributes={"code": inspect.getsource(eval_function)}, + evaluator_type="external", + ) + client.evaluators.upsert( + id=evaluator.get("id"), + path=evaluator.get("path"), + spec=spec, + ) + # function_ cannot be None here (checked above); cast it for type checking + function_ = typing.cast(Callable, function_) + + # Validate upfront that the local Evaluators and Dataset fit + requires_target = False + for local_evaluator in local_evaluators: + if local_evaluator["args_type"] == "target_required": + requires_target = True + break + if requires_target: + missing_target = 0 + for datapoint in hl_dataset.datapoints: # type: ignore + if not datapoint.target: + missing_target += 1 + if missing_target > 0: + raise ValueError( + f"{missing_target} Datapoints have no target.
A target " + f"is required for the Evaluator: {local_evaluator['path']}" + ) + + # Get or create the Evaluation based on the name + evaluation = None + try: + evaluation = client.evaluations.create( + name=name, + evaluators=[{"path": e["path"]} for e in evaluators], # type: ignore + file={"id": hl_file.id}, + ) + except ApiError as error_: + # If the name exists, go and get it # TODO: Update API GET to allow querying by name and file. + if error_.status_code == 409: + evals = client.evaluations.list(file_id=hl_file.id, size=50) + for page in evals.iter_pages(): + evaluation = next((e for e in page.items if e.name == name), None) # type: ignore + else: + raise error_ + if not evaluation: + raise ValueError(f"Evaluation with name {name} not found.") + + # Create a new Run + run: EvaluationRunResponse = client.evaluations.create_run( + id=evaluation.id, + dataset={"version_id": hl_dataset.version_id}, + orchestrated=False, + ) + # Every Run will generate a new batch of Logs + run_id = run.id + + _PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints)) # type: ignore + + # Define the function to execute the `callable` in parallel and Log to Humanloop + def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str): + def upload_callback(log: dict): + """Logic run after the Log has been created.""" + logger.debug( + "upload_callback on Thread %s: log %s datapoint_target %s", + threading.get_ident(), + log, + dp.target, + ) + _run_local_evaluators( + client=client, + log=log, + datapoint_target=dp.target, + local_evaluators=local_evaluators, + ) + _PROGRESS_BAR.increment() # type: ignore + + datapoint_dict = dp.dict() + # Set the Evaluation Context for the current datapoint + evaluation_context_variable.set( + EvaluationContext( + source_datapoint_id=dp.id, + upload_callback=upload_callback, + file_id=file_id, + run_id=run_id, + path=file_path, + ) + ) + logger.debug( + "process_datapoint on Thread %s: evaluating Datapoint %s with EvaluationContext %s", + threading.get_ident(), + datapoint_dict, + evaluation_context_variable.get(), + ) + log_func = _get_log_func( + client=client, + file_type=type_, + file_id=hl_file.id, + version_id=hl_file.version_id, + run_id=run_id, + ) + start_time = datetime.now() + try: + if "messages" in datapoint_dict and datapoint_dict["messages"] is not None: + # If function_ is decorated by Humanloop, the OTel Exporter will + # handle the logging and call the upload_callback + # function above when it's done + output = function_( # type: ignore + **datapoint_dict["inputs"], + messages=datapoint_dict["messages"], + ) + else: + # If function_ is decorated by Humanloop, the OTel Exporter will + # handle the logging and call the upload_callback + # function above when it's done + output = function_(**datapoint_dict["inputs"]) # type: ignore + + if not isinstance(output, str): + try: + output = json.dumps(output) + except Exception: + # throw error if it fails to serialize + raise ValueError(f"Your {type_}'s `callable` must return a string or a JSON serializable object.") + + context_variable = evaluation_context_variable.get() + if context_variable is not None: + # Evaluation Context has not been consumed + # function_ is a plain callable so we need to create a Log + logger.debug( + "process_datapoint on Thread %s: function_ %s is a simple callable, context was not consumed", + threading.get_ident(), + function_.__name__, # type: ignore + ) + log_func( + inputs=dp.inputs, + output=output, + start_time=start_time, +
end_time=datetime.now(), + ) + except Exception as e: + log_func( + inputs=dp.inputs, + error=str(e), + source_datapoint_id=dp.id, + run_id=run_id, + start_time=start_time, + end_time=datetime.now(), + ) + logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}") + + # Execute the function and send the logs to Humanloop in parallel + logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n") + logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}") + logger.info(f"{CYAN}Run ID: {run_id}{RESET}") + + # Generate locally if a file `callable` is provided + if function_: # type: ignore + logger.info( + f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} " + ) + with ThreadPoolExecutor(max_workers=workers) as executor: + for datapoint in hl_dataset.datapoints: # type: ignore + executor.submit( + process_datapoint, + datapoint, + hl_file.id, + hl_file.path, + run_id, + ) + else: + # TODO: trigger run when updated API is available + logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}") + + # Wait for the Evaluation to complete then print the results + complete = False + + while not complete: + stats = client.evaluations.get_stats(id=evaluation.id) + logger.info(f"\r{stats.progress}") + run_stats = next( + (run_stats for run_stats in stats.run_stats if run_stats.run_id == run_id), + None, + ) + complete = run_stats is not None and run_stats.status == "completed" + if not complete: + time.sleep(5) + + # Print Evaluation results + logger.info(stats.report) + + checks: List[EvaluatorCheck] = [] + + # Skip `check_evaluation_improvement` if no thresholds were provided and there is only one run. + # (Or the logs would not be helpful) + if any(evaluator.get("threshold") is not None for evaluator in evaluators) or len(stats.run_stats) > 1: # type: ignore + for evaluator in evaluators: # type: ignore + score, delta = _check_evaluation_improvement( + evaluation=evaluation, + stats=stats, + evaluator_path=evaluator["path"], + run_id=run_id, + )[1:] + threshold_check = None + threshold = evaluator.get("threshold") + if threshold is not None: + threshold_check = _check_evaluation_threshold( + evaluation=evaluation, + stats=stats, + evaluator_path=evaluator["path"], + threshold=threshold, + run_id=run_id, + ) + checks.append( + EvaluatorCheck( + path=evaluator["path"], + # TODO: Add back in with number valence on Evaluators + # improvement_check=improvement_check, + score=score, + delta=delta, + threshold=threshold, + threshold_check=threshold_check, + evaluation_id=evaluation.id, + ) + ) + + logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n") + return checks + + +def _get_log_func( + client: "BaseHumanloop", + file_type: FileType, + file_id: str, + version_id: str, + run_id: str, +) -> Callable: + """Returns the appropriate log function pre-filled with common parameters.""" + log_request = { + # TODO: why does the Log `id` field refer to the file ID in the API? + # Why are both `id` and `version_id` needed in the API? 
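+        # For example, for a Flow this partial pre-fills + # client.flows.log(id=file_id, version_id=version_id, run_id=run_id, trace_status="complete").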
+ "id": file_id, + "version_id": version_id, + "run_id": run_id, + } + if file_type == "flow": + return partial(client.flows.log, **log_request, trace_status="complete") # type: ignore + elif file_type == "prompt": + return partial(client.prompts.log, **log_request) # type: ignore + elif file_type == "evaluator": + return partial(client.evaluators.log, **log_request) # type: ignore + elif file_type == "tool": + return partial(client.tools.log, **log_request) # type: ignore + else: + raise NotImplementedError(f"Unsupported File version: {file_type}") + + +def _get_score_from_evaluator_stat( + stat: Union[NumericStats, BooleanStats], +) -> Union[float, None]: + """Get the score from an Evaluator Stat.""" + score = None + if isinstance(stat, BooleanStats): + if stat.total_logs: + score = round(stat.num_true / stat.total_logs, 2) + elif isinstance(stat, NumericStats): + score = round(stat.mean, 2) # type: ignore + else: + raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}") + return score # type: ignore + + +def _get_evaluator_stats_by_path( + stat: RunStatsResponse, + evaluation: EvaluationResponse, +) -> Dict[str, Union[NumericStats, BooleanStats]]: + """Get the Evaluator stats by path.""" + # TODO: Update the API so this is not necessary + evaluators_by_id = {evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators} + evaluator_stats_by_path = { + evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat + for evaluator_stat in stat.evaluator_stats + } + return evaluator_stats_by_path # type: ignore + + +def _check_evaluation_threshold( + evaluation: EvaluationResponse, + stats: EvaluationStats, + evaluator_path: str, + threshold: float, + run_id: str, +) -> bool: + """Checks if the latest version has an average Evaluator result above a threshold.""" + # TODO: Update the API so this is not necessary + evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=next( + (stat for stat in stats.run_stats if stat.run_id == run_id), + None, # type: ignore + ), + evaluation=evaluation, + ) + if evaluator_path in evaluator_stats_by_path: + evaluator_stat = evaluator_stats_by_path[evaluator_path] + score = _get_score_from_evaluator_stat(stat=evaluator_stat) + if score >= threshold: # type: ignore + logger.info( + f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" + ) + return True + else: + logger.info( + f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}" + ) + return False + else: + raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") + + +def _check_evaluation_improvement( + evaluation: EvaluationResponse, + evaluator_path: str, + stats: EvaluationStats, + run_id: str, +) -> Tuple[bool, float, float]: + """ + Check the latest version has improved across for a specific Evaluator. 
+ + :returns: A tuple of (improvement, latest_score, delta since previous score) + """ + # TODO: Update the API so this is not necessary + + latest_evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=next( + (stat for stat in stats.run_stats if stat.run_id == run_id), + None, # type: ignore + ), + evaluation=evaluation, + ) + if len(stats.run_stats) == 1: + logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}") + return True, 0, 0 + + previous_evaluator_stats_by_path = _get_evaluator_stats_by_path( + stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1 + evaluation=evaluation, + ) + if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path: + latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path] + previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path] + latest_score = _get_score_from_evaluator_stat(stat=latest_evaluator_stat) + previous_score = _get_score_from_evaluator_stat(stat=previous_evaluator_stat) + if latest_score is None or previous_score is None: + raise ValueError(f"Could not find score for Evaluator {evaluator_path}.") + diff = round(latest_score - previous_score, 2) # type: ignore + if diff >= 0: + logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + return True, latest_score, diff # type: ignore + else: + logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}") + return False, latest_score, diff # type: ignore + else: + raise ValueError(f"Evaluator {evaluator_path} not found in the stats.") + + +def _run_local_evaluators( + client: "BaseHumanloop", + log: dict, + datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]], + local_evaluators: list[Evaluator], +): + for local_evaluator in local_evaluators: + start_time = datetime.now() + try: + eval_function = local_evaluator["callable"] + if local_evaluator["args_type"] == "target_required": + judgement = eval_function( + log, + datapoint_target, + ) + else: + judgement = eval_function(log) + + _ = client.evaluators.log( + parent_id=log["id"], + judgment=judgement, + id=local_evaluator.get("id"), + path=local_evaluator.get("path"), + start_time=start_time, + end_time=datetime.now(), + ) + except Exception as e: + _ = client.evaluators.log( + parent_id=log["id"], + path=local_evaluator.get("path"), + id=local_evaluator.get("id"), + error=str(e), + start_time=start_time, + end_time=datetime.now(), + ) + logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}") diff --git a/src/humanloop/eval_utils/types.py b/src/humanloop/eval_utils/types.py new file mode 100644 index 00000000..845a8542 --- /dev/null +++ b/src/humanloop/eval_utils/types.py @@ -0,0 +1,91 @@ +from typing import Callable, Literal, Optional, Sequence, TypedDict, Union + +from pydantic import BaseModel +from typing_extensions import NotRequired + +from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict +from humanloop.requests import CreateDatapointRequestParams as DatapointDict +from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator + +# We use TypedDicts for requests, which is consistent with the rest of the SDK +from humanloop.requests import FlowKernelRequestParams as FlowDict +from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict +from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict +from humanloop.requests import 
PromptKernelRequestParams as PromptDict +from humanloop.requests import ToolKernelRequestParams as ToolDict +from humanloop.types import ( + EvaluatorArgumentsType, + EvaluatorReturnTypeEnum, +) + +# Responses are Pydantic models and we leverage them for improved request validation +from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo + +EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator] +Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict] +FileType = Literal["flow", "prompt", "tool", "evaluator"] + + +class Identifiers(TypedDict): + """Common identifiers for the objects required to run an Evaluation.""" + + id: NotRequired[str] + """The ID of the File on Humanloop.""" + path: NotRequired[str] + """The path of the File on Humanloop.""" + + +class File(Identifiers): + """A File on Humanloop (Flow, Prompt, Tool, Evaluator).""" + + type: NotRequired[FileType] + """The type of File this callable relates to on Humanloop.""" + version: NotRequired[Version] + """The contents uniquely define the version of the File on Humanloop.""" + callable: NotRequired[Callable] + """The function being evaluated. + It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`. + If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`. + It should return a string or json serializable output. + """ + + +class Dataset(Identifiers): + datapoints: NotRequired[Sequence[DatapointDict]] + """The datapoints to map your function over to produce the outputs required by the evaluation.""" + action: NotRequired[UpdateDatasetAction] + """How to update the Dataset given the provided Datapoints; + `set` replaces the existing Datapoints and `add` appends to the existing Datapoints.""" + + +class Evaluator(Identifiers): + """The Evaluator to provide judgments for this Evaluation.""" + + args_type: NotRequired[EvaluatorArgumentsType] + """The type of arguments the Evaluator expects - only required for local Evaluators.""" + return_type: NotRequired[EvaluatorReturnTypeEnum] + """The type of return value the Evaluator produces - only required for local Evaluators.""" + callable: NotRequired[Callable] + """The function to run on the logs to produce the judgment - only required for local Evaluators.""" + threshold: NotRequired[float] + """The threshold to check the Evaluator against. 
If the aggregate value of the Evaluator is below this threshold, the check will fail.""" + + +class EvaluatorCheck(BaseModel): + """Summary data for an Evaluator check.""" + + path: str + """The path of the Evaluator used in the check.""" + # TODO: Add number valence and improvement check + # improvement_check: bool + # """Whether the latest version of your function has improved across the Dataset for a specific Evaluator.""" + score: float + """The score of the latest version of your function for a specific Evaluator.""" + delta: float + """The change in score since the previous version of your function for a specific Evaluator.""" + threshold: Optional[float] + """The threshold to check the Evaluator against.""" + threshold_check: Optional[bool] + """Whether the latest version has an average Evaluator result above a threshold.""" + evaluation_id: str + """The ID of the corresponding Evaluation.""" diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py index f0b5af8c..efd4e908 100644 --- a/src/humanloop/evaluations/client.py +++ b/src/humanloop/evaluations/client.py @@ -11,10 +11,14 @@ from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError -from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams +from .requests.create_evaluation_request_evaluators_item import ( + CreateEvaluationRequestEvaluatorsItemParams, +) from ..requests.file_request import FileRequestParams from ..core.serialization import convert_and_respect_annotation_metadata -from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams +from .requests.add_evaluators_request_evaluators_item import ( + AddEvaluatorsRequestEvaluatorsItemParams, +) from ..core.jsonable_encoder import jsonable_encoder from ..types.evaluation_runs_response import EvaluationRunsResponse from .requests.create_run_request_dataset import CreateRunRequestDatasetParams @@ -22,7 +26,9 @@ from ..types.evaluation_run_response import EvaluationRunResponse from ..types.evaluation_status import EvaluationStatus from ..types.evaluation_stats import EvaluationStats -from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse +from ..types.paginated_data_evaluation_log_response import ( + PaginatedDataEvaluationLogResponse, +) from ..core.client_wrapper import AsyncClientWrapper from ..core.pagination import AsyncPager @@ -289,7 +295,11 @@ def add_evaluators( raise ApiError(status_code=_response.status_code, body=_response_json) def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + self, + id: str, + evaluator_version_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, ) -> EvaluationResponse: """ Remove an Evaluator from an Evaluation. 
@@ -594,10 +604,14 @@ def create_run( method="POST", json={ "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + object_=dataset, + annotation=CreateRunRequestDatasetParams, + direction="write", ), "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" + object_=version, + annotation=CreateRunRequestVersionParams, + direction="write", ), "orchestrated": orchestrated, "use_existing_logs": use_existing_logs, @@ -630,7 +644,11 @@ def create_run( raise ApiError(status_code=_response.status_code, body=_response_json) def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + self, + id: str, + run_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ Add an existing Run to the specified Evaluation. @@ -695,7 +713,13 @@ def add_existing_run( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + def remove_run( + self, + id: str, + run_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: """ Remove a Run from an Evaluation. @@ -1331,7 +1355,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response_json) async def remove_evaluator( - self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + self, + id: str, + evaluator_version_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, ) -> EvaluationResponse: """ Remove an Evaluator from an Evaluation. @@ -1676,10 +1704,14 @@ async def main() -> None: method="POST", json={ "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + object_=dataset, + annotation=CreateRunRequestDatasetParams, + direction="write", ), "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" + object_=version, + annotation=CreateRunRequestVersionParams, + direction="write", ), "orchestrated": orchestrated, "use_existing_logs": use_existing_logs, @@ -1712,7 +1744,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response_json) async def add_existing_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + self, + id: str, + run_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, ) -> typing.Optional[typing.Any]: """ Add an existing Run to the specified Evaluation. @@ -1786,7 +1822,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response_json) async def remove_run( - self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + self, + id: str, + run_id: str, + *, + request_options: typing.Optional[RequestOptions] = None, ) -> None: """ Remove a Run from an Evaluation. 
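The Run endpoints reformatted above compose into a lifecycle along these lines
(a minimal sketch; the IDs are illustrative, not real):

    run = client.evaluations.create_run(
        id="ev_123",                        # Evaluation ID (illustrative)
        dataset={"version_id": "dsv_456"},  # Dataset version (illustrative)
        orchestrated=False,
    )
    client.evaluations.add_existing_run(id="ev_123", run_id=run.id)
    client.evaluations.remove_run(id="ev_123", run_id=run.id)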
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py index 1884b45c..edb48562 100644 --- a/src/humanloop/flows/client.py +++ b/src/humanloop/flows/client.py @@ -200,6 +200,7 @@ def log( ), ) """ + _response = self._client_wrapper.httpx_client.request( "flows/log", method="POST", diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py index b6cfadb9..3e38e860 100644 --- a/src/humanloop/logs/client.py +++ b/src/humanloop/logs/client.py @@ -37,6 +37,7 @@ def list( end_date: typing.Optional[dt.datetime] = None, include_parent: typing.Optional[bool] = None, in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, + sample_n: typing.Optional[int] = None, request_options: typing.Optional[RequestOptions] = None, ) -> SyncPager[LogResponse]: """ @@ -77,6 +78,9 @@ def list( in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] If true, return Logs that are associated to a Trace. False, return Logs that are not associated to a Trace. + sample_n : typing.Optional[int] + If provided, only a random sample of approximately N Logs will be returned. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -118,6 +122,7 @@ def list( "end_date": serialize_datetime(end_date) if end_date is not None else None, "include_parent": include_parent, "in_trace_filter": in_trace_filter, + "sample_n": sample_n, }, request_options=request_options, ) @@ -143,6 +148,7 @@ def list( end_date=end_date, include_parent=include_parent, in_trace_filter=in_trace_filter, + sample_n=sample_n, request_options=request_options, ) _items = _parsed_response.records @@ -296,6 +302,7 @@ async def list( end_date: typing.Optional[dt.datetime] = None, include_parent: typing.Optional[bool] = None, in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None, + sample_n: typing.Optional[int] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AsyncPager[LogResponse]: """ @@ -336,6 +343,9 @@ async def list( in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]] If true, return Logs that are associated to a Trace. False, return Logs that are not associated to a Trace. + sample_n : typing.Optional[int] + If provided, only a random sample of approximately N Logs will be returned. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -385,6 +395,7 @@ async def main() -> None: "end_date": serialize_datetime(end_date) if end_date is not None else None, "include_parent": include_parent, "in_trace_filter": in_trace_filter, + "sample_n": sample_n, }, request_options=request_options, ) @@ -410,6 +421,7 @@ async def main() -> None: end_date=end_date, include_parent=include_parent, in_trace_filter=in_trace_filter, + sample_n=sample_n, request_options=request_options, ) _items = _parsed_response.records diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py new file mode 100644 index 00000000..0a1eab92 --- /dev/null +++ b/src/humanloop/otel/__init__.py @@ -0,0 +1,52 @@ +from typing import Optional, TypedDict + +from opentelemetry.sdk.trace import TracerProvider +from typing_extensions import NotRequired + +from humanloop.otel.helpers import module_is_installed + + +def instrument_provider(provider: TracerProvider): + """Add Instrumentors to the TracerProvider. + + Instrumentors intercept calls to libraries such as OpenAI client + and adds metadata to the Spans created by the decorators. 
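+
+    Example (a minimal sketch; assumes the openai package is installed):
+        provider = TracerProvider()
+        instrument_provider(provider)  # OpenAI client calls now produce spans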
+ """ + if module_is_installed("openai"): + from opentelemetry.instrumentation.openai import OpenAIInstrumentor + + OpenAIInstrumentor().instrument(tracer_provider=provider) + + if module_is_installed("cohere"): + from opentelemetry.instrumentation.cohere import CohereInstrumentor + + CohereInstrumentor().instrument(tracer_provider=provider) + + if module_is_installed("anthropic"): + from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor + + AnthropicInstrumentor().instrument(tracer_provider=provider) + + if module_is_installed("groq"): + from opentelemetry.instrumentation.groq import GroqInstrumentor + + GroqInstrumentor().instrument(tracer_provider=provider) + + if module_is_installed("replicate"): + from opentelemetry.instrumentation.replicate import ReplicateInstrumentor + + ReplicateInstrumentor().instrument(tracer_provider=provider) + + if module_is_installed("boto3"): + from opentelemetry.instrumentation.bedrock import BedrockInstrumentor + + BedrockInstrumentor().instrument(tracer_provider=provider) + + +class FlowContext(TypedDict): + trace_id: NotRequired[str] + trace_parent_id: NotRequired[Optional[int]] + is_flow_log: NotRequired[bool] + + +TRACE_FLOW_CONTEXT: dict[int, FlowContext] = {} diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py new file mode 100644 index 00000000..d28126a0 --- /dev/null +++ b/src/humanloop/otel/constants.py @@ -0,0 +1,6 @@ +# Attribute name prefix on Humanloop spans for file-related attributes + path +HUMANLOOP_FILE_KEY = "humanloop.file" +# Attribute name prefix on Humanloop spans for log-related attributes +HUMANLOOP_LOG_KEY = "humanloop.log" +HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type" +HUMANLOOP_PATH_KEY = "humanloop.file.path" diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py new file mode 100644 index 00000000..8208d06c --- /dev/null +++ b/src/humanloop/otel/exporter.py @@ -0,0 +1,345 @@ +import contextvars +import copy +import json +import logging +import threading +import typing +from queue import Empty as EmptyQueue +from queue import Queue +from threading import Thread +from typing import Any, Optional + +from opentelemetry import trace +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult + +from humanloop.core import ApiError as HumanloopApiError +from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext +from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext +from humanloop.otel.constants import ( + HUMANLOOP_FILE_KEY, + HUMANLOOP_FILE_TYPE_KEY, + HUMANLOOP_LOG_KEY, + HUMANLOOP_PATH_KEY, +) +from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span +from humanloop.requests.flow_kernel_request import FlowKernelRequestParams +from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams + +if typing.TYPE_CHECKING: + from humanloop.client import Humanloop + + +logger = logging.getLogger("humanloop.sdk") + + +class HumanloopSpanExporter(SpanExporter): + """Upload Spans created by SDK decorators to Humanloop. + + Spans not created by Humanloop SDK decorators will be ignored. + """ + + DEFAULT_NUMBER_THREADS = 4 + + def __init__( + self, + client: "Humanloop", + worker_threads: Optional[int] = None, + ) -> None: + """Upload Spans created by SDK decorators to Humanloop. + + Spans not created by Humanloop SDK decorators will be ignored. 
+ """ + super().__init__() + self._client = client + # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace + self._span_id_to_uploaded_log_id: dict[int, Optional[str]] = {} + # Work queue for the threads uploading the spans + self._upload_queue: Queue = Queue() + # Worker threads to export the spans + self._threads: list[Thread] = [ + Thread( + target=self._do_work, + daemon=True, + ) + for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS) + ] + # Signals threads no more work will arrive and + # they should wind down if the queue is empty + self._shutdown: bool = False + for thread in self._threads: + thread.start() + logger.debug("Exporter Thread %s started", thread.ident) + + def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult: + def is_evaluated_file( + span: ReadableSpan, + evaluation_context: Optional[EvaluationContext], + ) -> bool: + if evaluation_context is None: + return False + + return span.attributes.get(HUMANLOOP_PATH_KEY) == evaluation_context["path"] # type: ignore + + if not self._shutdown: + try: + evaluation_context = self._client.evaluation_context_variable.get() + if len(spans) > 1: + raise RuntimeError("HumanloopSpanExporter expected a single span when running an evaluation") + if not is_evaluated_file(spans[0], evaluation_context): + evaluation_context = None + except LookupError: + evaluation_context = None + for span in spans: + if is_humanloop_span(span): + # We pass the EvaluationContext from the eval_run utility thread to + # the export thread so the .log action works as expected + evaluation_context_copy = None + for context_var, context_var_value in contextvars.copy_context().items(): + if context_var.name == EVALUATION_CONTEXT_VARIABLE_NAME: + evaluation_context_copy = context_var_value + self._upload_queue.put( + ( + span, + evaluation_context_copy, + ), + ) + logger.debug( + "Span %s with EvaluationContext %s added to upload queue", + span.attributes, + evaluation_context_copy, + ) + # Reset the EvaluationContext so run eval does not + # create a duplicate Log + if evaluation_context is not None and is_evaluated_file( + spans[0], + evaluation_context, + ): + logger.debug( + "EvaluationContext %s marked as exhausted for Log in Span %s", + evaluation_context, + spans[0].attributes, + ) + # Mark the EvaluationContext as used + self._client.evaluation_context_variable.set(None) + return SpanExportResult.SUCCESS + else: + logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans") + return SpanExportResult.FAILURE + + def shutdown(self) -> None: + self._shutdown = True + for thread in self._threads: + thread.join() + logger.debug("Exporter Thread %s joined", thread.ident) + + def force_flush(self, timeout_millis: int = 3000) -> bool: + self._shutdown = True + for thread in self._threads: + thread.join(timeout=timeout_millis) + self._upload_queue.join() + + return True + + def _do_work(self): + """Upload spans to Humanloop. + + Ran by worker threads. The threads use the self._shutdown flag to wait + for Spans to arrive. Setting a timeout on self._upload_queue.get() risks + shutting down the thread early as no Spans are produced e.g. while waiting + for user input into the instrumented feature or application. + + Each thread will upload a Span to Humanloop, provided the Span has all its + dependencies uploaded. The dependency happens in a Flow Trace context, where + the Trace parent must be uploaded first. 
The Span Processor will send in Spans
+        bottom-up, while the upload of a Trace happens top-down. If a Span's
+        parent has not been uploaded yet, the Span is re-queued to be uploaded later.
+        """
+
+        # Do work while the Exporter has not been instructed to
+        # wind down or the queue is not empty
+        while self._upload_queue.qsize() > 0 or not self._shutdown:
+            try:
+                thread_args: tuple[ReadableSpan, EvaluationContext]  # type: ignore
+                # Don't block or the thread will never be notified of the shutdown
+                thread_args = self._upload_queue.get(
+                    block=False,
+                )  # type: ignore
+                span_to_export, evaluation_context = thread_args
+                # Set the EvaluationContext for the thread so the .log action works as expected
+                # NOTE: Expecting the evaluation thread to send a single span so we are
+                # not resetting the EvaluationContext in the scope of the export thread
+                self._client.evaluation_context_variable.set(evaluation_context)
+            except EmptyQueue:
+                continue
+            trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
+            if trace_metadata is None:
+                # Span is not part of a Flow Log
+                self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "_do_work on Thread %s: Dispatched span %s with FlowContext %s which is not part of a Flow",
+                    threading.get_ident(),
+                    span_to_export.attributes,
+                    trace_metadata,
+                )
+            elif trace_metadata["trace_parent_id"] is None:
+                # Span is the head of a Flow Trace
+                self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "Dispatched span %s which is a Flow Log with FlowContext %s",
+                    span_to_export.attributes,
+                    trace_metadata,
+                )
+            elif trace_metadata["trace_parent_id"] in self._span_id_to_uploaded_log_id:
+                # Span is part of a Flow and its parent has been uploaded
+                self._export_span_dispatch(span_to_export)
+                logger.debug(
+                    "_do_work on Thread %s: Dispatched span %s after its parent %s with FlowContext %s",
+                    threading.get_ident(),
+                    span_to_export.attributes,
+                    trace_metadata["trace_parent_id"],
+                    trace_metadata,
+                )
+            else:
+                # Requeue the Span to be uploaded later
+                self._upload_queue.put((span_to_export, evaluation_context))
+            self._upload_queue.task_done()
+
+    def _export_span_dispatch(self, span: ReadableSpan) -> None:
+        hl_file = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+        file_type = span._attributes.get(HUMANLOOP_FILE_TYPE_KEY)  # type: ignore
+
+        if file_type == "prompt":
+            export_func = self._export_prompt
+        elif file_type == "tool":
+            export_func = self._export_tool
+        elif file_type == "flow":
+            export_func = self._export_flow
+        else:
+            raise NotImplementedError(f"Unknown Humanloop File type {file_type} for span: {hl_file}")
+        export_func(span=span)
+
+    def _export_prompt(self, span: ReadableSpan) -> None:
+        file_object: dict[str, Any] = read_from_opentelemetry_span(
+            span,
+            key=HUMANLOOP_FILE_KEY,
+        )
+        log_object: dict[str, Any] = read_from_opentelemetry_span(
+            span,
+            key=HUMANLOOP_LOG_KEY,
+        )
+        # NOTE: Due to OTel conventions, attributes with a value of None are dropped;
+        # if not present, instantiate as an empty container
+        if "inputs" not in log_object:
+            log_object["inputs"] = {}
+        if "messages" not in log_object:
+            log_object["messages"] = []
+        if "tools" not in file_object["prompt"]:
+            file_object["prompt"]["tools"] = []
+        trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
+        if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
+            trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
+            if trace_parent_id is None:
+                # Parent Log in Trace upload
failed + file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY) + logger.error(f"Skipping log for {file_path}: parent Log upload failed") + return + else: + trace_parent_id = None + prompt: PromptKernelRequestParams = file_object["prompt"] + path: str = file_object["path"] + if "output" in log_object: + if not isinstance(log_object["output"], str): + # Output expected to be a string, if decorated function + # does not return one, jsonify it + log_object["output"] = json.dumps(log_object["output"]) + if "attributes" not in prompt or not prompt["attributes"]: + prompt["attributes"] = {} + try: + log_response = self._client.prompts.log( + path=path, + prompt=prompt, + **log_object, + trace_parent_id=trace_parent_id, + ) + self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id + except HumanloopApiError: + self._span_id_to_uploaded_log_id[span.context.span_id] = None + + def _export_tool(self, span: ReadableSpan) -> None: + file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY) + log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY) + trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {}) + if "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]: + trace_parent_id = self._span_id_to_uploaded_log_id.get( + trace_metadata["trace_parent_id"], + ) + if trace_parent_id is None: + # Parent Log in Trace upload failed + file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY) + logger.error(f"Skipping log for {file_path}: parent Log upload failed") + return + else: + trace_parent_id = None + tool = file_object["tool"] + if not tool.get("attributes"): + tool["attributes"] = {} + if not tool.get("setup_values"): + tool["setup_values"] = {} + path: str = file_object["path"] + if "parameters" in tool["function"] and "properties" not in tool["function"]["parameters"]: + tool["function"]["parameters"]["properties"] = {} + if not isinstance(log_object["output"], str): + # Output expected to be a string, if decorated function + # does not return one, jsonify it + log_object["output"] = json.dumps(log_object["output"]) + try: + log_response = self._client.tools.log( + path=path, + tool=tool, + **log_object, + trace_parent_id=trace_parent_id, + ) + self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id + except HumanloopApiError: + self._span_id_to_uploaded_log_id[span.context.span_id] = None + + def _export_flow(self, span: ReadableSpan) -> None: + file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY) + log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY) + trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get( + span.get_span_context().span_id, + {}, + ) + if "trace_parent_id" in trace_metadata: + trace_parent_id = self._span_id_to_uploaded_log_id.get( + trace_metadata["trace_parent_id"], # type: ignore + ) + if trace_parent_id is None and trace_metadata["trace_id"] != span.get_span_context().span_id: + # Parent Log in Trace upload failed + # NOTE: Check if the trace_id metadata field points to the + # span itself. 
This signifies the span is the head of the Trace
+                file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
+                logger.error(f"Skipping log for {file_path}: parent Log upload failed")
+                return
+        else:
+            trace_parent_id = None
+        flow: FlowKernelRequestParams
+        if not file_object.get("flow"):
+            flow = {"attributes": {}}
+        else:
+            flow = file_object["flow"]
+        path: str = file_object["path"]
+        if "output" not in log_object:
+            log_object["output"] = None
+        try:
+            log_response = self._client.flows.log(
+                path=path,
+                flow=flow,
+                **log_object,
+                trace_parent_id=trace_parent_id,
+            )
+            self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
+        except HumanloopApiError as e:
+            logger.error(str(e))
+            self._span_id_to_uploaded_log_id[span.context.span_id] = None
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
new file mode 100644
index 00000000..d25a5674
--- /dev/null
+++ b/src/humanloop/otel/helpers.py
@@ -0,0 +1,301 @@
+import json
+import uuid
+from typing import Any, Callable, Union
+
+from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.trace import SpanKind
+from opentelemetry.util.types import AttributeValue
+
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY
+
+NestedDict = dict[str, Union["NestedDict", AttributeValue]]
+NestedList = list[Union["NestedList", NestedDict]]
+
+
+def _list_to_otel_format(lst: NestedList) -> NestedDict:
+    """Transforms a list of values into a dictionary with index values as keys.
+
+    When writing to OTel span attributes, only primitive values or lists are allowed.
+    Nested dictionaries must be linearised. For example, writing the dictionary value
+    {'a': 7, 'b': 'hello'} to span attribute `foo` would be translated in the span
+    attributes dictionary to look like:
+    ```python
+    {
+        'foo.a': 7,
+        'foo.b': 'hello'
+    }
+    ```
+
+    Calling :func:`write_to_opentelemetry_span` with a list as the write value will have
+    the list transformed into a pseudo-dictionary with index values as keys.
+
+    Examples:
+    ```python
+    _list_to_otel_format([1, 2, 'a']) == {'0': 1, '1': 2, '2': 'a'}
+    _list_to_otel_format([
+        "baz",
+        {'a': 6, 'b': 'hello'}
+    ]) == {
+        '0': 'baz',
+        '1.a': 6,
+        '1.b': 'hello'
+    }
+    ```
+    """
+    return {str(idx): val if not isinstance(val, list) else _list_to_otel_format(val) for idx, val in enumerate(lst)}
+
+
+def write_to_opentelemetry_span(
+    span: ReadableSpan,
+    value: Union[NestedDict, NestedList, AttributeValue],
+    key: str = "",
+) -> None:
+    """Write a Python object to the OpenTelemetry Span's attributes. Reverse of :func:`read_from_opentelemetry_span`.
+
+    Note: OTel will complain about falsy values other than None, and keys with value set
+    to None will be silently dropped. Consider adding a placeholder value if the key should
+    be present in the span attributes.
+
+    Example:
+    ```python
+    {
+        'foo': {
+            'a': 7,
+            'b': 'hello'
+        },
+        "baz": [42, 43]
+    }
+
+    1. Visit foo, push ('foo.a', 7), ('foo.b', 'hello') to stack
+    2. Visit baz, push ('baz.0', 42), ('baz.1', 43) to stack
+    3. Take each primitive key-value from the stack and write to the span attributes,
+       resulting in:
+    {
+        'foo.a': 7,
+        'foo.b': 'hello',
+        'baz.0': 42,
+        'baz.1': 43
+    }
+
+    :param span: OpenTelemetry span to write values to
+
+    :param value: Python object to write to the span attributes. Can also be a primitive value.
+
+    :param key: Key prefix to write to the span attributes. The path to the values does not need to exist in the span attributes.
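+
+    Usage sketch (illustrative):
+        write_to_opentelemetry_span(span, {"a": 7, "b": [1, 2]}, key="foo")
+        # span attributes now contain: foo.a = 7, foo.b.0 = 1, foo.b.1 = 2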
+ ``` + """ + + to_write_copy: Union[dict, AttributeValue] + if isinstance(value, list): + to_write_copy = _list_to_otel_format(value) + else: + to_write_copy = dict(value) # type: ignore + linearised_attributes: dict[str, AttributeValue] = {} + work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)] + + # Remove all keys with the prefix to avoid duplicates + for attribute_key in span._attributes.keys(): # type: ignore + if attribute_key.startswith(key): + del span._attributes[attribute_key] # type: ignore + + while len(work_stack) > 0: + key, value = work_stack.pop() # type: ignore + if isinstance(value, dict): + for sub_key, sub_value in value.items(): + work_stack.append((f"{key}.{sub_key}" if key else sub_key, sub_value)) + elif isinstance(value, list): + # OTel does not allow lists of complex objects, so we linearise them + # by mapping each dict to an index key and recursing into the dict + for idx, list_value in enumerate(value): + work_stack.append((f"{key}.{idx}" if key else idx, list_value)) # type: ignore + else: + linearised_attributes[key] = value # type: ignore + for final_key, final_value in linearised_attributes.items(): + if final_value is not None: + span._attributes[final_key] = final_value # type: ignore + + +def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDict: + """Read a value from the OpenTelemetry span attributes. + + OpenTelemetry liniarises dictionaries and lists, storing only primitive values + in the span attributes. This function reconstructs the original structure from + a key prefix. + + :param span: OpenTelemetry span to read values from + :param key: Key prefix to read from the span attributes + + + Examples: + `span.attributes` contains the following attributes: + ```python + foo.x.y = 7 + foo.x.z.a = 'hello' + foo.x.z.b = 'world' + baz.0 = 42 + baz.1 = 43 + ``` + + `read_from_opentelemetry_span(span, key='foo')` returns: + ```python + { + 'x': { + 'y': 7, + 'z': { + 'a': 'hello', + 'b': 'world' + } + } + } + ``` + + `read_from_opentelemetry_span(span, key='foo.x')` returns: + ```python + { + 'y': 7, + 'z': { + 'a': 'hello', + 'b': 'world' + } + } + ``` + + `read_from_opentelemetry_span(span, key='baz')` returns: + ```python + [42, 43] + ``` + """ + if span._attributes is None: + raise ValueError("Span attributes are empty") + + result: dict[str, Union[dict, AttributeValue]] = {} + + to_process: list[tuple[str, Union[dict, AttributeValue]]] = [] + for span_key, span_value in span._attributes.items(): # type: ignore + if key == "": + # No key prefix, add to root + to_process.append((f"{key}.{span_key}", span_value)) + elif span_key.startswith(key): + # Remove the key prefix and the first dot + to_process.append((span_key, span_value)) + + if not to_process: + if key == "": + # Empty span attributes + return result + raise KeyError(f"Key {key} not found in span attributes") + + for span_key, span_value in to_process: # type: ignore + parts = span_key.split(".") + len_parts = len(parts) + sub_result: dict[str, Union[dict, AttributeValue]] = result + for idx, part in enumerate(parts): + # For each section of the key formatted like 'foo.bar.baz' + # allocate the final value 'baz' to the final dict + if idx == len_parts - 1: + # Final part of the key + sub_result[part] = span_value + else: + if part not in sub_result: + # Create new dict for a previously unseen part of the key + sub_result[part] = {} + sub_result = sub_result[part] # type: ignore + + def pseudo_to_list(sub_dict): + """Convert 
pseudo-dictionary to list if all keys are numeric. + + Conversion happens bottom up. + + Example: + ```python + { + '0': 'a', + '1': 'b', + '2': 'c' + } + + -> + + ['a', 'b', 'c'] + ``` + """ + if not isinstance(sub_dict, dict): + # Primitive value + return sub_dict + if isinstance(sub_dict, dict): + for key, value in sub_dict.items(): + # Recurse into keys + sub_dict[key] = pseudo_to_list(value) + if all(str.isnumeric(key) for key in sub_dict.keys()): + # If all keys are numeric, convert to list + return list(sub_dict.values()) + return sub_dict + + result = pseudo_to_list(result) + if "" in result: + # User read the root of attributes + return result[""] # type: ignore + + for part in key.split("."): + if str.isnumeric(part): + result = result[int(part)] # type: ignore + else: + result = result[part] # type: ignore + + return result + + +def is_llm_provider_call(span: ReadableSpan) -> bool: + """Determines if the span was created by an Instrumentor for LLM provider clients.""" + if not span.instrumentation_scope: + return False + span_instrumentor_name = span.instrumentation_scope.name + # Match against the prefix of the Instrumentor name since + # the name might be version dependent e.g. + # "opentelemetry.instrumentation.openai.v1" + return span.kind == SpanKind.CLIENT and any( + span_instrumentor_name.startswith(instrumentor) + for instrumentor in [ + "opentelemetry.instrumentation.openai", + "opentelemetry.instrumentation.groq", + "opentelemetry.instrumentation.anthropic", + "opentelemetry.instrumentation.cohere", + "opentelemetry.instrumentation.replicate", + ] + ) + + +def is_humanloop_span(span: ReadableSpan) -> bool: + """Check if the Span was created by the Humanloop SDK.""" + try: + # Valid spans will have keys with the HL_FILE_OT_KEY and HL_LOG_OT_KEY prefixes present + read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY) + read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY) + except KeyError: + return False + return True + + +def module_is_installed(module_name: str) -> bool: + """Returns true if the current Python environment has the module installed. + + Used to check if a library that is instrumentable exists in the current environment. 
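+
+    Example (mirroring the usage in humanloop.otel.instrument_provider):
+        if module_is_installed("openai"):
+            from opentelemetry.instrumentation.openai import OpenAIInstrumentor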
+ """ + try: + __import__(module_name) + except ImportError: + return False + return True + + +def generate_span_id() -> str: + return str(uuid.uuid4()) + + +def jsonify_if_not_string(func: Callable, output: Any) -> str: + if not isinstance(output, str): + try: + output = json.dumps(output) + except TypeError as e: + raise TypeError(f"Output of {func.__name__} must be a string or JSON serializable") from e + return output diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py new file mode 100644 index 00000000..5caa5c80 --- /dev/null +++ b/src/humanloop/otel/processor.py @@ -0,0 +1,168 @@ +import logging +from collections import defaultdict +from typing import Any + +# No typing stubs for parse +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter +from pydantic import ValidationError as PydanticValidationError + +from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY +from humanloop.otel.helpers import ( + is_humanloop_span, + is_llm_provider_call, + read_from_opentelemetry_span, + write_to_opentelemetry_span, +) +from humanloop.types.prompt_kernel_request import PromptKernelRequest + +logger = logging.getLogger("humanloop.sdk") + + +class HumanloopSpanProcessor(SimpleSpanProcessor): + """Enrich Humanloop spans with data from their children spans. + + The decorators add Instrumentors to the OpenTelemetry TracerProvider + that log interactions with common LLM libraries. These Instrumentors + produce Spans which contain information that can be used to enrich the + Humanloop File Kernels. + + For example, Instrumentors for LLM provider libraries intercept + hyperparameters used in the API call to the model to build the + Prompt File definition when using the @prompt decorator. + + Spans created that are not created by Humanloop decorators, such as + those created by the Instrumentors mentioned above, will be passed + to the Exporter as they are. + """ + + def __init__(self, exporter: SpanExporter) -> None: + super().__init__(exporter) + # Span parent to Span children map + self._children: dict[int, list] = defaultdict(list) + + # NOTE: Could override on_start and process Flow spans ahead of time + # and PATCH the created Logs in on_end. 
A special type of ReadableSpan could be + # used for this + + def on_end(self, span: ReadableSpan) -> None: + if is_humanloop_span(span=span): + _process_span_dispatch(span, self._children[span.context.span_id]) + # Release the reference to the Spans as they've already + # been sent to the Exporter + del self._children[span.context.span_id] + else: + if span.parent is not None and _is_instrumentor_span(span): + # Copy the Span and keep it until the Humanloop Span + # arrives in order to enrich it + self._children[span.parent.span_id].append(span) + # Pass the Span to the Exporter + self.span_exporter.export([span]) + + +def _is_instrumentor_span(span: ReadableSpan) -> bool: + """Determine if the Span contains information of interest for Spans created by Humanloop decorators.""" + # At the moment we only enrich Spans created by the Prompt decorators + # As we add Instrumentors for other libraries, this function must + # be expanded + return is_llm_provider_call(span=span) + + +def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]): + file_type = span.attributes[HUMANLOOP_FILE_TYPE_KEY] # type: ignore + + # Processing common to all Humanloop File types + if span.start_time: + span._attributes[f"{HUMANLOOP_LOG_KEY}.start_time"] = int(span.start_time / 1e9) # type: ignore + if span.end_time: + span._attributes[f"{HUMANLOOP_LOG_KEY}.end_time"] = int(span.end_time / 1e9) # type: ignore + span._attributes[f"{HUMANLOOP_LOG_KEY}.created_at"] = int(span.end_time / 1e9) # type: ignore + + # Processing specific to each Humanloop File type + if file_type == "prompt": + _process_prompt(prompt_span=span, children_spans=children_spans) + return + elif file_type == "tool": + pass + elif file_type == "flow": + pass + else: + logger.error("Unknown Humanloop File Span %s", span) + + +def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan]): + if len(children_spans) == 0: + return + for child_span in children_spans: + if is_llm_provider_call(child_span): + _enrich_prompt_kernel(prompt_span, child_span) + _enrich_prompt_log(prompt_span, child_span) + # NOTE: @prompt decorator expects a single LLM provider call + # to happen in the function. If there are more than one, we + # ignore the rest + break + + +def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): + hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_FILE_KEY) + gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") + llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm") + + prompt: dict[str, Any] = hl_file.get("prompt", {}) # type: ignore + + # Check if the Prompt Kernel keys were assigned default values + # via the @prompt arguments. Otherwise, use the information + # from the intercepted LLM provider call + prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None) + if prompt["model"] is None: + raise ValueError("Could not infer required parameter `model`. Please provide it in the @prompt decorator.") + prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type") + prompt["provider"] = prompt.get("provider") or gen_ai_object.get("system", None) + if prompt["provider"]: + # Normalize provider name; Interceptors output the names with + # different capitalization e.g. 
OpenAI instead of openai + prompt["provider"] = prompt["provider"].lower() + prompt["temperature"] = prompt.get("temperature") or gen_ai_object.get("request", {}).get("temperature", None) + prompt["top_p"] = prompt.get("top_p") or gen_ai_object.get("request", {}).get("top_p", None) + prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None) + prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None) + prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None) + prompt["tools"] = prompt.get("tools", []) + + try: + # Validate the Prompt Kernel + PromptKernelRequest.model_validate(obj=prompt) + except PydanticValidationError as e: + logger.error("Could not validate Prompt Kernel extracted from Span: %s", e) + + # Write the enriched Prompt Kernel back to the Span + hl_file["prompt"] = prompt + write_to_opentelemetry_span( + span=prompt_span, + key=HUMANLOOP_FILE_KEY, + # hl_file was modified in place via prompt_kernel reference + value=hl_file, + ) + + +def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan): + try: + hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_LOG_KEY) + except KeyError: + hl_log = {} + gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai") + + # TODO: Seed not added by Instrumentors in provider call + + if "output_tokens" not in hl_log: + hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens") + if len(gen_ai_object.get("completion", [])) > 0: + hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason") + hl_log["messages"] = gen_ai_object.get("prompt") + + write_to_opentelemetry_span( + span=prompt_span, + key=HUMANLOOP_LOG_KEY, + # hl_log was modified in place + value=hl_log, + ) diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py index 22e2747f..88cfa117 100644 --- a/src/humanloop/prompts/client.py +++ b/src/humanloop/prompts/client.py @@ -232,7 +232,7 @@ def log( messages=[{"role": "user", "content": "What really happened at Roswell?"}], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-19 00:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -2097,7 +2097,7 @@ async def main() -> None: ], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-19 00:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ diff --git a/src/humanloop/requests/create_datapoint_request.py b/src/humanloop/requests/create_datapoint_request.py index 8e9d5005..d9e2e564 100644 --- a/src/humanloop/requests/create_datapoint_request.py +++ b/src/humanloop/requests/create_datapoint_request.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing_extensions import typing_extensions import typing from .chat_message import ChatMessageParams diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py index 60b7753a..8e727f19 100644 --- a/src/humanloop/requests/flow_response.py +++ b/src/humanloop/requests/flow_response.py @@ -27,7 +27,7 @@ class FlowResponseParams(typing_extensions.TypedDict): id: str """ - Unique identifier for the Flow. Starts with fl\_. + Unique identifier for the Flow. Starts with fl_. 
""" directory_id: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py index e9127722..5491ee6d 100644 --- a/src/humanloop/requests/run_stats_response.py +++ b/src/humanloop/requests/run_stats_response.py @@ -1,9 +1,10 @@ # This file was auto-generated by Fern from our API Definition. -import typing_extensions import typing_extensions import typing -from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams +from .run_stats_response_evaluator_stats_item import ( + RunStatsResponseEvaluatorStatsItemParams, +) from ..types.evaluation_status import EvaluationStatus diff --git a/src/humanloop/requests/tool_kernel_request.py b/src/humanloop/requests/tool_kernel_request.py index 6973c1d0..bd0cd783 100644 --- a/src/humanloop/requests/tool_kernel_request.py +++ b/src/humanloop/requests/tool_kernel_request.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -import typing_extensions import typing_extensions from .tool_function import ToolFunctionParams import typing diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py index 32ff5b40..2cbbb5e4 100644 --- a/src/humanloop/types/evaluation_log_response.py +++ b/src/humanloop/types/evaluation_log_response.py @@ -43,7 +43,9 @@ class EvaluationLogResponse(UncheckedBaseModel): """ if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 else: class Config: @@ -56,10 +58,14 @@ class Config: update_forward_refs(EvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(FlowLogResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(FlowResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse) +update_forward_refs( + MonitoringEvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse +) update_forward_refs(PromptLogResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(PromptResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(ToolLogResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(ToolResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationLogResponse=EvaluationLogResponse) +update_forward_refs( + VersionDeploymentResponse, EvaluationLogResponse=EvaluationLogResponse +) update_forward_refs(VersionIdResponse, EvaluationLogResponse=EvaluationLogResponse) diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py index 874782a1..5f8c0254 100644 --- a/src/humanloop/types/flow_response.py +++ b/src/humanloop/types/flow_response.py @@ -25,7 +25,7 @@ class FlowResponse(UncheckedBaseModel): id: str = pydantic.Field() """ - Unique identifier for the Flow. Starts with fl\_. + Unique identifier for the Flow. Starts with fl_. 
""" directory_id: typing.Optional[str] = pydantic.Field(default=None) diff --git a/src/humanloop/types/overall_stats.py b/src/humanloop/types/overall_stats.py index c3753321..7b0c35aa 100644 --- a/src/humanloop/types/overall_stats.py +++ b/src/humanloop/types/overall_stats.py @@ -33,7 +33,9 @@ class OverallStats(UncheckedBaseModel): """ if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 else: class Config: diff --git a/src/humanloop/types/run_stats_response.py b/src/humanloop/types/run_stats_response.py index dbc1be73..86f91f89 100644 --- a/src/humanloop/types/run_stats_response.py +++ b/src/humanloop/types/run_stats_response.py @@ -44,7 +44,9 @@ class RunStatsResponse(UncheckedBaseModel): """ if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( + extra="allow", frozen=True + ) # type: ignore # Pydantic v2 else: class Config: diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..5e626b39 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,151 @@ +from typing import Generator +from unittest.mock import MagicMock + +import pytest +from humanloop.otel.exporter import HumanloopSpanExporter +from humanloop.otel.processor import HumanloopSpanProcessor +from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam +from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor +from opentelemetry.instrumentation.cohere import CohereInstrumentor +from opentelemetry.instrumentation.groq import GroqInstrumentor +from opentelemetry.instrumentation.instrumentor import BaseInstrumentor # type: ignore +from opentelemetry.instrumentation.openai import OpenAIInstrumentor +from opentelemetry.instrumentation.replicate import ReplicateInstrumentor +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.trace import Tracer + + +@pytest.fixture(scope="function") +def opentelemetry_test_provider() -> TracerProvider: + """Create a test TracerProvider with a resource. + + This is similar to the created TracerProvider in the + Humanloop class. + """ + provider = TracerProvider( + resource=Resource.create( + { + "service": "humanloop.sdk", + "environment": "test", + } + ) + ) + return provider + + +@pytest.fixture(scope="function") +def test_span(opentelemetry_test_provider: TracerProvider): + exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(exporter) + opentelemetry_test_provider.add_span_processor(processor) + tracer = opentelemetry_test_provider.get_tracer("test") + return tracer.start_span("test_span") + + +@pytest.fixture(scope="function") +def opentelemetry_test_configuration( + opentelemetry_test_provider: TracerProvider, +) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]: + """Configure OTel backend without HumanloopSpanProcessor. + + Spans created by Instrumentors will not be used to enrich + Humanloop Spans. 
+ """ + exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(exporter) + opentelemetry_test_provider.add_span_processor(processor) + instrumentors: list[BaseInstrumentor] = [ + OpenAIInstrumentor(), + AnthropicInstrumentor(), + GroqInstrumentor(), + CohereInstrumentor(), + ReplicateInstrumentor(), + ] + for instrumentor in instrumentors: + instrumentor.instrument(tracer_provider=opentelemetry_test_provider) + tracer = opentelemetry_test_provider.get_tracer("test") + # Circumvent configuration procedure + + yield tracer, exporter + + for instrumentor in instrumentors: + instrumentor.uninstrument() + + +@pytest.fixture(scope="function") +def opentelemetry_hl_test_configuration( + opentelemetry_test_provider: TracerProvider, +) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]: + """Configure OTel backend with HumanloopSpanProcessor. + + Spans created by Instrumentors will be used to enrich + Humanloop Spans. + """ + exporter = InMemorySpanExporter() + processor = HumanloopSpanProcessor(exporter=exporter) + opentelemetry_test_provider.add_span_processor(processor) + instrumentors: list[BaseInstrumentor] = [ + OpenAIInstrumentor(), + AnthropicInstrumentor(), + GroqInstrumentor(), + CohereInstrumentor(), + ReplicateInstrumentor(), + AnthropicInstrumentor(), + ] + for instrumentor in instrumentors: + instrumentor.instrument( + tracer_provider=opentelemetry_test_provider, + ) + tracer = opentelemetry_test_provider.get_tracer("test") + + yield tracer, exporter + + for instrumentor in instrumentors: + instrumentor.uninstrument() + + +@pytest.fixture(scope="function") +def hl_test_exporter() -> HumanloopSpanExporter: + """ + Test Exporter where HTTP calls to Humanloop API + are mocked. + """ + client = MagicMock() + exporter = HumanloopSpanExporter(client=client) + return exporter + + +@pytest.fixture(scope="function") +def opentelemetry_hl_with_exporter_test_configuration( + hl_test_exporter: HumanloopSpanExporter, + opentelemetry_test_provider: TracerProvider, +) -> Generator[tuple[Tracer, HumanloopSpanExporter], None, None]: + """Configure OTel backend with HumanloopSpanProcessor and + a HumanloopSpanExporter where HTTP calls are mocked. 
+ """ + processor = HumanloopSpanProcessor(exporter=hl_test_exporter) + opentelemetry_test_provider.add_span_processor(processor) + instrumentor = OpenAIInstrumentor() + instrumentor.instrument(tracer_provider=opentelemetry_test_provider) + tracer = opentelemetry_test_provider.get_tracer("test") + + yield tracer, hl_test_exporter + + instrumentor.uninstrument() + + +@pytest.fixture(scope="session") +def call_llm_messages() -> list[ChatCompletionMessageParam]: + return [ + { + "role": "system", + "content": "You are an assistant on the following topics: greetings in foreign languages.", + }, + { + "role": "user", + "content": "Bonjour!", + }, + ] diff --git a/tests/decorators/__init__.py b/tests/decorators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py new file mode 100644 index 00000000..09a769f6 --- /dev/null +++ b/tests/decorators/test_flow_decorator.py @@ -0,0 +1,298 @@ +import os +import random +import string +import time +from unittest.mock import patch + +import pytest +from humanloop.decorators.flow import flow +from humanloop.decorators.prompt import prompt +from humanloop.decorators.tool import tool +from humanloop.otel import TRACE_FLOW_CONTEXT +from humanloop.otel.constants import HUMANLOOP_FILE_KEY +from humanloop.otel.exporter import HumanloopSpanExporter +from humanloop.otel.helpers import read_from_opentelemetry_span +from openai import OpenAI +from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam +from opentelemetry.sdk.trace import Tracer +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter + + +def _test_scenario( + opentelemetry_tracer: Tracer, +): + @tool(opentelemetry_tracer=opentelemetry_tracer) + def _random_string() -> str: + """Return a random string.""" + return "".join( + random.choices( + string.ascii_letters + string.digits, + k=10, + ) + ) + + @prompt( # type: ignore + opentelemetry_tracer=opentelemetry_tracer, + path=None, + template="You are an assistant on the following topics: {topics}.", + ) + def _call_llm(messages: list[ChatCompletionMessageParam]) -> str: + client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + return ( + client.chat.completions.create( + model="gpt-4o", + messages=messages, + temperature=0.8, + ) + .choices[0] + .message.content + ) + _random_string() + + @flow( + opentelemetry_tracer=opentelemetry_tracer, + attributes={"foo": "bar", "baz": 7}, + ) + def _agent_call(messages: list[dict]) -> str: + return _call_llm(messages=messages) + + @flow( # type: ignore + opentelemetry_tracer=opentelemetry_tracer, + ) + def _flow_over_flow(messages: list[dict]) -> str: + return _agent_call(messages=messages) + + return _random_string, _call_llm, _agent_call, _flow_over_flow + + +def test_decorators_without_flow( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_hl_test_configuration + + _call_llm = _test_scenario(tracer)[1] + + # GIVEN a call to @prompt annotated function that calls a @tool + _call_llm( + [ + { + "role": "system", + "content": "You are an assistant on the following topics: greetings in foreign languages.", + }, + { + "role": "user", + "content": "Hello, how are you?", + }, + ] + ) + # WHEN exporting the spans + spans = exporter.get_finished_spans() + # THEN 3 spans arrive at the exporter in the following order: + # 0. Intercepted OpenAI call, which is ignored by the exporter + # 1. 
+def test_decorators_without_flow(
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+    tracer, exporter = opentelemetry_hl_test_configuration
+
+    _call_llm = _test_scenario(tracer)[1]
+
+    # GIVEN a call to a @prompt annotated function that calls a @tool
+    _call_llm(
+        [
+            {
+                "role": "system",
+                "content": "You are an assistant on the following topics: greetings in foreign languages.",
+            },
+            {
+                "role": "user",
+                "content": "Hello, how are you?",
+            },
+        ]
+    )
+    # WHEN exporting the spans
+    spans = exporter.get_finished_spans()
+    # THEN 3 spans arrive at the exporter in the following order:
+    # 0. Intercepted OpenAI call, which is ignored by the exporter
+    # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+    # 2. Prompt Span
+    assert len(spans) == 3
+    assert read_from_opentelemetry_span(
+        span=spans[1],
+        key=HUMANLOOP_FILE_KEY,
+    )["tool"]
+    assert read_from_opentelemetry_span(
+        span=spans[2],
+        key=HUMANLOOP_FILE_KEY,
+    )["prompt"]
+    for span in spans:
+        # THEN no trace-related metadata is present on any of them
+        assert TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id) is None
+
+
+def test_decorators_with_flow_decorator(
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+    # GIVEN a @flow entrypoint to an instrumented application
+    tracer, exporter = opentelemetry_hl_test_configuration
+
+    _agent_call = _test_scenario(tracer)[2]
+
+    # WHEN calling the Flow
+    _agent_call(
+        [
+            {
+                "role": "system",
+                "content": "You are an assistant on the following topics: greetings in foreign languages.",
+            },
+            {
+                "role": "user",
+                "content": "Hello, how are you?",
+            },
+        ]
+    )
+    # THEN 4 spans arrive at the exporter in the following order:
+    # 0. Intercepted OpenAI call, which is ignored by the exporter
+    # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+    # 2. Prompt Span
+    # 3. Flow Span
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 4
+    # THEN the spans are returned bottom to top
+    assert read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["tool"]
+    assert read_from_opentelemetry_span(span=spans[2], key=HUMANLOOP_FILE_KEY)["prompt"]
+    # assert read_from_opentelemetry_span(span=spans[3], key=HUMANLOOP_FILE_KEY)["flow"]
+    assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
+    assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
+    assert (flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id))
+    # THEN the Tool span is a child of the Prompt span
+    assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+    assert tool_trace_metadata["is_flow_log"] is False
+    # THEN the Prompt span is a child of the Flow span
+    assert prompt_trace_metadata["trace_parent_id"] == spans[3].context.span_id
+    assert prompt_trace_metadata["is_flow_log"] is False
+    assert flow_trace_metadata["is_flow_log"]
+    assert flow_trace_metadata["trace_id"] == spans[3].context.span_id
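# Judging from the keys the tests read, each TRACE_FLOW_CONTEXT entry maps a span id
# to metadata shaped roughly as below (inferred from these assertions, not the SDK's
# definition):
#   TRACE_FLOW_CONTEXT[span_id] == {
#       "trace_id": ...,         # span id of the Flow span that owns the trace
#       "trace_parent_id": ...,  # span id of the parent span within the trace
#       "is_flow_log": ...,      # True only for spans created by @flow
#   }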
+def test_flow_decorator_flow_in_flow(
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[dict],
+):
+    # GIVEN a configured OpenTelemetry tracer and exporter
+    tracer, exporter = opentelemetry_hl_test_configuration
+
+    _flow_over_flow = _test_scenario(tracer)[3]
+
+    # WHEN calling the _flow_over_flow function with specific messages
+    _flow_over_flow(call_llm_messages)
+
+    # THEN 5 spans arrive at the exporter in the following order:
+    # 0. Intercepted OpenAI call, which is ignored by the exporter
+    # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+    # 2. Prompt Span
+    # 3. Nested Flow Span
+    # 4. Flow Span
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 5
+    assert read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["tool"]
+    assert read_from_opentelemetry_span(span=spans[2], key=HUMANLOOP_FILE_KEY)["prompt"]
+    assert read_from_opentelemetry_span(span=spans[3], key=HUMANLOOP_FILE_KEY)["flow"] != {}
+    with pytest.raises(KeyError):
+        read_from_opentelemetry_span(span=spans[4], key=HUMANLOOP_FILE_KEY)["flow"]
+
+    assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
+    assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
+    assert (nested_flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id))
+    assert (flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[4].get_span_context().span_id))
+    # THEN the parent of the Tool Log is the Prompt Log
+    assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+    assert tool_trace_metadata["is_flow_log"] is False
+    # THEN the parent of the Prompt Log is the Flow Log
+    assert prompt_trace_metadata["trace_parent_id"] == spans[3].context.span_id
+    assert prompt_trace_metadata["is_flow_log"] is False
+    # THEN the nested Flow Log creates a new trace
+    assert nested_flow_trace_metadata["trace_id"] == spans[3].context.span_id
+    assert nested_flow_trace_metadata["is_flow_log"]
+    # THEN the parent of the nested Flow Log is the upper Flow Log
+    assert nested_flow_trace_metadata["trace_parent_id"] == spans[4].context.span_id
+    # THEN the parent Flow Log correctly points to itself
+    assert flow_trace_metadata["trace_id"] == spans[4].context.span_id
+    assert flow_trace_metadata["is_flow_log"]
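# The exporter tests below wait for the threaded exporter with a fixed time.sleep(3).
# A polling helper along these lines (hypothetical, not used by the tests) would be
# less timing-sensitive:
def _wait_for_export_calls(mock_export_method, expected_calls: int, timeout: float = 5.0) -> None:
    """Poll the spied export method until it has been called `expected_calls` times."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if len(mock_export_method.call_args_list) >= expected_calls:
            return
        time.sleep(0.05)
    raise TimeoutError(f"expected {expected_calls} export calls before timeout")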
+def test_flow_decorator_with_hl_exporter(
+    call_llm_messages: list[dict],
+    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # NOTE: type ignore comments are caused by the MagicMock used to mock _client
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+    _agent_call = _test_scenario(tracer)[2]
+
+    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+        # WHEN calling the @flow decorated function
+        _agent_call(call_llm_messages)
+
+        # The exporter is threaded; wait for its worker threads to finish
+        time.sleep(3)
+
+        # THEN 4 spans arrive at the exporter in the following order:
+        # 0. Intercepted OpenAI call, which is ignored by the exporter
+        # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+        # 2. Prompt Span
+        # 3. Flow Span
+        assert len(mock_export_method.call_args_list) == 4
+
+        tool_span = mock_export_method.call_args_list[1][0][0][0]
+        prompt_span = mock_export_method.call_args_list[2][0][0][0]
+        flow_span = mock_export_method.call_args_list[3][0][0][0]
+        # THEN the last uploaded span is the Flow
+        assert read_from_opentelemetry_span(
+            span=flow_span,
+            key=HUMANLOOP_FILE_KEY,
+        )["flow"]["attributes"] == {  # type: ignore[index,call-overload]
+            "foo": "bar",
+            "baz": 7,
+        }
+        # THEN the second uploaded span is the Prompt
+        assert "prompt" in read_from_opentelemetry_span(
+            span=prompt_span,
+            key=HUMANLOOP_FILE_KEY,
+        )
+        # THEN the first uploaded span is the Tool
+        assert "tool" in read_from_opentelemetry_span(
+            span=tool_span,
+            key=HUMANLOOP_FILE_KEY,
+        )
+
+        # NOTE: The type: ignore comments are caused by the MagicMock used to mock the HTTP client
+
+        # THEN the first Log uploaded is the Flow
+        first_log = exporter._client.flows.log.call_args_list[0][1]  # type: ignore
+        assert "flow" in first_log
+        exporter._client.flows.log.assert_called_once()  # type: ignore
+        flow_log_call_args = exporter._client.flows.log.call_args_list[0]  # type: ignore
+        assert flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
+        flow_log_id = exporter._client.flows.log.return_value.id  # type: ignore
+
+        # THEN the second Log uploaded is the Prompt
+        exporter._client.prompts.log.assert_called_once()  # type: ignore
+        prompt_log_call_args = exporter._client.prompts.log.call_args_list[0]  # type: ignore
+        assert prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
+        assert prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
+        prompt_log_id = exporter._client.prompts.log.return_value.id  # type: ignore
+
+        # THEN the final Log uploaded is the Tool
+        exporter._client.tools.log.assert_called_once()  # type: ignore
+        tool_log_call_args = exporter._client.tools.log.call_args_list[0]  # type: ignore
+        assert tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
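# The assertions above read ids back through exporter._client.<file>.log.return_value.id.
# Because the client is a MagicMock, each log method returns the same mock object on
# every call; if distinct ids were ever needed, they could be pinned explicitly
# (a sketch under that assumption, with hypothetical ids):
#   exporter._client.flows.log.return_value.id = "flow-log-1"
#   exporter._client.prompts.log.return_value.id = "prompt-log-1"
#   exporter._client.tools.log.return_value.id = "tool-log-1"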
+def test_flow_decorator_hl_exporter_flow_inside_flow(
+    call_llm_messages: list[dict],
+    opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+    tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+    _flow_over_flow = _test_scenario(tracer)[3]
+
+    with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+        # WHEN calling the @flow decorated function
+        _flow_over_flow(call_llm_messages)
+
+        # The exporter is threaded; wait for its worker threads to finish
+        time.sleep(3)
+
+        # THEN 5 spans arrive at the exporter in the following order:
+        # 0. Intercepted OpenAI call, which is ignored by the exporter
+        # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+        # 2. Prompt Span
+        # 3. Nested Flow Span
+        # 4. Flow Span
+        assert len(mock_export_method.call_args_list) == 5
+        # THEN the last uploaded span is the outer Flow
+        # THEN the second to last uploaded span is the nested Flow
+        flow_span = mock_export_method.call_args_list[4][0][0][0]
+        nested_flow_span = mock_export_method.call_args_list[3][0][0][0]
+        assert (flow_metadata := TRACE_FLOW_CONTEXT.get(flow_span.get_span_context().span_id))
+        assert (nested_flow_metadata := TRACE_FLOW_CONTEXT.get(nested_flow_span.get_span_context().span_id))
+        assert nested_flow_metadata["trace_parent_id"] == flow_span.context.span_id
+        assert flow_metadata["is_flow_log"]
+        assert nested_flow_metadata["is_flow_log"]
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
new file mode 100644
index 00000000..4da40358
--- /dev/null
+++ b/tests/decorators/test_prompt_decorator.py
@@ -0,0 +1,292 @@
+import os
+from typing import Optional
+
+import cohere
+import pytest
+
+# replicate has no typing stubs
+import replicate  # type: ignore
+from anthropic import Anthropic
+from anthropic.types.message_param import MessageParam
+from dotenv import load_dotenv
+from groq import Groq
+from groq import NotFoundError as GroqNotFoundError
+from humanloop.decorators.prompt import prompt
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY
+from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request import PromptKernelRequest
+from openai import OpenAI
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+# replicate has no typing stubs, ruff wants this import placed here
+from replicate.exceptions import ModelError as ReplicateModelError  # type: ignore
+
+_PROVIDER_AND_MODEL = [
+    ("openai", "gpt-4o"),
+    ("groq", "llama3-8b-8192"),
+    ("cohere", "command"),
+    ("replicate", "meta/meta-llama-3-8b-instruct"),
+    ("anthropic", "claude-3-opus-latest"),
+]
+
+
+def _test_scenario(opentelemetry_tracer: Tracer, **kwargs):
+    """
+    Set up the function decorated with @prompt.
+
+    Normally the opentelemetry_tracer would be passed in by the Humanloop client.
+    In a test environment, the Tracer is obtained from a fixture and the tests
+    call this function to set up the decorated function under test.
+    """
+
+    @prompt(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
+    def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
+        load_dotenv()
+        if provider == "openai":
+            # NOTE: These tests check that the instrumentors can intercept calls to the
+            # OpenAI provider. We could not find a way to intercept calls made from a Mock.
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # type: ignore + return ( + client.chat.completions.create( + model=model, + messages=messages, # type: ignore + temperature=0.8, + ) + .choices[0] + .message.content + ) + if provider == "anthropic": + client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # type: ignore + messages_anthropic_format = [ + MessageParam( + content=message["content"], + role="user" if message["role"] in ("user", "system") else "assistant", + ) + for message in messages + ] + return ( + client.messages.create( # type: ignore + model=model, + messages=messages_anthropic_format, + max_tokens=200, + temperature=0.8, + ) + .content[0] + .text + ) + if provider == "groq": + try: + client = Groq( # type: ignore + # This is the default and can be omitted + api_key=os.environ.get("GROQ_API_KEY"), + ) + return ( + client.chat.completions.create( + messages=messages, # type: ignore + model=model, + temperature=0.8, + ) + .choices[0] + .message.content + ) + except GroqNotFoundError: + # NOTE: Tests in this file are integration tests that rely on live LLM provider + # clients. If a test fails, it might be flaky. If this happens, consider adding + # a skip mechanism similar to Groq + pytest.skip("GROQ not available") + if provider == "cohere": + client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore + messages_cohere_format: list[cohere.Message] = [] + for message in messages: + if message["role"] == "system": + messages_cohere_format.append(cohere.SystemMessage(message=message["content"])) + elif message["role"] == "user": + messages_cohere_format.append(cohere.UserMessage(message=message["content"])) + elif message["role"] == "assistant": + messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"])) + return client.chat( # type: ignore + chat_history=messages_cohere_format, + model=model, + max_tokens=200, + message=messages[-1]["content"], + temperature=0.8, + ).text + if provider == "replicate": + # TODO: Instrumentor only picks up methods on module-level, not client level + # This should be documented somewhere or changed + replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY") + try: + output = "" + for event in replicate.run( + model, + input={ + "prompt": messages[0]["content"] + " " + messages[-1]["content"], + "temperature": 0.8, + }, + ): + output += str(event) + except ReplicateModelError: + pytest.skip("Replicate not available") + if not output: + pytest.skip("Replicate not available") + return output + raise ValueError(f"Unknown provider: {provider}") + + return _call_llm_base + + +@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL) +def test_prompt_decorator( + provider_model: tuple[str, str], + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], + call_llm_messages: list[ChatCompletionMessageParam], +): + provider, model = provider_model + # GIVEN an OpenTelemetry configuration without HumanloopSpanProcessor + tracer, exporter = opentelemetry_test_configuration + # WHEN using the Prompt decorator + + call_llm = _test_scenario(tracer) + + call_llm( + provider=provider, + model=model, + messages=call_llm_messages, + ) + # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt + spans = exporter.get_finished_spans() + assert len(spans) == 2 + assert not is_humanloop_span(span=spans[0]) + assert is_humanloop_span(span=spans[1]) + # THEN the Prompt span is not enhanced with information from the LLM provider + assert 
spans[1].attributes.get("prompt") is None  # type: ignore
+    # (no Prompt information is added without the HumanloopSpanProcessor)
+
+
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_hl_processor(
+    provider_model: tuple[str, str],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[ChatCompletionMessageParam],
+):
+    provider, model = provider_model
+    # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
+    tracer, exporter = opentelemetry_hl_test_configuration
+    # WHEN using the Prompt decorator
+
+    call_llm = _test_scenario(opentelemetry_tracer=tracer)
+
+    call_llm(
+        provider=provider,
+        model=model,
+        messages=call_llm_messages,
+    )
+    # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
+    spans = exporter.get_finished_spans()
+    assert len(spans) == 2
+    assert not is_humanloop_span(span=spans[0])
+    assert is_humanloop_span(span=spans[1])
+    # THEN the Prompt span is enhanced with information and forms a correct PromptKernel
+    prompt_kernel = PromptKernelRequest.model_validate(
+        read_from_opentelemetry_span(
+            span=spans[1],
+            key=HUMANLOOP_FILE_KEY,
+        )["prompt"]  # type: ignore
+    )
+    # THEN the temperature is intercepted from the LLM provider call
+    assert prompt_kernel.temperature == 0.8
+    # THEN the provider is intercepted from the LLM provider call
+    assert prompt_kernel.provider == provider
+    # THEN the model is intercepted from the LLM provider call
+    assert prompt_kernel.model == model
+    # THEN top_p is not present since it's not present in the LLM provider call
+    assert prompt_kernel.top_p is None
+
+
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_defaults(
+    provider_model: tuple[str, str],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+    call_llm_messages: list[ChatCompletionMessageParam],
+):
+    provider, model = provider_model
+    # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
+    tracer, exporter = opentelemetry_hl_test_configuration
+    # WHEN using the Prompt decorator with default values
+
+    call_llm = _test_scenario(
+        opentelemetry_tracer=tracer,
+        temperature=0.9,
+        top_p=0.1,
+        template="You are an assistant on the following topics: {topics}.",
+        path=None,
+    )
+
+    call_llm(
+        provider=provider,
+        model=model,
+        messages=call_llm_messages,
+    )
+    spans = exporter.get_finished_spans()
+    # THEN the Prompt span is enhanced with information and forms a correct PromptKernel
+    prompt = PromptKernelRequest.model_validate(
+        read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["prompt"]  # type: ignore
+    )
+    # THEN the temperature intercepted from the LLM provider call is overridden by the decorator default
+    assert prompt.temperature == 0.9
+    # THEN top_p is taken from the decorator default value
+    assert prompt.top_p == 0.1
+    # THEN the model is intercepted from the LLM provider call
+    assert prompt.model == model
+
+
+@pytest.mark.parametrize(
+    "attributes_test_expected",
+    [
+        (
+            {"foo": "bar"},
+            {"foo": "bar"},
+        ),
+        (
+            {},
+            None,
+        ),
+        (
+            None,
+            None,
+        ),
+    ],
+)
+def test_prompt_attributes(
+    attributes_test_expected: tuple[dict[str, str], dict[str, str]],
+    call_llm_messages: list[ChatCompletionMessageParam],
+    opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+    test_attributes, expected_attributes = attributes_test_expected
+    tracer, exporter =
opentelemetry_hl_test_configuration + + call_llm = _test_scenario( + opentelemetry_tracer=tracer, + path=None, + attributes=test_attributes, + ) + + call_llm( + provider="openai", + model="gpt-4o", + messages=call_llm_messages, + ) + + assert len(exporter.get_finished_spans()) == 2 + + prompt_kernel = PromptKernelRequest.model_validate( + read_from_opentelemetry_span( + span=exporter.get_finished_spans()[1], + key=HUMANLOOP_FILE_KEY, + )["prompt"] # type: ignore + ) + assert prompt_kernel.attributes == expected_attributes diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py new file mode 100644 index 00000000..34b330ef --- /dev/null +++ b/tests/decorators/test_tool_decorator.py @@ -0,0 +1,568 @@ +import sys +from typing import Any, Optional, TypedDict, Union + +import pytest +from humanloop.decorators.tool import tool +from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY +from humanloop.otel.helpers import read_from_opentelemetry_span +from jsonschema.protocols import Validator +from opentelemetry.sdk.trace import Tracer +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter + + +def test_calculator_decorator( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN a test OpenTelemetry configuration + tracer, exporter = opentelemetry_test_configuration + + @tool(opentelemetry_tracer=tracer) + def calculator(operation: str, num1: float, num2: float) -> float: + """Do arithmetic operations on two numbers.""" + if operation == "add": + return num1 + num2 + elif operation == "subtract": + return num1 - num2 + elif operation == "multiply": + return num1 * num2 + elif operation == "divide": + return num1 / num2 + else: + raise ValueError(f"Invalid operation: {operation}") + + # WHEN calling the @tool decorated function + result = calculator(operation="add", num1=1, num2=2) + assert result == 3 + # THEN a single span is created and the log and file attributes are correctly set + spans = exporter.get_finished_spans() + assert len(spans) == 1 + hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_FILE_KEY) + hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_LOG_KEY) + assert hl_log["output"] == str(result) == "3" + assert hl_log["inputs"] == { + "operation": "add", + "num1": 1, + "num2": 2, + } + assert hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers." 
+ # TODO: pydantic is inconsistent by dumping either tuple or list + assert calculator.json_schema == hl_file["tool"]["function"] + + Validator.check_schema(calculator.json_schema) + + +def test_union_type(opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter]): + tracer, _ = opentelemetry_test_configuration + + @tool(opentelemetry_tracer=tracer) + def foo(a: Union[int, float], b: float) -> float: + return a + b + + assert foo.json_schema["parameters"]["properties"]["a"] == { + "anyOf": [ + {"type": "integer"}, + {"type": "number"}, + ] + } + assert foo.json_schema["parameters"]["properties"]["b"] == {"type": "number"} + assert foo.json_schema["parameters"]["required"] == ("a", "b") + + Validator.check_schema(foo.json_schema) + + +def test_not_required_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_test_configuration + + @tool(opentelemetry_tracer=tracer) + def test_calculator(a: Optional[float], b: float) -> float: + if a is None: + a = 0 + return a + b + + assert test_calculator(3, 4) == 7 + assert len(exporter.get_finished_spans()) == 1 + assert test_calculator.json_schema["parameters"]["properties"]["a"] == { + "type": ["number", "null"], + } + + Validator.check_schema(test_calculator.json_schema) + + +def test_no_annotation_on_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b) -> float: + if a is None: + a = 0 + return a + b + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` is of `any` type + # NOTE: JSONSchema dropped support for 'any' type, we include all types + # as a workaround + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_dict_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: dict) -> float: + if a is None: + a = 0 + return a + b["c"] + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + # on both keys and values + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": "object", + "properties": { + "key": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + "value": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_list_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an 
OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: Optional[list]) -> float: + if a is None: + a = 0 + sum = a + if b is None: + return sum + for val in b: + sum += val + return sum + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": ["array", "null"], + "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + "required": (), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + +def test_tuple_annotation_no_sub_types( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on a parameter + @tool(opentelemetry_tracer=tracer) + def calculator(a: Optional[float], b: Optional[tuple]) -> float: + if a is None: + a = 0 + sum = a + if b is None: + return sum + for val in b: + sum += val + return sum + + # WHEN building the Tool kernel + # THEN the JSON schema is correctly built and `b` accepts any type + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": ["number", "null"]}, + "b": { + "type": ["array", "null"], + "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + }, + "required": (), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + +def test_function_without_return_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool and without type hint on the return value + # WHEN building the Tool kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: Optional[float], b: float) -> float: + """Add two numbers.""" + if a is None: + a = 0 + return a + b + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_list_annotation_parameter( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, exporter = opentelemetry_test_configuration + + # WHEN defining a tool with a list parameter + @tool(opentelemetry_tracer=tracer) + def foo(to_join: list[str]) -> str: + return " ".join(to_join) + + assert "a b c" == foo(to_join=["a", "b", "c"]) + + # THEN the function call results in a Span + assert len(exporter.get_finished_spans()) == 1 + # THEN the argument is correctly described in the JSON schema + assert foo.json_schema["parameters"]["properties"]["to_join"] == { # type: ignore + "type": "array", + "items": {"type": "string"}, + } + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_list_in_list_parameter_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a list of lists parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def nested_plain_join(to_join: 
list[list[str]]): + return " ".join([val for sub_list in to_join for val in sub_list]) + + # THEN the JSON schema is correctly built and parameter is correctly described + assert nested_plain_join.json_schema["parameters"]["properties"]["to_join"] == { + "type": "array", + "items": { + "type": "array", + "items": {"type": "string"}, + }, + } + + # THEN the JSONSchema is valid + Validator.check_schema(nested_plain_join.json_schema) + + +def test_complex_dict_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a dictionary parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: dict[Union[int, str], list[str]]): + return a + + # THEN the parameter is correctly described + assert foo.json_schema["parameters"]["properties"]["a"] == { + "type": "object", + "properties": { + "key": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "value": {"type": "array", "items": {"type": "string"}}, + }, + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_tuple_annotation( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition with a tuple parameter + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(a: Optional[tuple[int, Optional[str], float]]): + return a + + # THEN the parameter is correctly described + assert foo.json_schema["parameters"]["properties"]["a"] == { + "type": ["array", "null"], + "items": [ + {"type": "integer"}, + {"type": ["string", "null"]}, + {"type": "number"}, + ], + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_tool_no_args( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a tool definition without arguments + # WHEN building the Tool Kernel + @tool(opentelemetry_tracer=tracer) + def foo(): + return 42 + + # THEN the JSON schema is correctly built + assert foo.json_schema == { + "description": "", + "name": "foo", + "parameters": { + "properties": {}, + "required": [], + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + # THEN the JSONSchema is valid + Validator.check_schema(foo.json_schema) + + +def test_custom_types_throws( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a user-defined type + class Foo(TypedDict): + a: int # type: ignore + b: int # type: ignore + + # WHEN defining a tool with a parameter of that type + with pytest.raises(ValueError) as exc: + + @tool(opentelemetry_tracer=tracer) + def foo_bar(foo: Foo): + return foo.a + foo.b # type: ignore + + # THEN a ValueError is raised + assert exc.value.args[0].startswith("Error parsing signature of @tool annotated function foo_bar") + + +def test_tool_as_higher_order_function( + opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + tracer, exporter = opentelemetry_hl_test_configuration + + def calculator(operation: str, num1: float, num2: float) -> float: + """Do arithmetic operations on two numbers.""" + if operation == "add": + return num1 + num2 + elif operation == "subtract": + return num1 - 
num2 + elif operation == "multiply": + return num1 * num2 + elif operation == "divide": + return num1 / num2 + else: + raise ValueError(f"Invalid operation: {operation}") + + higher_order_fn_tool = tool(opentelemetry_tracer=tracer)(calculator) + + @tool(opentelemetry_tracer=tracer) # type: ignore + def calculator(operation: str, num1: float, num2: float) -> float: + """Do arithmetic operations on two numbers.""" + if operation == "add": + return num1 + num2 + elif operation == "subtract": + return num1 - num2 + elif operation == "multiply": + return num1 * num2 + elif operation == "divide": + return num1 / num2 + else: + raise ValueError(f"Invalid operation: {operation}") + + higher_order_fn_tool(operation="add", num1=1, num2=2) + calculator(operation="add", num1=1, num2=2) + + assert len(spans := exporter.get_finished_spans()) == 2 + + hl_file_higher_order_fn = read_from_opentelemetry_span( + span=spans[0], + key=HUMANLOOP_FILE_KEY, + ) + hl_file_decorated_fn = read_from_opentelemetry_span( + span=spans[1], + key=HUMANLOOP_FILE_KEY, + ) + assert hl_file_higher_order_fn["tool"]["source_code"] == hl_file_decorated_fn["tool"]["source_code"] # type: ignore + + +def test_python310_syntax( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + if sys.version_info < (3, 10): + pytest.skip("Requires Python 3.10") + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `|` for Optional + @tool(opentelemetry_tracer=tracer) + def calculator(a: float, b: float | None = None) -> float: + # NOTE: dummy function, only testing its signature not correctness + if a is None: + a = 0 + return a + b # type: ignore + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": "number"}, + "b": {"type": ["number", "null"]}, + }, + "required": ("a",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_python310_union_syntax( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + if sys.version_info < (3, 10): + pytest.skip("Requires Python 3.10") + + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `|` for Union + @tool(opentelemetry_tracer=tracer) + def calculator(a: float, b: float | int | str) -> float: + # NOTE: dummy function, only testing its signature not correctness + return a + b # type: ignore + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + "a": {"type": "number"}, + "b": {"anyOf": [{"type": "number"}, {"type": "integer"}, {"type": "string"}]}, + }, + "required": ("a", "b"), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } + + Validator.check_schema(calculator.json_schema) + + +def test_python_list_ellipsis( + opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter], +): + if sys.version_info < (3, 10): + pytest.skip("Requires Python 3.10") + # GIVEN an OTel configuration + tracer, _ = opentelemetry_test_configuration + + # GIVEN a function annotated with @tool where a parameter uses `...` + @tool(opentelemetry_tracer=tracer) + def calculator(b: ...) 
-> float | None: # type: ignore + # NOTE: dummy function, only testing its signature not correctness + if isinstance(b, list): + return sum(b) + return None + + # WHEN building the Tool kernel + # THEN the JSON schema is correct + assert calculator.json_schema == { + "description": "", + "name": "calculator", + "parameters": { + "properties": { + # THEN b is of any type + "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}, + }, + "required": ("b",), + "type": "object", + "additionalProperties": False, + }, + "strict": True, + } diff --git a/tests/otel/__init__.py b/tests/otel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py new file mode 100644 index 00000000..c409640e --- /dev/null +++ b/tests/otel/test_helpers.py @@ -0,0 +1,171 @@ +import pytest +from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span +from opentelemetry.sdk.trace import Span + + +def test_read_empty(test_span: Span): + assert read_from_opentelemetry_span(test_span) == {} + + +def test_read_non_existent_key(test_span: Span): + with pytest.raises(KeyError): + assert read_from_opentelemetry_span(test_span, "key") == {} + write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, key="key") + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "key.x": 7, + "key.y": "foo", + } + with pytest.raises(KeyError): + assert read_from_opentelemetry_span(test_span, "key.z") is None + + +def test_simple_dict(test_span: Span): + write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, "key") + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "key.x": 7, + "key.y": "foo", + } + assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": "foo"} + + +def test_no_prefix(test_span: Span): + write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}) + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "x": 7, + "y": "foo", + } + assert read_from_opentelemetry_span(test_span) == {"x": 7, "y": "foo"} + + +def test_nested_object(test_span: Span): + write_to_opentelemetry_span(test_span, {"x": 7, "y": {"z": "foo"}}, "key") + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "key.x": 7, + "key.y.z": "foo", + } + assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": {"z": "foo"}} + + +def test_list(test_span: Span): + write_to_opentelemetry_span( + test_span, + [{"x": 7, "y": "foo"}, {"z": "bar"}], # type: ignore + "key", + ) # type: ignore + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "key.0.x": 7, + "key.0.y": "foo", + "key.1.z": "bar", + } + assert read_from_opentelemetry_span(test_span, "key") == [ + {"z": "bar"}, + {"x": 7, "y": "foo"}, + ] + + +def test_list_no_prefix(test_span: Span): + write_to_opentelemetry_span( + test_span, + [{"x": 7, "y": "foo"}, {"z": "bar"}], # type: ignore + ) + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "0.x": 7, + "0.y": "foo", + "1.z": "bar", + } + assert read_from_opentelemetry_span(test_span) == [ + {"z": "bar"}, + {"x": 7, "y": "foo"}, + ] + + +def test_multiple_nestings(test_span: Span): + write_to_opentelemetry_span( + test_span, + [ + {"x": 7, "y": "foo"}, + [{"z": "bar"}, {"a": 
42}], + ], # type: ignore + "key", + ) + assert dict(test_span.attributes) == { # type: ignore + "key.0.x": 7, + "key.0.y": "foo", + "key.1.0.z": "bar", + "key.1.1.a": 42, + } + assert read_from_opentelemetry_span(test_span, "key") == [ + [ + {"a": 42}, + {"z": "bar"}, + ], + {"x": 7, "y": "foo"}, + ] + + +def test_read_mixed_numeric_string_keys(test_span: Span): + test_span.set_attributes( + { + "key.0.x": 7, + "key.0.y": "foo", + "key.a.z": "bar", + "key.a.a": 42, + } + ) + assert read_from_opentelemetry_span(span=test_span, key="key") == { # type: ignore + "0": {"x": 7, "y": "foo"}, + "a": {"z": "bar", "a": 42}, + } + assert read_from_opentelemetry_span(span=test_span) == { # type: ignore + "key": { + "0": {"x": 7, "y": "foo"}, + "a": {"z": "bar", "a": 42}, + } + } + + +def test_sub_key_same_as_key(test_span: Span): + write_to_opentelemetry_span(test_span, {"key": 7}, "key") + # NOTE: attributes cannot be None at this point + assert dict(test_span.attributes) == { # type: ignore + "key.key": 7, + } + assert read_from_opentelemetry_span(test_span, "key") == {"key": 7} + + +def test_read_nested_key(test_span: Span): + test_span.set_attributes({"key.x": 7, "key.y.z": "foo"}) + assert read_from_opentelemetry_span(span=test_span, key="key.y") == {"z": "foo"} + + +def test_write_read_sub_key(test_span: Span): + write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, "key") + assert read_from_opentelemetry_span(test_span, "key.x") == 7 + assert read_from_opentelemetry_span(test_span, "key.y") == "foo" + assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": "foo"} + + +def test_write_drops_dict_all_null_values(test_span: Span): + # GIVEN a test_span to which a value with null values is written + # NOTE: mypy complains about None value in the dict, but it is intentionally under test + write_to_opentelemetry_span(test_span, {"x": None, "y": None}, "key") # type: ignore + # WHEN reading the value from the span + # THEN the value is not present in the span attributes + assert "key" not in test_span.attributes # type: ignore + with pytest.raises(KeyError): + assert read_from_opentelemetry_span(test_span, "key") == {} + + +def test_write_drops_null_value_from_dict(test_span: Span): + # GIVEN a test_span to which a dict with some null values are written + # NOTE: mypy complains about None value in the dict, but it is intentionally under test + write_to_opentelemetry_span(test_span, {"x": 2, "y": None}, "key") # type: ignore + # WHEN reading the values from the span + # THEN the value with null value is not present in the span attributes + assert read_from_opentelemetry_span(test_span, "key") == {"x": 2} diff --git a/tests/utils/assets/models/__init__.py b/tests/utils/assets/models/__init__.py index 3a1c852e..2cf01263 100644 --- a/tests/utils/assets/models/__init__.py +++ b/tests/utils/assets/models/__init__.py @@ -5,7 +5,7 @@ from .circle import CircleParams from .object_with_defaults import ObjectWithDefaultsParams from .object_with_optional_field import ObjectWithOptionalFieldParams -from .shape import ShapeParams, Shape_CircleParams, Shape_SquareParams +from .shape import Shape_CircleParams, Shape_SquareParams, ShapeParams from .square import SquareParams from .undiscriminated_shape import UndiscriminatedShapeParams diff --git a/tests/utils/assets/models/circle.py b/tests/utils/assets/models/circle.py index 3395545e..759fe3eb 100644 --- a/tests/utils/assets/models/circle.py +++ b/tests/utils/assets/models/circle.py @@ -2,7 +2,6 @@ # This file was auto-generated by Fern from 
our API Definition. -import typing_extensions import typing_extensions from humanloop.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/object_with_defaults.py b/tests/utils/assets/models/object_with_defaults.py index ef14f7b2..a977b1d2 100644 --- a/tests/utils/assets/models/object_with_defaults.py +++ b/tests/utils/assets/models/object_with_defaults.py @@ -3,7 +3,6 @@ # This file was auto-generated by Fern from our API Definition. import typing_extensions -import typing_extensions class ObjectWithDefaultsParams(typing_extensions.TypedDict): diff --git a/tests/utils/assets/models/object_with_optional_field.py b/tests/utils/assets/models/object_with_optional_field.py index d6ab74e8..d667d6b8 100644 --- a/tests/utils/assets/models/object_with_optional_field.py +++ b/tests/utils/assets/models/object_with_optional_field.py @@ -2,12 +2,13 @@ # This file was auto-generated by Fern from our API Definition. -import typing_extensions +import datetime as dt import typing +import uuid + import typing_extensions from humanloop.core.serialization import FieldMetadata -import datetime as dt -import uuid + from .color import Color from .shape import ShapeParams from .undiscriminated_shape import UndiscriminatedShapeParams diff --git a/tests/utils/assets/models/shape.py b/tests/utils/assets/models/shape.py index 0160cdbd..4add344e 100644 --- a/tests/utils/assets/models/shape.py +++ b/tests/utils/assets/models/shape.py @@ -3,9 +3,10 @@ # This file was auto-generated by Fern from our API Definition. from __future__ import annotations -import typing_extensions -import typing_extensions + import typing + +import typing_extensions from humanloop.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/square.py b/tests/utils/assets/models/square.py index c7d6cfaf..da4a2111 100644 --- a/tests/utils/assets/models/square.py +++ b/tests/utils/assets/models/square.py @@ -2,7 +2,6 @@ # This file was auto-generated by Fern from our API Definition. -import typing_extensions import typing_extensions from humanloop.core.serialization import FieldMetadata diff --git a/tests/utils/assets/models/undiscriminated_shape.py b/tests/utils/assets/models/undiscriminated_shape.py index 68876a23..99f12b30 100644 --- a/tests/utils/assets/models/undiscriminated_shape.py +++ b/tests/utils/assets/models/undiscriminated_shape.py @@ -3,6 +3,7 @@ # This file was auto-generated by Fern from our API Definition. import typing + from .circle import CircleParams from .square import SquareParams diff --git a/tests/utils/test_serialization.py b/tests/utils/test_serialization.py index 56591905..2ad8e1b5 100644 --- a/tests/utils/test_serialization.py +++ b/tests/utils/test_serialization.py @@ -1,10 +1,10 @@ # This file was auto-generated by Fern from our API Definition. -from typing import List, Any +from typing import Any, List from humanloop.core.serialization import convert_and_respect_annotation_metadata -from .assets.models import ShapeParams, ObjectWithOptionalFieldParams +from .assets.models import ObjectWithOptionalFieldParams, ShapeParams UNION_TEST: ShapeParams = {"radius_measurement": 1.0, "shape_type": "circle", "id": "1"} UNION_TEST_CONVERTED = {"shapeType": "circle", "radiusMeasurement": 1.0, "id": "1"}
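The helper behaviour pinned down in tests/otel/test_helpers.py above — dot-joined keys, list indices as numeric key segments, and None values being dropped — amounts to a flatten step along the lines of the sketch below (an illustration of the tested contract, not the SDK's actual implementation):

from typing import Any, Union


def _flatten(value: Union[dict, list], prefix: str = "") -> dict[str, Any]:
    """Flatten nested dicts/lists into dot-joined span attribute keys, dropping None values."""
    flat: dict[str, Any] = {}
    items = value.items() if isinstance(value, dict) else enumerate(value)
    for key, sub_value in items:
        full_key = f"{prefix}.{key}" if prefix else str(key)
        if isinstance(sub_value, (dict, list)):
            flat.update(_flatten(sub_value, full_key))
        elif sub_value is not None:
            flat[full_key] = sub_value
    return flat


assert _flatten({"x": 7, "y": {"z": "foo"}}, "key") == {"key.x": 7, "key.y.z": "foo"}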