diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000..bd478102
Binary files /dev/null and b/.DS_Store differ
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..2d57149a
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,91 @@
+name: CI
+on: [push]
+permissions:
+  id-token: write # This is required for requesting the JWT
+  contents: read # This is required for actions/checkout
+jobs:
+  python-setup:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-24.04]
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v2
+      - name: Set up python
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+      #---------------------------#
+      - name: Compute Cache Key (Ignoring Only [tool.poetry] version)
+        id: compute-cache-key
+        run: |
+          awk '
+          /^\[tool.poetry\]/ {p=1}
+          /^\[/ && !/^\[tool.poetry\]/ {p=0}
+          p && /^[[:space:]]*version[[:space:]]*=/ {next}
+          {print}
+          ' pyproject.toml | sha256sum | cut -d " " -f1 > cache_key.txt
+          echo "cache_key=venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-$(cat cache_key.txt)" >> $GITHUB_OUTPUT
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          version: 1.4.0
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+          installer-parallel: true
+      - name: Load cached venv
+        id: cached-poetry-dependencies
+        uses: actions/cache@v3
+        with:
+          path: .venv
+          key: ${{ steps.compute-cache-key.outputs.cache_key }}
+      - name: Install dependencies
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: poetry install --no-interaction --no-root --with=dev
+      - name: Debug Poetry Environment
+        run: |
+          poetry env info
+          ls -l .venv/bin/pytest
+    outputs:
+      python-version: ${{ steps.setup-python.outputs.python-version }}
+      cache_key: ${{ steps.compute-cache-key.outputs.cache_key }}
+  test:
+    runs-on: ubuntu-latest
+    needs: python-setup
+    strategy:
+      matrix:
+        os: [ubuntu-24.04]
+        pytest_target: [models, layers]
+    name: test-${{ matrix.pytest_target }}
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v2
+      - name: Set up python
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+      - name: Install Poetry in Test Job
+        uses: snok/install-poetry@v1
+        with:
+          version: 1.4.0
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+          installer-parallel: true
+      - name: Load cached venv from setup job
+        id: cached-poetry-dependencies
+        uses: actions/cache@v3
+        with:
+          path: .venv
+          key: ${{ needs.python-setup.outputs.cache_key }}
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4.0.1
+        with:
+          role-to-assume: arn:aws:iam::898022457080:role/github-action-test-s3-access-role
+          aws-region: us-east-1
+      - name: Test with pytest
+        run: |
+          source .venv/bin/activate
+          pytest test/${{ matrix.pytest_target }}/
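
A note on the `Compute Cache Key (Ignoring Only [tool.poetry] version)` step above: the awk filter drops only the `version` line inside the `[tool.poetry]` table before hashing `pyproject.toml`, so a version bump alone does not invalidate the cached virtualenv. A rough Python sketch of the same idea, for illustration only (the helper name and the exact section-matching rules here are mine, not part of the workflow, which uses only awk and sha256sum):

```python
import hashlib
import re


def poetry_cache_fingerprint(pyproject_path: str = "pyproject.toml") -> str:
    """Hash pyproject.toml while skipping only the version line of [tool.poetry]."""
    in_tool_poetry = False
    kept = []
    with open(pyproject_path, encoding="utf-8") as fh:
        for line in fh:
            if line.startswith("["):
                # Track whether we are currently inside the [tool.poetry] table.
                in_tool_poetry = line.strip() == "[tool.poetry]"
            if in_tool_poetry and re.match(r"\s*version\s*=", line):
                continue  # the only line the cache key deliberately ignores
            kept.append(line)
    return hashlib.sha256("".join(kept).encode("utf-8")).hexdigest()


# The workflow then prefixes the digest to build the key:
# venv-<runner.os>-<python-version>-<digest>
print(poetry_cache_fingerprint())
```
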
diff --git a/.github/workflows/end2end.yml b/.github/workflows/end2end.yml
new file mode 100644
index 00000000..30435211
--- /dev/null
+++ b/.github/workflows/end2end.yml
@@ -0,0 +1,182 @@
+name: Start End2End test
+on:
+  workflow_dispatch:
+    inputs:
+      model_name:
+        description: 'Model name'
+        required: true
+      cloud_dir:
+        description: 'Cloud directory'
+        required: true
+permissions:
+  id-token: write # This is required for requesting the JWT
+  contents: read # This is required for actions/checkout
+  issues: write # permits an action to add a comment to an issue.
+  pull-requests: write
+jobs:
+  update-leap-model-parser:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check branch name
+        id: check_branch
+        run: |
+          if [ "${{ github.event.inputs.branch }}" = "master" ]; then
+            echo "::error::Manual runs not allowed from master branch."
+            exit 1
+          fi
+      - name: Extract branch name
+        id: extract_branch
+        run: echo "::set-output name=branch_name::$(echo ${GITHUB_REF#refs/heads/})"
+      - name: Check out repository
+        uses: actions/checkout@v2
+      - name: Get package version from pyproject.toml
+        id: package_version
+        run: |
+          PACKAGE_VERSION=$(awk -F' = ' '$1 == "version" {gsub(/"/, "", $2); print $2}' pyproject.toml | head -n 1)
+          echo "PACKAGE_VERSION=$PACKAGE_VERSION" >> $GITHUB_ENV
+          echo "$PACKAGE_VERSION"
+      - name: Set up python
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8.12
+      - name: Set Poetry Lock Hash
+        run: |
+          echo "POETRY_HASH=$(sha256sum poetry.lock | cut -d ' ' -f 1)" >> $GITHUB_ENV
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          version: 1.8.2
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+          installer-parallel: true
+      - name: Load cached venv
+        id: cached-poetry-dependencies
+        uses: actions/cache/restore@v4
+        with:
+          path: .venv
+          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_HASH }}
+      - name: Install dependencies
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: poetry install --no-interaction --no-root
+      - name: Save cache
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        uses: actions/cache/save@v4
+        with:
+          path: .venv
+          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_HASH }}
+      - name: Check if version exists on PyPI
+        id: check_version
+        run: |
+          URL="https://pypi.org/pypi/onnx2kerastl/$PACKAGE_VERSION/json"
+          echo "URL: $URL"
+          RESPONSE=$(curl -s -o /dev/null -w "%{http_code}" "$URL")
+          if [ $RESPONSE -eq 200 ]; then
+            echo "Version $PACKAGE_VERSION already exists on PyPI."
+            echo "IS_O2K_PUBLISHED=true" >> $GITHUB_ENV
+          else
+            echo "Version $PACKAGE_VERSION does not exist on PyPI."
+            echo "IS_O2K_PUBLISHED=false" >> $GITHUB_ENV
+          fi
+      - name: Build and publish O2K package
+        if: success() # Only run this step if the previous steps were successful
+        run: |
+          if [ "$IS_O2K_PUBLISHED" != "true" ]; then
+            poetry build
+            poetry publish -u __token__ -p ${{ secrets.PYPI_O2K }}
+            echo "Published O2K version $PACKAGE_VERSION"
+          else
+            echo "Skipping publishing as version already exists on PyPI."
+          fi
+      - name: checkout leap model parser
+        uses: actions/checkout@v4
+        with:
+          token: ${{ secrets.TENSORLEAP_OPS_GITHUB_TOKEN }}
+          repository: tensorleap/leap-model-parser
+          path: leap-model-parser
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4.0.1
+        with:
+          role-to-assume: arn:aws:iam::898022457080:role/github-action-test-s3-access-role
+          aws-region: us-east-1
+      - name: Configure GIT credentials
+        run: |
+          git config --global user.email github-actions@github.com
+          git config --global user.name github-actions
+      - name: leap-model-parser test and push
+        working-directory: leap-model-parser
+        id: parser_test
+        env:
+          PYTHONPATH: ${{ env.PYTHONPATH }}:${{ github.workspace }}/leap-model-parser
+        run: | #fetch or create branch, update-o2k-version
+          echo "Project version is $PACKAGE_VERSION"
+          BRANCH="${{ steps.extract_branch.outputs.branch_name }}-from-o2k"
+          echo "BRANCH=$BRANCH" >> $GITHUB_ENV
+          if git ls-remote --exit-code --heads origin "$BRANCH" >/dev/null 2>&1; then
+            git fetch origin "$BRANCH"
+            git checkout "$BRANCH"
+          else
+            git checkout -b "$BRANCH"
+          fi
+          # add packages with retry
+          clear_cache() {
+            echo "Clearing Poetry cache..."
+            poetry cache clear . --all --no-interaction
+          }
+          add_packages() {
+            poetry add onnx2kerastl==$PACKAGE_VERSION
+          }
+          max_retries=3
+          attempt=0
+          # Try to add packages
+          while [ $attempt -lt $max_retries ]; do
+            echo "Attempt $(($attempt + 1)) to add the package..."
+
+            # Attempt to add the package
+            if add_packages; then
+              echo "Package added successfully."
+              break
+            else
+              echo "Package add failed."
+
+              # Clear the cache before retrying
+              clear_cache
+
+              # Increment the attempt counter
+              attempt=$((attempt + 1))
+
+              # If this was the last attempt, exit with failure
+              if [ $attempt -ge $max_retries ]; then
+                echo "Failed to add the package after $max_retries attempts."
+                exit 1
+              fi
+            fi
+          done
+          source .venv/bin/activate
+          cd tests
+          if pytest -s test_branch_model.py --cloud_dir ${{ github.event.inputs.cloud_dir }} --model_name ${{ github.event.inputs.model_name }}; then
+            TEST_CONCLUSION=success
+            echo "Tests passed, Pushing Branch if needed"
+            cd ..
+            git add -u
+            if git status --porcelain | grep -q '^M'; then
+              echo "Files are modified - updating branch"
+              git commit -m "update O2K --model_name ${{ github.event.inputs.model_name }} --cloud_dir ${{ github.event.inputs.cloud_dir }} --sha $GITHUB_SHA"
+              git push origin "$BRANCH"
+            else
+              echo "No changes were made to leap-model-parser - not pushing the branch"
+            fi
+          else
+            TEST_CONCLUSION=failure
+            echo "Tests Failed, check pytest output"
+          fi
+          echo "TEST_CONCLUSION=$TEST_CONCLUSION" >> $GITHUB_ENV
+      - name: add commit status
+        id: commit_status
+        run: |
+          curl -X POST \
+          -H "Accept: application/vnd.github+json" \
+          -H "Authorization: Bearer ${{ secrets.TENSORLEAP_OPS_GITHUB_TOKEN }}" \
+          -H "X-GitHub-Api-Version: 2022-11-28" \
+          https://api.github.com/repos/tensorleap/onnx2keras/statuses/$GITHUB_SHA \
+          -d '{"state":"'${TEST_CONCLUSION}'","target_url":"https://github.com/'${GITHUB_REPOSITORY}'/actions/runs/'${GITHUB_RUN_ID}'","description":"Leap model parser dynamic test result","context":"end2end/parser-dynamic-test"}'
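
For reference, the `add commit status` step above is what surfaces the end2end result on the triggering commit as the `end2end/parser-dynamic-test` status check. A hedged Python sketch of the same API call (the helper name and the use of `requests` are mine; the workflow itself uses the curl command shown above):

```python
import requests


def report_status(sha: str, state: str, run_url: str, token: str) -> None:
    """Post the end2end test result as a GitHub commit status."""
    response = requests.post(
        f"https://api.github.com/repos/tensorleap/onnx2keras/statuses/{sha}",
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
            "X-GitHub-Api-Version": "2022-11-28",
        },
        json={
            "state": state,  # mirrors TEST_CONCLUSION: "success" or "failure"
            "target_url": run_url,  # link back to the Actions run
            "description": "Leap model parser dynamic test result",
            "context": "end2end/parser-dynamic-test",
        },
        timeout=30,
    )
    response.raise_for_status()
```
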
diff --git a/.gitignore b/.gitignore
index 22617b4d..0c84d248 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,4 +4,14 @@ __pycache__/
 *.egg-info
 *.onnx
 *.h5
-dist/
\ No newline at end of file
+*.npy
+*.pth
+*.log
+dist/
+*.DS_Store
+.python-version
+test/models/custom_conversion_tests
+
+.vscode/
+gallery_models
+.DS_Store
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 03852067..00000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,3 +0,0 @@
-include LICENSE
-include README.md
-include requirements.txt
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..9d71ae68
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,38 @@
+PYTHONPATH := .
+POETRY_MODULE := PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python poetry run python -m
+PYTEST := $(POETRY_MODULE) pytest
+
+.PHONY: run_tests
+run_tests:
+	$(PYTEST) test -v
+
+
+.PHONY: poetry_update
+poetry_update:
+	PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python poetry update
+
+.PHONY: test_models
+test_models:
+	$(PYTEST) test/models -v
+
+.PHONY: watch
+watch:
+	$(POETRY_MODULE) pytest_watch --runner "python -m pytest -v -k $(K)"
+
+.PHONY: lint
+lint:
+	$(POETRY_MODULE) mypy --install-types --non-interactive .
+
+.PHONY: lint_strict_code
+lint_strict_code:
+	$(POETRY_MODULE) mypy --install-types --non-interactive --strict code_loader
+
+.PHONY: lint_tests
+lint_tests:
+	$(POETRY_MODULE) mypy --install-types --non-interactive tests
+
+.PHONY: test_with_coverage
+test_with_coverage:
+	$(PYTEST) --cov=code_loader --cov-branch --no-cov-on-fail --cov-report term-missing --cov-report html -v tests/
+
+
diff --git a/README.md b/README.md
index 7b7ade35..2f3a7614 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# onnx2keras
+# Tensorleap fork of onnx2keras
 
 ONNX to Keras deep neural network converter.
 
@@ -21,7 +21,7 @@ TensorFlow 2.0 `input_shapes`: override input shapes (experimental) -`name_policy`: ['renumerate', 'short', 'default'] override layer names (experimental) +`name_policy`: ['renumerate', 'short', 'default', 'attach_weights_name'] override layer names (experimental) `verbose`: detailed output @@ -31,9 +31,10 @@ TensorFlow 2.0 ## Getting started ### ONNX model + ```python import onnx -from onnx2keras import onnx_to_keras +from onnx2kerastl import onnx_to_keras # Load ONNX model onnx_model = onnx.load('resnet18.onnx') diff --git a/onnx2keras/__init__.py b/onnx2keras/__init__.py deleted file mode 100644 index edfde4eb..00000000 --- a/onnx2keras/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .converter import onnx_to_keras -from .utils import check_torch_keras_error - -__all__ = ['onnx_to_keras', 'check_torch_keras_error'] diff --git a/onnx2keras/activation_layers.py b/onnx2keras/activation_layers.py deleted file mode 100644 index 78a585a6..00000000 --- a/onnx2keras/activation_layers.py +++ /dev/null @@ -1,183 +0,0 @@ -from tensorflow import keras -import logging -from .utils import ensure_tf_type, ensure_numpy_type - - -def convert_relu(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert ReLU activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - relu = keras.layers.Activation('relu', name=keras_name) - layers[node_name] = relu(input_0) - - -def convert_elu(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert ELU activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - elu = \ - keras.layers.ELU(alpha=params['alpha'], name=keras_name) - layers[node_name] = elu(input_0) - - -def convert_lrelu(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert LeakyReLU activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - leakyrelu = \ - keras.layers.LeakyReLU(alpha=params['alpha'], name=keras_name) - layers[node_name] = leakyrelu(input_0) - - -def convert_sigmoid(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Sigmoid activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param 
keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - sigmoid = keras.layers.Activation('sigmoid', name=keras_name) - layers[node_name] = sigmoid(input_0) - - -def convert_tanh(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Tanh activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - tanh = keras.layers.Activation('tanh', name=keras_name) - layers[node_name] = tanh(input_0) - - -def convert_selu(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert SELU activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - selu = keras.layers.Activation('selu', name=keras_name) - layers[node_name] = selu(input_0) - - -def convert_softmax(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert softmax activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for an activation layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x, axis=params['axis']): - import tensorflow as tf - return tf.nn.softmax(x, axis=axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - layers[node_name].set_shape(layers[node_name].shape) - lambda_func[keras_name] = target_layer - - -def convert_prelu(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert PReLU activation layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.prelu') - - if len(node.input) != 2: - assert AttributeError('Activation layer PReLU should have 2 inputs.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - W = ensure_numpy_type(layers[node.input[1]]) - - if params['change_ordering']: - logger.warning('PRelu + change ordering needs to be fixed after TF graph is built.') - logger.warning('It\'s experimental.') - - shared_axes = [2, 3] - - # for case when W.shape (n,). When activation is used for single dimension vector. 
- shared_axes = shared_axes if len(W.shape) > 1 else None - - prelu = keras.layers.PReLU(weights=[W], shared_axes=shared_axes, name=keras_name) - layers[node_name] = prelu(input_0) diff --git a/onnx2keras/constant_layers.py b/onnx2keras/constant_layers.py deleted file mode 100644 index becfca9d..00000000 --- a/onnx2keras/constant_layers.py +++ /dev/null @@ -1,12 +0,0 @@ -def convert_constant(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Constant layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - layers[node_name] = params['value'] diff --git a/onnx2keras/converter.py b/onnx2keras/converter.py deleted file mode 100644 index 616295cf..00000000 --- a/onnx2keras/converter.py +++ /dev/null @@ -1,290 +0,0 @@ -""" -The ONNX to keras converter module -""" - -from tensorflow import keras -import logging -import inspect -import collections -from onnx import numpy_helper - -from .layers import AVAILABLE_CONVERTERS - - -def onnx_node_attributes_to_dict(args): - """ - Parse ONNX attributes to Python dictionary - :param args: ONNX attributes object - :return: Python dictionary - """ - def onnx_attribute_to_dict(onnx_attr): - """ - Parse ONNX attribute - :param onnx_attr: ONNX attribute - :return: Python data type - """ - if onnx_attr.HasField('t'): - return numpy_helper.to_array(getattr(onnx_attr, 't')) - - for attr_type in ['f', 'i', 's']: - if onnx_attr.HasField(attr_type): - return getattr(onnx_attr, attr_type) - - for attr_type in ['floats', 'ints', 'strings']: - if getattr(onnx_attr, attr_type): - return list(getattr(onnx_attr, attr_type)) - return {arg.name: onnx_attribute_to_dict(arg) for arg in args} - - -def onnx_to_keras(onnx_model, input_names, - input_shapes=None, name_policy=None, verbose=True, change_ordering=False): - """ - Convert ONNX graph to Keras model format - :param onnx_model: loaded ONNX model - :param input_names: list with input names - :param input_shapes: override input shapes (experimental) - :param name_policy: override layer names. None, "short" or "renumerate" (experimental) - :param verbose: verbose output - :param change_ordering: change ordering to HWC (experimental) - :return: Keras model - """ - # Use channels first format by default. 
- keras_fmt = keras.backend.image_data_format() - keras.backend.set_image_data_format('channels_first') - - if verbose: - logging.basicConfig(level=logging.DEBUG) - - logger = logging.getLogger('onnx2keras') - - logger.info('Converter is called.') - - onnx_weights = onnx_model.graph.initializer - onnx_inputs = onnx_model.graph.input - onnx_outputs = [i.name for i in onnx_model.graph.output] - onnx_nodes = onnx_model.graph.node - - logger.debug('List input shapes:') - logger.debug(input_shapes) - - logger.debug('List inputs:') - for i, input in enumerate(onnx_inputs): - logger.debug('Input {0} -> {1}.'.format(i, input.name)) - - logger.debug('List outputs:') - for i, output in enumerate(onnx_outputs): - logger.debug('Output {0} -> {1}.'.format(i, output)) - - logger.debug('Gathering weights to dictionary.') - weights = {} - for onnx_w in onnx_weights: - try: - if len(onnx_w.ListFields()) < 4: - onnx_extracted_weights_name = onnx_w.ListFields()[1][1] - else: - onnx_extracted_weights_name = onnx_w.ListFields()[2][1] - weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) - except: - onnx_extracted_weights_name = onnx_w.ListFields()[3][1] - weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) - - logger.debug('Found weight {0} with shape {1}.'.format( - onnx_extracted_weights_name, - weights[onnx_extracted_weights_name].shape)) - - layers = dict() - lambda_funcs = dict() - keras_outputs = [] - keras_inputs = [] - - for i, input_name in enumerate(input_names): - for onnx_i in onnx_inputs: - if onnx_i.name == input_name: - if input_shapes: - input_shape = input_shapes[i] - else: - input_shape = [i.dim_value for i in onnx_i.type.tensor_type.shape.dim][1:] - - layers[input_name] = keras.layers.InputLayer( - input_shape=input_shape, name=input_name - ).output - - keras_inputs.append(layers[input_name]) - - logger.debug('Found input {0} with shape {1}'.format(input_name, input_shape)) - - # Convert every operation separable - node_names = [] - for node_index, node in enumerate(onnx_nodes): - node_type = node.op_type - node_params = onnx_node_attributes_to_dict(node.attribute) - - # Add global converter info: - node_params['change_ordering'] = change_ordering - node_params['name_policy'] = name_policy - - node_name = str(node.output[0]) - keras_names = [] - for output_index, output in enumerate(node.output): - if name_policy == 'short': - keras_name = keras_name_i = str(output)[:8] - suffix = 1 - while keras_name_i in node_names: - keras_name_i = keras_name + '_' + str(suffix) - suffix += 1 - keras_names.append(keras_name_i) - elif name_policy == 'renumerate': - postfix = node_index if len(node.output) == 1 else "%s_%s" % (node_index, output_index) - keras_names.append('LAYER_%s' % postfix) - else: - keras_names.append(output) - - if len(node.output) != 1: - logger.warning('Trying to convert multi-output node') - node_params['_outputs'] = list(node.output) - node_names.extend(keras_names) - else: - keras_names = keras_names[0] - node_names.append(keras_names) - - logger.debug('######') - logger.debug('...') - logger.debug('Converting ONNX operation') - logger.debug('type: %s', node_type) - logger.debug('node_name: %s', node_name) - logger.debug('node_params: %s', node_params) - logger.debug('...') - - logger.debug('Check if all inputs are available:') - if len(node.input) == 0 and node_type != 'Constant': - raise AttributeError('Operation doesn\'t have an input. 
Aborting.') - - for i, node_input in enumerate(node.input): - logger.debug('Check input %i (name %s).', i, node_input) - if node_input not in layers: - logger.debug('The input not found in layers / model inputs.') - - if node_input in weights: - logger.debug('Found in weights, add as a numpy constant.') - layers[node_input] = weights[node_input] - else: - raise AttributeError('Current node is not in weights / model inputs / layers.') - else: - logger.debug('... found all, continue') - - keras.backend.set_image_data_format('channels_first') - AVAILABLE_CONVERTERS[node_type]( - node, - node_params, - layers, - lambda_funcs, - node_name, - keras_names - ) - if isinstance(keras_names, list): - keras_names = keras_names[0] - - try: - logger.debug('Output TF Layer -> ' + str(layers[keras_names])) - except KeyError: - pass - - # Check for terminal nodes - for layer in onnx_outputs: - if layer in layers: - keras_outputs.append(layers[layer]) - - # Create model - model = keras.models.Model(inputs=keras_inputs, outputs=keras_outputs) - - if change_ordering: - change_ord_axes_map = { - 3: 2, - 1: 3, - -1: 1 - } - - import numpy as np - conf = model.get_config() - - for layer in conf['layers']: - if layer['config'] and 'shared_axes' in layer['config']: - # TODO: check axes first (if it's not 4D tensor) - layer['config']['shared_axes'] = [1, 2] - - if layer['config'] and 'batch_input_shape' in layer['config']: - layer['config']['batch_input_shape'] = \ - tuple(np.reshape(np.array( - [ - [None] + - list(layer['config']['batch_input_shape'][2:][:]) + - [layer['config']['batch_input_shape'][1]] - ]), -1 - )) - if layer['config'] and 'target_shape' in layer['config']: - if len(list(layer['config']['target_shape'][1:][:])) > 0: - layer['config']['target_shape'] = \ - tuple(np.reshape(np.array( - list(layer['config']['target_shape'][1:]) + - [layer['config']['target_shape'][0]] - ), -1),) - - if layer['config'] and 'data_format' in layer['config']: - layer['config']['data_format'] = 'channels_last' - if layer['config'] and 'axis' in layer['config']: - axis = layer['config']['axis'] - # BatchNorm wrap axis with ListWrapper instead single INT value - if isinstance(axis, (tuple, list)): - axis = axis[0] - layer['config']['axis'] = change_ord_axes_map.get(axis, layer['config']['axis']) - - for layer in conf['layers']: - if 'function' in layer['config'] and layer['config']['function'][1] is not None: - kerasf = list(layer['config']['function']) - dargs = list(kerasf[1]) - func = lambda_funcs.get(layer['name']) - - if func: - # ReduceSum operation has 'axis' param as array of ints. 
When onnx uses ReduceSum - # to reproduce SoftMax - dargs become something like [[1]] (list of lists) - # that why we handle collections.Iterable - if len(dargs) > 1 or isinstance(dargs[0], (tuple, list)): - params = inspect.signature(func).parameters - i = list(params.keys()).index('axes') if ('axes' in params) else -1 - - if i > 0: - i -= 1 - axes = list(range(len(dargs[i].shape))) - axes = axes[0:1] + axes[2:] + axes[1:2] - dargs[i] = np.transpose(dargs[i], axes) - - i = list(params.keys()).index('axis') if ('axis' in params) else -1 - - if i > 0: - i -= 1 - axis = np.array(dargs[i]) - axes_map = np.array([0, 3, 1, 2]) - # to list because some tf operations check only for core python types (e.g tf.norm) - dargs[i] = axes_map[axis].tolist() - else: - # if map exits will change else will remain the same - dargs[0] = change_ord_axes_map.get(dargs[0], dargs[0]) - - kerasf[1] = tuple(dargs) - layer['config']['function'] = tuple(kerasf) - - keras.backend.set_image_data_format('channels_last') - model_tf_ordering = keras.models.Model.from_config(conf) - - for dst_layer, src_layer, conf in zip(model_tf_ordering.layers, model.layers, conf['layers']): - W = src_layer.get_weights() - # TODO: check axes first (if it's not 4D tensor) - if conf['config'] and 'shared_axes' in conf['config']: - W[0] = W[0].transpose(1, 2, 0) - dst_layer.set_weights(W) - - model = model_tf_ordering - - keras.backend.set_image_data_format(keras_fmt) - - return model diff --git a/onnx2keras/convolution_layers.py b/onnx2keras/convolution_layers.py deleted file mode 100644 index 023a0b57..00000000 --- a/onnx2keras/convolution_layers.py +++ /dev/null @@ -1,316 +0,0 @@ -from tensorflow import keras -import logging -from .utils import ensure_tf_type, ensure_numpy_type - - -def convert_conv(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert convolution layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.conv') - - if len(node.input) == 3: - logger.debug('Conv with bias') - # Has bias - has_bias = True - W = ensure_numpy_type(layers[node.input[1]]) - bias = ensure_numpy_type(layers[node.input[2]]) - - elif len(node.input) == 2: - logger.debug('Conv without bias') - has_bias = False - W = ensure_numpy_type(layers[node.input[1]]) - bias = None - - else: - raise NotImplementedError('Not implemented') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - n_groups = params['group'] if 'group' in params else 1 - dilation = params['dilations'][0] if 'dilations' in params else 1 - pads = params['pads'] if 'pads' in params else [0, 0, 0] - strides = params['strides'] if 'strides' in params else [1, 1, 1] - - if len(W.shape) == 5: # 3D conv - logger.debug('3D convolution') - if pads[0] > 0 or pads[1] > 0 or pads[2] > 0: - logger.debug('Paddings exist, add ZeroPadding layer') - padding_name = keras_name + '_pad' - padding_layer = keras.layers.ZeroPadding3D( - padding=(pads[0], pads[1], pads[2]), - name=padding_name - ) - layers[padding_name] = input_0 = padding_layer(input_0) - out_channels, channels_per_group, dimension, height, width = W.shape - W = W.transpose(2, 3, 4, 1, 0) - - if has_bias: - weights = [W, bias] - else: - weights = [W] - - conv = keras.layers.Conv3D( - filters=out_channels, - 
kernel_size=(dimension, height, width), - strides=(strides[0], strides[1], strides[2]), - padding='valid', - weights=weights, - use_bias=has_bias, - activation=None, - dilation_rate=dilation, - bias_initializer='zeros', kernel_initializer='zeros', - name=keras_name, - groups=n_groups - ) - layers[node_name] = conv(input_0) - - elif len(W.shape) == 4: # 2D conv - logger.debug('2D convolution') - - padding = None - if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): - padding = (pads[0], pads[1]) - elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): - padding = ((pads[0], pads[2]), (pads[1], pads[3])) - - if padding: - logger.debug('Paddings exist, add ZeroPadding layer') - padding_name = keras_name + '_pad' - padding_layer = keras.layers.ZeroPadding2D( - padding=padding, - name=padding_name, - data_format='channels_first' - ) - layers[padding_name] = input_0 = padding_layer(input_0) - - W = W.transpose(2, 3, 1, 0) - height, width, channels_per_group, out_channels = W.shape - in_channels = channels_per_group * n_groups - - if n_groups == in_channels and n_groups != 1: - logger.debug('Number of groups is equal to input channels, use DepthWise convolution') - W = W.transpose(0, 1, 3, 2) - if has_bias: - weights = [W, bias] - else: - weights = [W] - - conv = keras.layers.DepthwiseConv2D( - kernel_size=(height, width), - strides=(strides[0], strides[1]), - padding='valid', - use_bias=has_bias, - activation=None, - depth_multiplier=1, - weights=weights, - dilation_rate=dilation, - bias_initializer='zeros', kernel_initializer='zeros', - name=keras_name - ) - layers[node_name] = conv(input_0) - - elif n_groups != 1: - logger.debug('Number of groups more than 1, but less than number of in_channel, use group convolution') - - # Example from https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html - def target_layer(x, groups=n_groups, stride_y=strides[0], stride_x=strides[1]): - import tensorflow as tf - from tensorflow.keras import backend as K - data_format = 'NCHW' if K.image_data_format() == 'channels_first' else 'NHWC' - - if data_format == 'NCHW': - x = tf.transpose(x, [0, 2, 3, 1]) - - def convolve_lambda_biased(i, k, b): - import tensorflow as tf - conv = tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], dilations=[1, dilation, dilation, 1], padding='VALID', data_format='NHWC') - return tf.nn.bias_add(conv, b, data_format='NHWC') - - def convolve_lambda(i, k): - import tensorflow as tf - return tf.nn.conv2d(i, k, strides=[1, stride_y, stride_x, 1], dilations=[1, dilation, dilation, 1], padding='VALID', data_format='NHWC') - - input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) - weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=W) - if has_bias: - bias_groups = tf.split(axis=0, num_or_size_splits=groups, value=bias) - output_groups = [convolve_lambda_biased(i, k, b) for i, k, b in - zip(input_groups, weight_groups, bias_groups)] - else: - output_groups = [convolve_lambda(i, k) for i, k in zip(input_groups, weight_groups)] - - layer = tf.concat(axis=3, values=output_groups) - if data_format == 'NCHW': - layer = tf.transpose(layer, [0, 3, 1, 2]) - - return layer - - lambda_layer = keras.layers.Lambda(target_layer) - layers[node_name] = lambda_layer(input_0) - - else: - if has_bias: - weights = [W, bias] - else: - weights = [W] - - conv = keras.layers.Conv2D( - filters=out_channels, - kernel_size=(height, width), - strides=(strides[0], strides[1]), - padding='valid', - weights=weights, - use_bias=has_bias, 
- activation=None, - dilation_rate=dilation, - bias_initializer='zeros', kernel_initializer='zeros', - name=keras_name - ) - - layers[node_name] = conv(input_0) - else: - # 1D conv - W = W.transpose(2, 1, 0) - width, channels, n_filters = W.shape - print(width, channels, n_filters, has_bias) - - if has_bias: - weights = [W, bias] - else: - weights = [W] - - def target_layer(x, w=weights, stride=strides[0]): - import tensorflow as tf - w = tf.convert_to_tensor(w[0]) - x = tf.transpose(x, [0, 2, 1]) - x = tf.nn.conv1d(x, w, stride=stride, padding='SAME', data_format='NWC') - return tf.transpose(x, [0, 2, 1]) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - lambda_layer[keras_name] = target_layer - layers[node_name] = lambda_layer(input_0) - - # padding_name = keras_name + '_pad' - # padding_layer = keras.layers.ZeroPadding1D( - # padding=(pads[0]), - # name=padding_name - # ) - # print(input_0) - # layers[node_name] = padding_layer(input_0) - # input_0.set_shape(input_0._keras_shape) - # print(input_0._keras_shape) - # print(input_0, n_filters, width) - # conv = keras.layers.Conv1D( - # filters=n_filters, - # kernel_size=width, - # strides=strides[0], - # padding='valid', - # weights=weights, - # use_bias=has_bias, - # activation=None, - # dilation_rate=dilation, - # name=keras_name - # ) - # layers[node_name] = conv(input_0) - - -def convert_convtranspose(node, params, layers, - lambda_func, node_name, keras_name): - """ - Convert transposed convolution layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.convtranpose') - - if len(node.input) == 3: - logger.debug('ConvTranspose with bias') - # Has bias - has_bias = True - W = ensure_numpy_type(layers[node.input[1]]) - bias = ensure_numpy_type(layers[node.input[2]]) - - elif len(node.input) == 2: - logger.debug('ConvTranspose without bias') - has_bias = False - W = ensure_numpy_type(layers[node.input[1]]) - bias = None - - else: - raise NotImplementedError('Not implemented') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - n_groups = params['group'] if 'group' in params else 1 - dilation = params['dilations'][0] if 'dilations' in params else 1 - pads = params['pads'] if 'pads' in params else [0, 0] - strides = params['strides'] if 'strides' in params else [1, 1] - - if len(W.shape) == 5: # 3D conv - raise NotImplementedError('Not implemented') - - elif len(W.shape) == 4: # 2D conv - W = W.transpose(2, 3, 1, 0) - height, width, n_filters, channels = W.shape - - if has_bias: - weights = [W, bias] - else: - weights = [W] - - if n_groups > 1: - raise AttributeError('Cannot convert ConvTranspose2d with groups != 1') - - if dilation > 1: - raise AttributeError('Cannot convert ConvTranspose2d with dilation_rate != 1') - - conv = keras.layers.Conv2DTranspose( - filters=n_filters, - kernel_size=(height, width), - strides=strides, - padding='valid', - output_padding=0, - weights=weights, - use_bias=has_bias, - activation=None, - dilation_rate=dilation, - bias_initializer='zeros', kernel_initializer='zeros', - name=keras_name - ) - - if 'output_shape' in params and 'pads' not in params: - logger.debug('!!!!! 
Paddings will be calculated automatically !!!!!') - pads = [strides[0] * (int(input_0.shape[2]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][0], - strides[1] * (int(input_0.shape[3]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][1]] - - layers[node_name] = input_0 = conv(input_0) - - # Magic ad-hoc. - # See the Keras issue: https://github.com/keras-team/keras/issues/6777 - # input_0.set_shape(input_0.shape) - - if 'output_padding' in params and (params['output_padding'][0] > 0 or params['output_padding'][1] > 0): - raise AttributeError('Cannot convert ConvTranspose2d with output_padding != 0') - - if pads[0] > 0: - logger.debug('Add cropping layer for output padding') - assert(len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) - - crop = keras.layers.Cropping2D( - pads[:2], - name=keras_name + '_crop' - ) - layers[node_name] = crop(input_0) - else: - raise AttributeError('Layer is not supported for now') diff --git a/onnx2keras/elementwise_layers.py b/onnx2keras/elementwise_layers.py deleted file mode 100644 index d309b7cd..00000000 --- a/onnx2keras/elementwise_layers.py +++ /dev/null @@ -1,232 +0,0 @@ -from tensorflow import keras -import logging -from .utils import is_numpy, ensure_tf_type - - -def convert_elementwise_div(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert element-wise division - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.div') - - if len(node.input) != 2: - raise AttributeError('Number of inputs is not equal 2 for element-wise layer') - - if is_numpy(layers[node.input[0]]) and is_numpy(layers[node.input[1]]): - logger.debug('Divide numpy arrays.') - layers[node_name] = layers[node.input[0]] / layers[node.input[1]] - else: - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) - input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) - - def target_layer(x): - import tensorflow as tf - layer = tf.divide( - x[0], - x[1] - ) - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer([input_0, input_1]) - lambda_func[keras_name] = target_layer - - -def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert element-wise add. 
- :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.add') - - if len(node.input) != 2: - raise AttributeError('Number of inputs is not equal 2 for element-wise layer') - - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) - input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) - - try: - if not is_numpy(layers[node.input[0]]) and not is_numpy(layers[node.input[1]]): - add = keras.layers.Add(name=keras_name) - layers[node_name] = add([input_0, input_1]) - else: - raise ValueError('Operands are different.') - - except (IndexError, ValueError): - logger.warning('Failed to use keras.layers.Add. Fallback to TF lambda.') - - def target_layer(x): - import tensorflow as tf - print(x[0], x[1]) - layer = tf.add( - x[0], - x[1] - ) - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer([input_0, input_1]) - lambda_func[keras_name] = target_layer - - -def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert element-wise mul. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.mul') - - if len(node.input) != 2: - raise AttributeError('Number of inputs is not equal 2 for element-wise layer') - - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) - input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) - - try: - mul = keras.layers.Multiply(name=keras_name) - layers[node_name] = mul([input_0, input_1]) - except (IndexError, ValueError): - logger.warning('Failed to use keras.layers.Multiply. Fallback to TF lambda.') - - # Doesn't work with constants - # IndexError: tuple index out of range - - def target_layer(x): - import tensorflow as tf - layer = tf.multiply( - x[0], - x[1] - ) - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer([input_0, input_1]) - lambda_func[keras_name] = target_layer - - -def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert element-wise sub. 
- :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.sub') - - if len(node.input) != 2: - raise AttributeError('Number of inputs is not equal 2 for element-wise layer') - - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const1" % keras_name) - input_1 = ensure_tf_type(layers[node.input[1]], layers[list(layers)[0]], name="%s_const2" % keras_name) - - try: - sub = keras.layers.Subtract(name=keras_name) - layers[node_name] = sub([input_0, input_1]) - except (IndexError, ValueError): - logger.warning('Failed to use keras.layers.Subtract. Fallback to TF lambda.') - - # Doesn't work with constants - # IndexError: tuple index out of range - - def target_layer(x): - import tensorflow as tf - layer = tf.subtract( - x[0], - x[1] - ) - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer([input_0, input_1]) - lambda_func[keras_name] = target_layer - - -def convert_min(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Min layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) < 2: - assert AttributeError('Less than 2 inputs for min layer.') - - inputs = list() - for i, inp in enumerate(node.input): - input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) - inputs.append(input_) - layers[node_name] = keras.layers.Minimum(name=keras_name)(inputs) - - -def convert_max(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Max layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) < 2: - assert AttributeError('Less than 2 inputs for max layer.') - - inputs = list() - for i, inp in enumerate(node.input): - input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) - inputs.append(input_) - layers[node_name] = keras.layers.Maximum(name=keras_name)(inputs) - - -def convert_mean(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Mean layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - :TODO: Test if this supports multidirectional (i.e., Numpy-style) broadcasting as required - """ - if len(node.input) < 2: - assert AttributeError('Less than 2 inputs for mean layer.') - - inputs = list() - for i, inp in enumerate(node.input): - input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i+1)) - inputs.append(input_) - layers[node_name] = keras.layers.Average(name=keras_name)(inputs) diff 
--git a/onnx2keras/layers.py b/onnx2keras/layers.py deleted file mode 100644 index 336e8255..00000000 --- a/onnx2keras/layers.py +++ /dev/null @@ -1,73 +0,0 @@ -from .convolution_layers import convert_conv, convert_convtranspose -from .activation_layers import convert_relu, convert_elu, convert_lrelu, convert_selu, \ - convert_sigmoid, convert_tanh, convert_softmax, convert_prelu -from .operation_layers import convert_clip, convert_exp, convert_reduce_sum, convert_reduce_mean, \ - convert_log, convert_pow, convert_sqrt, convert_split, convert_cast, convert_floor, convert_identity, \ - convert_argmax, convert_reduce_l2, convert_reduce_max -from .elementwise_layers import convert_elementwise_div, convert_elementwise_add, convert_elementwise_mul, convert_elementwise_sub, convert_max, convert_min, convert_mean -from .linear_layers import convert_gemm -from .reshape_layers import convert_transpose, convert_shape, convert_gather, convert_unsqueeze, \ - convert_concat, convert_reshape, convert_flatten, convert_slice, convert_squeeze, convert_expand -from .constant_layers import convert_constant -from .normalization_layers import convert_batchnorm, convert_instancenorm, convert_dropout, convert_lrn -from .pooling_layers import convert_avgpool, convert_maxpool, convert_global_avg_pool -from .padding_layers import convert_padding -from .upsampling_layers import convert_upsample - - -AVAILABLE_CONVERTERS = { - 'Conv': convert_conv, - 'ConvTranspose': convert_convtranspose, - 'Relu': convert_relu, - 'Elu': convert_elu, - 'LeakyRelu': convert_lrelu, - 'Sigmoid': convert_sigmoid, - 'Tanh': convert_tanh, - 'Selu': convert_selu, - 'Clip': convert_clip, - 'Exp': convert_exp, - 'Log': convert_log, - 'Softmax': convert_softmax, - 'PRelu': convert_prelu, - 'ReduceMax': convert_reduce_max, - 'ReduceSum': convert_reduce_sum, - 'ReduceMean': convert_reduce_mean, - 'Pow': convert_pow, - 'Slice': convert_slice, - 'Squeeze': convert_squeeze, - 'Expand': convert_expand, - 'Sqrt': convert_sqrt, - 'Split': convert_split, - 'Cast': convert_cast, - 'Floor': convert_floor, - 'Identity': convert_identity, - 'ArgMax': convert_argmax, - 'ReduceL2': convert_reduce_l2, - 'Max': convert_max, - 'Min': convert_min, - 'Mean': convert_mean, - 'Div': convert_elementwise_div, - 'Add': convert_elementwise_add, - 'Sum': convert_elementwise_add, - 'Mul': convert_elementwise_mul, - 'Sub': convert_elementwise_sub, - 'Gemm': convert_gemm, - 'MatMul': convert_gemm, - 'Transpose': convert_transpose, - 'Constant': convert_constant, - 'BatchNormalization': convert_batchnorm, - 'InstanceNormalization': convert_instancenorm, - 'Dropout': convert_dropout, - 'LRN': convert_lrn, - 'MaxPool': convert_maxpool, - 'AveragePool': convert_avgpool, - 'GlobalAveragePool': convert_global_avg_pool, - 'Shape': convert_shape, - 'Gather': convert_gather, - 'Unsqueeze': convert_unsqueeze, - 'Concat': convert_concat, - 'Reshape': convert_reshape, - 'Pad': convert_padding, - 'Flatten': convert_flatten, - 'Upsample': convert_upsample, -} diff --git a/onnx2keras/linear_layers.py b/onnx2keras/linear_layers.py deleted file mode 100644 index 5192e739..00000000 --- a/onnx2keras/linear_layers.py +++ /dev/null @@ -1,54 +0,0 @@ -from tensorflow import keras -import logging -from .utils import is_numpy - -def convert_gemm(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Linear / GEMM layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras 
Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.gemm') - - # Check if Bias available - if len(node.input) == 3: - has_bias = True - keras_weights = [layers[node.input[1]], layers[node.input[2]]] - logger.debug('Convert GEMM with bias.') - elif len(node.input) == 2: - has_bias = False - keras_weights = [layers[node.input[1]]] - logger.debug('Convert GEMM without bias.') - else: - raise AttributeError('More than 3 or less than 2 inputs') - - # Linear can have additional flag to transpose weights - if 'transB' in params and params['transB'] == 1: - logger.debug('Transposing W matrix.') - keras_weights[0] = keras_weights[0].transpose() - - # Estimate input/output neurons - input_channels, output_channels = keras_weights[0].shape - logger.debug('Input units %s, output units %s.', input_channels, output_channels) - - if is_numpy(keras_weights[0]): - dense = keras.layers.Dense( - output_channels, - weights=keras_weights, name=keras_name, bias_initializer='zeros', kernel_initializer='zeros', use_bias=has_bias - ) - - # The first input - always X - try: - layers[node_name] = dense(layers[node.input[0]]) - except ValueError: - reshape = keras.layers.Reshape([input_channels], name=keras_name + '_reshape') - reshaped_x = reshape(layers[node.input[0]]) - layers[node_name] = dense(reshaped_x) - - else: - layers[node_name] = keras.layers.Multiply()([layers[node.input[0]], layers[node.input[1]]]) diff --git a/onnx2keras/operation_layers.py b/onnx2keras/operation_layers.py deleted file mode 100644 index 0bfabfef..00000000 --- a/onnx2keras/operation_layers.py +++ /dev/null @@ -1,409 +0,0 @@ -from tensorflow import keras -from tensorflow.keras import backend as K -import logging -from .utils import is_numpy, ensure_tf_type, ensure_numpy_type -import numpy as np - -# Handle python 2.7 import error -try: - from collections.abc import Iterable -except ImportError: - from collections import Iterable - - -def convert_clip(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert clip layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.clip') - if len(node.input) != 1: - assert AttributeError('More than 1 input for clip layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - if params['min'] == 0: - logger.debug("Using ReLU({0}) instead of clip".format(params['max'])) - layer = keras.layers.ReLU(max_value=params['max'], name=keras_name) - else: - def target_layer(x, vmin=params['min'], vmax=params['max']): - import tensorflow as tf - return tf.clip_by_value(x, vmin, vmax) - layer = keras.layers.Lambda(target_layer, name=keras_name) - lambda_func[keras_name] = target_layer - - layers[node_name] = layer(input_0) - - -def convert_log(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Log layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for log layer.') - - 
input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x): - import tensorflow.keras.backend as K - return K.log(x) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_exp(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Exp layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for log layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x): - import tensorflow.keras.backend as K - return K.exp(x) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_reduce_sum(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert reduce sum. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for reduce sum layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - axis = params['axes'] - - def target_layer(x, axis=axis): - import tensorflow.keras.backend as K - return K.sum(x, keepdims=True, axis=axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - layers[node_name].set_shape(layers[node_name].shape) - lambda_func[keras_name] = target_layer - - -def convert_reduce_mean(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert reduce mean. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for reduce mean layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x, axis=params['axes'], keepdims=params['keepdims']): - import tensorflow.keras.backend as K - return K.mean(x, keepdims=(keepdims == 1), axis=axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - layers[node_name].set_shape(layers[node_name].shape) - lambda_func[keras_name] = target_layer - - -def convert_reduce_max(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert reduce max. 
- :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for reduce max layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x, axis=params['axes'], keepdims=params['keepdims']): - import tensorflow.keras.backend as K - return K.max(x, keepdims=(keepdims == 1), axis=axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - layers[node_name].set_shape(layers[node_name].shape) - lambda_func[keras_name] = target_layer - - -def convert_pow(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Pow layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 2: - assert AttributeError('More than 2 inputs for pow layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - power = ensure_numpy_type(layers[node.input[1]]) - - def target_layer(x, a=power): - import tensorflow.keras.backend as K - return K.pow(x, a) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_sqrt(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Sqrt layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for sqrt layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x): - import tensorflow.keras.backend as K - return K.sqrt(x) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_split(node, params, layers, lambda_func, node_name, keras_names): - """ - Convert Split layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for split layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_names[0]) - splits = params["split"] - axis = params.get("axis", 0) - if not isinstance(splits, Iterable): - # This might not work if `split` is a tensor. 
- chunk_size = K.int_size(input_0)[axis] // splits - splits = (chunk_size,) * splits - - cur = 0 - for i, split in enumerate(splits): - node_name = params['_outputs'][i] - - def target_layer(x, axis=axis, start_i=cur, end_i=cur+split): - slices = [slice(None, None)] * len(K.int_shape(x)) - slices[axis] = slice(start_i, end_i) - return x[tuple(slices)] - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_names[i]) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_names[i]] = target_layer - cur += split - - -def convert_cast(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Cast layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.cast') - - if len(node.input) != 1: - assert AttributeError('More than 1 input for cast layer.') - - if is_numpy(layers[node.input[0]]): - logger.debug('Cast numpy array') - - cast_map = { - 1: np.float32, - 2: np.uint8, - 3: np.int8, - 5: np.int16, - 6: np.int32, - 7: np.int64, - 9: np.bool, - 10: np.float16, - 11: np.double, - } - - layers[node_name] = cast_map[params['to']](layers[node.input[0]]) - else: - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x, dtype=params['to']): - import tensorflow as tf - cast_map = { - 1: tf.float32, - 2: tf.uint8, - 3: tf.int8, - 5: tf.int16, - 6: tf.int32, - 7: tf.int64, - 9: tf.bool, - 10: tf.float16, - 11: tf.double, - } - return tf.cast(x, cast_map[dtype]) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_floor(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Floor layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for floor layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x): - # Floor is absent in keras.backend - import tensorflow as tf - return tf.floor(x) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_identity(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Identity layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for itentity layer.') - - layers[node_name] = layers[node.input[0]] - - -def convert_argmax(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert ArgMax layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - 
:param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for argmax layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - axis = params.get("axis", -1) - - def target_layer(x, axis=axis): - import tensorflow as tf - return tf.argmax(x, axis=axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_reduce_l2(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert ReduceL2 layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for reduce_l2 layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - axis = params.get("axes", [-1]) - keepdims = params.get("keepdims", 0) - - def target_layer(x, axis=axis, keepdims=keepdims): - import tensorflow as tf - return tf.norm(x, axis=axis, keepdims=keepdims == 1) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer diff --git a/onnx2keras/padding_layers.py b/onnx2keras/padding_layers.py deleted file mode 100644 index d8734064..00000000 --- a/onnx2keras/padding_layers.py +++ /dev/null @@ -1,77 +0,0 @@ -from tensorflow import keras -import logging -from .utils import ensure_tf_type - - -def convert_padding(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Constant layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - # It's binary by-default - logger = logging.getLogger("onnx2keras.padding") - params['mode'] = params['mode'].decode('ascii') - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - if 'pads' in params: - pads = params['pads'] - else: - pads = layers[node.input[1]] - - print(pads) - - if params['mode'] == 'constant': - - if 'value' in params and params['value'] != 0.0: - raise AssertionError('Cannot convert non-zero padding') - - # Magic ordering - if len(pads) == 8: - padding_layer = keras.layers.ZeroPadding2D( - padding=((pads[2], pads[6]), (pads[3], pads[7])), - name=keras_name - ) - else: - logger.warning("Caution - no test yet") - padding_layer = keras.layers.ZeroPadding3D( - padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])), - name=keras_name - ) - layers[node_name] = padding_layer(input_0) - elif params['mode'] == 'reflect': - - def target_layer(x, pads=pads): - import tensorflow as tf - if len(pads) == 8: - layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') - else: - logger.warning("Caution - no test yet") - layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT') - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - elif params['mode'] == 'edge': - - def 
target_layer(x, pads=pads): - import tensorflow as tf - if len(pads) == 8: # TODO not tested yet - layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'SYMMETRIC') - else: - logger.warning("Caution - no test yet") - layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC') - return layer - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - else: - raise AttributeError('Unknown padding') diff --git a/onnx2keras/pooling_layers.py b/onnx2keras/pooling_layers.py deleted file mode 100644 index 963032d9..00000000 --- a/onnx2keras/pooling_layers.py +++ /dev/null @@ -1,162 +0,0 @@ -from tensorflow import keras -import logging -from .utils import ensure_tf_type - - -def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert MaxPooling layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.maxpool') - - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - kernel_shape = params['kernel_shape'] - stride_shape = params['strides'] - - pads = params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] - pad = 'valid' - - if all([shape % 2 == 1 for shape in kernel_shape]) and \ - all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ - all([shape == 1 for shape in stride_shape]): - pad = 'same' - logger.debug('Use `same` padding parameters.') - else: - logger.warning('Unable to use `same` padding. 
Add ZeroPadding2D layer to fix shapes.') - padding_name = keras_name + '_pad' - if len(kernel_shape) == 2: - padding = None - - if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): - padding = (pads[0], pads[1]) - elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): - padding = ((pads[0], pads[2]), (pads[1], pads[3])) - - if padding is not None: - padding_layer = keras.layers.ZeroPadding2D( - padding=padding, - name=padding_name - ) - layers[padding_name] = input_0 = padding_layer(input_0) - else: # 3D padding - padding_layer = keras.layers.ZeroPadding3D( - padding=pads[:len(stride_shape)], - name=padding_name - ) - layers[padding_name] = input_0 = padding_layer(input_0) - if len(kernel_shape) == 2: - pooling = keras.layers.MaxPooling2D( - pool_size=kernel_shape, - strides=stride_shape, - padding=pad, - name=keras_name, - data_format='channels_first' - ) - else: - pooling = keras.layers.MaxPooling3D( - pool_size=kernel_shape, - strides=stride_shape, - padding=pad, - name=keras_name, - data_format='channels_first' - ) - - layers[node_name] = pooling(input_0) - - -def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert AvgPooling layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.avgpool') - - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - kernel_shape = params['kernel_shape'] - stride_shape = params['strides'] - - pads = params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] - pad = 'valid' - - if all([shape % 2 == 1 for shape in kernel_shape]) and \ - all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ - all([shape == 1 for shape in stride_shape]): - pad = 'same' - logger.debug('Use `same` padding parameters.') - else: - logger.warning('Unable to use `same` padding. 
Add ZeroPadding2D layer to fix shapes.') - padding_name = keras_name + '_pad' - if len(kernel_shape) == 2: - padding_layer = keras.layers.ZeroPadding2D( - padding=pads[:len(stride_shape)], - name=padding_name - ) - else: # 3D padding - padding_layer = keras.layers.ZeroPadding3D( - padding=pads[:len(stride_shape)], - name=padding_name - ) - layers[padding_name] = input_0 = padding_layer(input_0) - if len(kernel_shape) == 2: - pooling = keras.layers.AveragePooling2D( - pool_size=kernel_shape, - strides=stride_shape, - padding=pad, - name=keras_name, - data_format='channels_first' - ) - else: - pooling = keras.layers.AveragePooling3D( - pool_size=kernel_shape, - strides=stride_shape, - padding=pad, - name=keras_name, - data_format='channels_first' - ) - layers[node_name] = pooling(input_0) - - -def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert GlobalAvgPool layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.global_avg_pool') - - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', name=keras_name) - input_0 = global_pool(input_0) - - def target_layer(x): - from tensorflow import keras - return keras.backend.expand_dims(x) - - logger.debug('Now expand dimensions twice.') - lambda_layer1 = keras.layers.Lambda(target_layer, name=keras_name + '_EXPAND1') - lambda_layer2 = keras.layers.Lambda(target_layer, name=keras_name + '_EXPAND2') - input_0 = lambda_layer1(input_0) # double expand dims - layers[node_name] = lambda_layer2(input_0) - lambda_func[keras_name + '_EXPAND1'] = target_layer - lambda_func[keras_name + '_EXPAND2'] = target_layer diff --git a/onnx2keras/reshape_layers.py b/onnx2keras/reshape_layers.py deleted file mode 100644 index edc7514b..00000000 --- a/onnx2keras/reshape_layers.py +++ /dev/null @@ -1,423 +0,0 @@ -from tensorflow import keras -import numpy as np -import logging -from .utils import is_numpy, ensure_tf_type, ensure_numpy_type - - -def convert_transpose(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert transpose. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.transpose') - input_name = node.input[0] - - if params['perm'][0] != 0: - logger.warning('Can\'t permute batch dimension. Result may be wrong.') - if is_numpy(layers[input_name]): - logger.warning('Transposing numpy array.') - layers[node_name] = np.transpose(layers[input_name], axes=params['perm']) - else: - raise NotImplementedError('Can\'t modify this type of data') - else: - permute = keras.layers.Permute(params['perm'][1:], name=keras_name) - layers[node_name] = permute(layers[input_name]) - - -def convert_shape(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert shape. 
- :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.shape') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - logger.debug('Actual shape:') - logger.debug(np.array(input_0.shape)) - - shapes = [] - for i in input_0.shape: - if i is not None: - shapes.append(i) - else: - shapes.append(None) - - layers[node_name] = np.array(shapes) - - -def convert_gather(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert gather. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.gather') - - if is_numpy(layers[node.input[0]]) and is_numpy(layers[node.input[1]]): - logger.debug('Gather from numpy array') - - if params['axis'] == 0: - layers[node_name] = np.array(layers[node.input[0]][layers[node.input[1]]]) - elif params['axis'] == 1: - layers[node_name] = np.array(layers[:, node.input[0]][layers[node.input[1]]]) - elif params['axis'] == 2: - layers[node_name] = np.array(layers[:, :, node.input[0]][layers[node.input[1]]]) - elif params['axis'] == 3: - layers[node_name] = np.array(layers[:, :, :, node.input[0]][layers[node.input[1]]]) - else: - raise AttributeError('Can\'t gather by axis more than 3.') - else: - raise AttributeError('Can\'t gather from tf tensor.') - - -def convert_concat(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert concat. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.concat') - - layer_input = [layers[node.input[i]] for i in range(len(node.input))] - - if all([is_numpy(layers[node.input[i]]) for i in range(len(node.input))]): - logger.debug('Concat numpy arrays.') - layers[node_name] = np.concatenate(layer_input, axis=params['axis']) - else: - logger.debug('Concat Keras layers.') - if len(layer_input) > 1: - try: - layers[node_name] = keras.layers.concatenate(inputs=layer_input, - axis=params['axis'], - name=keras_name) - except: - logger.warning('!!! IMPORTANT INFORMATION !!!') - logger.warning('Something goes wrong with concat layers. Will use TF fallback.') - logger.warning('---') - - def target_layer(x, axis=params['axis']): - import tensorflow as tf - x = tf.concat(x, axis=axis) - return x - - lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) - layers[node_name] = lambda_layer(layer_input) - lambda_func["%s_CHW" % keras_name] = target_layer - else: - layers[node_name] = layer_input[0] - - -def convert_reshape(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert reshape. 
- :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.reshape') - - input_0 = layers[node.input[0]] - input_1 = layers[node.input[1]] - - if is_numpy(input_1): - logger.debug('The second argument is numpy array.') - if is_numpy(input_0): - logger.debug('The first argument is numpy array. Apply np.reshape.') - layers[node_name] = np.reshape(input_0, np.int32(input_1)) - else: - if params['change_ordering']: - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - # Fix critical issue with NHWC - if input_1[0] is None and input_1[1] == -1: - logger.warning('!!! IMPORTANT INFORMATION !!!') - logger.warning('The target shape if [None, -1] that means flatten.') - logger.warning('But the target ordering is NHWC, so we cant simply perform flatten') - logger.warning('The layer will be converted as lambda with tf.transpose') - logger.warning('---') - - def target_layer(x): - import tensorflow as tf - x = tf.transpose(x, [0, 3, 1, 2]) - return x - - lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - else: - layers[node_name] = input_0 - - reshape = keras.layers.Reshape(np.int32(input_1[1:]), name=keras_name) - layers[node_name] = reshape(layers[node_name]) - - else: - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - logger.debug('The first argument is Keras/tf layer. Apply keras.Reshape.') - logger.debug('Target shape :') - logger.debug(np.int32(input_1[1:])) - - if len(np.int32(input_1[1:])) == 1 and np.int32(input_1[1:])[0] == -1: - logger.debug('The first argument is Keras/tf layer. Apply keras.Flatten.') - flatten = keras.layers.Flatten(name=keras_name) - layers[node_name] = flatten(input_0) - else: - reshape = keras.layers.Reshape(np.int32(input_1[1:]), name=keras_name) - layers[node_name] = reshape(input_0) - else: - raise AttributeError('Can\'t reshape dynamic size.') - - -def convert_unsqueeze(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert unsqueeze. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.unsqueeze') - - if len(node.input) != 1: - raise AttributeError('Number of inputs is not equal 1 for unsqueeze layer') - - if is_numpy(layers[node.input[0]]): - logger.debug('Work with numpy types.') - layers[node_name] = layers[node.input[0]] - for axis in params['axes']: - layers[node_name] = np.expand_dims(layers[node_name], axis) - else: - - if len(params['axes']) != 1: - raise AttributeError('Number of axes is not equal 1. Cannot unsqueeze') - - # if params['axes'][0] != 0: - # raise AttributeError('Axes is not 0. 
Cannot unsqueeze') - - def target_layer(x, axis=params['axes'][0]): - from tensorflow import keras - return keras.backend.expand_dims(x, axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(layers[node.input[0]]) - lambda_func[keras_name] = target_layer - - -def convert_flatten(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert flatten. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.flatten') - - if len(node.input) != 1: - raise AttributeError('Number of inputs is not equal 1 for flatten layer') - - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - - if params['change_ordering']: - # Fix critical issue with flatten - def target_layer(x): - import tensorflow as tf - x = tf.transpose(x, [0, 3, 1, 2]) - return x - - lambda_layer = keras.layers.Lambda(target_layer, name="%s_CHW" % keras_name) - tensor_chw = lambda_layer(input_0) - flatten = keras.layers.Flatten(name=keras_name) - layers[node_name] = flatten(tensor_chw) - lambda_func["%s_CHW" % keras_name] = target_layer - else: - reshape = keras.layers.Reshape([-1], name=keras_name) - layers[node_name] = reshape(input_0) - - -def convert_slice(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert slice. - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - logger = logging.getLogger('onnx2keras.slice') - - if is_numpy(layers[node.input[0]]): - if params['change_ordering']: - raise NotImplementedError("change_ordering for Slice is not implemented") - logger.debug('Slice numpy constants') - if 'axes' in params: - if len(params["axes"]) != 1: - raise NotImplementedError("Multiple axes in Slice is not implemented") - axes = params["axes"][0] - ends = params["ends"][0] - starts = params["starts"][0] - else: - raise AttributeError('Not implemented') - - if axes == 0: - layers[node_name] = layers[node.input[0]][starts:ends] - elif axes == 1: - layers[node_name] = layers[node.input[0]][:, starts:ends] - elif axes == 2: - layers[node_name] = layers[node.input[0]][:, :, starts:ends] - elif axes == 3: - layers[node_name] = layers[node.input[0]][:, :, :, starts:ends] - else: - raise AttributeError('Not implemented') - else: - logger.debug('Convert inputs to Keras/TF layers if needed.') - input_0 = ensure_tf_type(layers[node.input[0]], layers[list(layers)[0]], name="%s_const" % keras_name) - layers[node_name] = input_0 - - if 'axes' in params: - if len(params["axes"]) != 1: - raise NotImplementedError("Multiple axes in Slice is not implemented") - axes = params["axes"][0] - ends = params["ends"][0] - starts = params["starts"][0] - else: - starts = ensure_numpy_type(layers[node.input[1]]) - ends = ensure_numpy_type(layers[node.input[2]]) - axes = ensure_numpy_type(layers[node.input[3]]) - - for i in range(len(starts)): - if axes[i] != i: - assert AttributeError('Cant slice permuted axes') - - if isinstance(axes, list) or isinstance(axes, np.ndarray): - 
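# Axes supplied as a list/array: build per-axis begin/end vectors and clear the
# matching bits in begin_mask/end_mask so tf.strided_slice only restricts the
# requested axes and leaves the remaining dimensions untouched.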
if params['change_ordering']: - raise NotImplementedError("change_ordering for Slice is not implemented") - - def target_layer(x, axes=np.array(axes), starts=starts, ends=ends): - import tensorflow as tf - rank = max(axes) - s = [0 for _ in range(rank+1)] - e = [0 for _ in range(rank+1)] - mask = 0xff - for _s, _e, axis in zip(starts, ends, axes): - s[axis] = _s - e[axis] = _e - mask = mask ^ (0x1 << axis) - return tf.strided_slice(x, s, e, begin_mask=mask, end_mask=mask) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - else: - def target_layer(x, axis=axes, starts=starts, ends=ends): - import tensorflow as tf - rank = axis - s = [0 for _ in range(rank+1)] - e = [0 for _ in range(rank+1)] - mask = 0xff - s[axis] = starts - e[axis] = ends - mask = mask ^ (0x1 << axis) - return tf.strided_slice(x, s, e, begin_mask=mask, end_mask=mask) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_squeeze(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Squeeze layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 1: - assert AttributeError('More than 1 input for squeeze layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - - def target_layer(x, axis=params['axes'][0]): - from tensorflow import keras - return keras.backend.squeeze(x, axis) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer - - -def convert_expand(node, params, layers, lambda_func, node_name, keras_name): - """ - Convert Expand layer - :param node: current operation node - :param params: operation attributes - :param layers: available keras layers - :param lambda_func: function for keras Lambda layer - :param node_name: internal converter name - :param keras_name: resulting layer name - :return: None - """ - if len(node.input) != 2: - assert AttributeError('More than 2 input for expand layer.') - - input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) - input_1 = ensure_numpy_type(layers[node.input[1]]) - - def target_layer(x, shape=input_1): - from tensorflow import keras - - # if (len(x.shape) == len(shape)): - # for axis, new_shape in enumerate(shape): - # if axis == 0: - # continue - # x = keras.backend.repeat_elements(x, int(new_shape // x.shape[axis]), axis) - # pass - - x = keras.backend.repeat_elements(x, int(shape[1] // x.shape[1]), 1) - x = keras.backend.repeat_elements(x, int(shape[2] // x.shape[2]), 2) - return x - - # Proper version - # return tf.broadcast_to(x, (1, *shape[1:])) - - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) - layers[node_name] = lambda_layer(input_0) - lambda_func[keras_name] = target_layer diff --git a/onnx2keras/utils.py b/onnx2keras/utils.py deleted file mode 100644 index bc4dca2b..00000000 --- a/onnx2keras/utils.py +++ /dev/null @@ -1,119 +0,0 @@ -import numpy as np -from tensorflow import keras - - -def is_numpy(obj): - """ - Check of the type is instance of numpy array - :param obj: object to check - :return: True if 
the object is numpy-type array. - """ - return isinstance(obj, (np.ndarray, np.generic)) - - -def ensure_numpy_type(obj): - """ - Raise exception if it's not a numpy - :param obj: object to check - :return: numpy object - """ - if is_numpy(obj): - return obj - else: - raise AttributeError('Not a numpy type.') - - -def ensure_tf_type(obj, fake_input_layer=None, name=None): - """ - Convert to Keras Constant if needed - :param obj: numpy / tf type - :param fake_input_layer: fake input layer to add constant - :return: tf type - """ - if is_numpy(obj): - if obj.dtype == np.int64: - obj = np.int32(obj) - - def target_layer(_, inp=obj, dtype=obj.dtype.name): - import numpy as np - import tensorflow as tf - if not isinstance(inp, (np.ndarray, np.generic)): - inp = np.array(inp, dtype=dtype) - return tf.constant(inp, dtype=inp.dtype) - - lambda_layer = keras.layers.Lambda(target_layer, name=name) - return lambda_layer(fake_input_layer) - else: - return obj - - -def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False): - """ - Check difference between Torch and Keras models - :param model: torch model - :param k_model: keras model - :param input_np: input data as numpy array or list of numpy array - :param epsilon: allowed difference - :param change_ordering: change ordering for keras input - :return: actual difference - """ - from torch.autograd import Variable - import torch - - initial_keras_image_format = keras.backend.image_data_format() - - if isinstance(input_np, np.ndarray): - input_np = [input_np.astype(np.float32)] - - - input_var = [Variable(torch.FloatTensor(i)) for i in input_np] - pytorch_output = model(*input_var) - if not isinstance(pytorch_output, tuple): - pytorch_output = [pytorch_output.data.numpy()] - else: - pytorch_output = [p.data.numpy() for p in pytorch_output] - - if change_ordering: - # change image data format - - # to proper work with Lambda layers that transpose weights based on image_data_format - keras.backend.set_image_data_format("channels_last") - - _input_np = [] - for i in input_np: - axes = list(range(len(i.shape))) - axes = axes[0:1] + axes[2:] + axes[1:2] - _input_np.append(np.transpose(i, axes)) - input_np = _input_np - - # run keras model - keras_output = k_model.predict(input_np) - if not isinstance(keras_output, list): - keras_output = [keras_output] - - # change image data format if output shapes are different (e.g. 
the same for global_avgpool2d) - _koutput = [] - for i, k in enumerate(keras_output): - if k.shape != pytorch_output[i].shape: - axes = list(range(len(k.shape))) - axes = axes[0:1] + axes[-1:] + axes[1:-1] - k = np.transpose(k, axes) - _koutput.append(k) - keras_output = _koutput - else: - keras.backend.set_image_data_format("channels_first") - keras_output = k_model.predict(input_np) - if not isinstance(keras_output, list): - keras_output = [keras_output] - - # reset to previous image_data_format - keras.backend.set_image_data_format(initial_keras_image_format) - - max_error = 0 - for p, k in zip(pytorch_output, keras_output): - error = np.max(np.abs(p - k)) - np.testing.assert_allclose(p, k, atol=epsilon, rtol=0.0) - if error > max_error: - max_error = error - - return max_error diff --git a/onnx2kerastl/__init__.py b/onnx2kerastl/__init__.py new file mode 100644 index 00000000..4ca8f5c2 --- /dev/null +++ b/onnx2kerastl/__init__.py @@ -0,0 +1,3 @@ +from .converter import onnx_to_keras + +__all__ = ['onnx_to_keras'] diff --git a/onnx2kerastl/activation_layers.py b/onnx2kerastl/activation_layers.py new file mode 100644 index 00000000..a1c7f6a0 --- /dev/null +++ b/onnx2kerastl/activation_layers.py @@ -0,0 +1,318 @@ +import logging + +import keras +import tensorflow as tf + +from .utils import ensure_tf_type +from .tfops_funcs import tf_multiply, tf_add, tf_clip_by_value, tf_math_erf, tf_math_tanh, tf_math_softplus + + +def convert_relu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ReLU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + relu = keras.layers.Activation('relu', name=f"{params['cleaned_name']}_relu") + layers[node_name] = relu(input_0) + + +def convert_elu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + alpha = params.get('alpha', keras.layers.ELU.__init__.__defaults__[0]) + elu = keras.layers.ELU(alpha=alpha, name=f"{params['cleaned_name']}_elu") + layers[node_name] = elu(input_0) + + +def convert_lrelu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert LeakyReLU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + alpha = params.get('alpha', keras.layers.LeakyReLU.__init__.__defaults__[0]) + 
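# Fall back to Keras's own default negative slope when the ONNX node omits 'alpha'.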
leakyrelu = keras.layers.LeakyReLU(alpha=alpha, name=f"{params['cleaned_name']}_leakyrelu") + layers[node_name] = leakyrelu(input_0) + + +def convert_sigmoid(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Sigmoid activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + sigmoid = keras.layers.Activation('sigmoid', name=f"{params['cleaned_name']}_sigmoid") + layers[node_name] = sigmoid(input_0) + + +def convert_tanh(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Tanh activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + tanh = keras.layers.Activation('tanh', name=f"{params['cleaned_name']}_tanh") + layers[node_name] = tanh(input_0) + + +def convert_selu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + selu = keras.layers.Activation('selu', name=f"{params['cleaned_name']}_selu") + layers[node_name] = selu(input_0) + + +def convert_soft_plus(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf.keras.activations.softplus(input_0) + + +def convert_soft_sign(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf.keras.activations.softsign(input_0) + + +def convert_mish(node, params, 
layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = input_0 * tf_math_tanh(tf_math_softplus(input_0)) + + +def convert_hard_swish(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + alpha = 1 / 6 + beta = 0.5 + hard_sigmoid = max(0, min(1, alpha * input_0 + beta)) + hard_swish = input_0 * hard_sigmoid + layers[node_name] = hard_swish + + +def convert_gelu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert SELU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf.keras.activations.gelu(input_0) + + +def convert_softmax(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert softmax activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + axis = params.get('axis', keras.layers.Softmax.__init__.__defaults__[0]) + softmax_layer = keras.layers.Softmax(axis=axis, name=f"{params['cleaned_name']}_softmax") + layers[node_name] = softmax_layer(input_0) + layers[node_name].set_shape(layers[node_name].shape) + + +def convert_prelu(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert PReLU activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.prelu') + + if len(node.input) != 2: + assert AttributeError('Activation layer PReLU should have 2 inputs.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + W = layers[node.input[1]] + + if params['change_ordering']: + logger.warning('PRelu + 
change ordering needs to be fixed after TF graph is built.') + logger.warning('It\'s experimental.') + + shared_axes = [2, 3] + + # for case when W.shape (n,). When activation is used for single dimension vector. + shared_axes = shared_axes if len(W.shape) > 1 else None + + prelu = keras.layers.PReLU(weights=[W], shared_axes=shared_axes, name=f"{params['cleaned_name']}_prelu") + layers[node_name] = prelu(input_0) + + +def convert_hard_sigmoid(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Hard Sigmoid activation layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + alpha = params.get("alpha", 0.2) + beta = params.get("beta", 0.5) + # hard sigmoid logic + x = tf_multiply(input_0, alpha, tf_name=f"{params['cleaned_name']}_multiply") + x = tf_add(x, beta, tf_name=f"{params['cleaned_name']}_add") + x = tf_clip_by_value(x, 0., 1., tf_name=f"{params['cleaned_name']}_clip") + layers[node_name] = x + + +def convert_erf(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ERF math operation + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for an activation layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf_math_erf(input_0, tf_name=f"{params['cleaned_name']}_erf") diff --git a/onnx2kerastl/caffe2_layers.py b/onnx2kerastl/caffe2_layers.py new file mode 100644 index 00000000..2c5521ab --- /dev/null +++ b/onnx2kerastl/caffe2_layers.py @@ -0,0 +1,32 @@ +from .upsampling_layers import convert_upsample + + +def convert_alias_with_name(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Constant layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + layers[node_name] = layers[node.input[0]] + + +def convert_resize_nearest(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Constant layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + params['scales'] = (params['height_scale'], params['width_scale']) + params['mode'] = 'nearest'.encode('utf-8') + convert_upsample(node, params, layers, lambda_func, node_name, keras_name) + diff --git a/onnx2kerastl/constant_layers.py b/onnx2kerastl/constant_layers.py new file mode 100644 index 00000000..cdf5bdc8 --- /dev/null +++ b/onnx2kerastl/constant_layers.py @@ -0,0 +1,55 @@ +import numpy as np +import tensorflow as tf +from .utils import is_numpy +from 
.tfops_funcs import tf_cast, tf_one_hot +from keras import backend as K + +def convert_constant(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Constant layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + layers[node_name] = params['value'] + + +def convert_constant_of_shape(node, params, layers, lambda_func, node_name, keras_name): + value = params.get('value') + if value is None: + raise NotImplementedError("ConstantOfShape should have a value param") + + input_0 = layers[node.input[0]] + + if not is_numpy(input_0) and not isinstance(input_0, list) and K.is_keras_tensor(input_0): + # Boolean case + if value.dtype == np.bool_: + layers[node_name] = tf.fill(layers[node.input[0]], tf.constant(value.item(), dtype=tf.bool)) + else: + # Non-boolean case + layers[node_name] = tf.ones(layers[node.input[0]], dtype=tf.as_dtype(value.dtype)) * value + else: + # Handle numpy inputs or non-Keras tensors + if value.dtype == np.bool_: + layers[node_name] = np.full(layers[node.input[0]], value.item(), dtype=bool) + else: + layers[node_name] = np.ones(layers[node.input[0]], dtype=value.dtype) * value + + + +def convert_one_hot(node, params, layers, lambda_func, node_name, keras_name): + axis = params.get('axis', -1) + layers[node_name] = tf_one_hot(indices=tf_cast(layers[node.input[0]], + tf.int64, + tf_name=f"{params['cleaned_name']}_onehot_cast"), + depth=int(layers[node.input[1]]), + off_value=layers[node.input[2]][0], + on_value=layers[node.input[2]][1], + axis=axis, + tf_name=f"{params['cleaned_name']}_onehot" + ) + diff --git a/onnx2kerastl/convert_model.py b/onnx2kerastl/convert_model.py new file mode 100644 index 00000000..a012cd6d --- /dev/null +++ b/onnx2kerastl/convert_model.py @@ -0,0 +1,37 @@ +import argparse +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +import tensorflow as tf +from keras_data_format_converter import convert_channels_first_to_last + +def convert_onnx_to_keras(onnx_model_path, transform_io:bool = True): + # Load ONNX model + save_model_path = onnx_model_path.replace('.onnx', '.h5') + onnx_model = onnx.load(onnx_model_path) + + # Extract input feature names from the model + input_features = [inp.name for inp in onnx_model.graph.input] + + # Convert ONNX model to Keras + keras_model = onnx_to_keras(onnx_model, input_names=input_features, + name_policy='attach_weights_name', allow_partial_compilation=False).converted_model + + # Convert from channels-first to channels-last format + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=transform_io, + verbose=True) + + # Save the final Keras model + final_model.save(save_model_path) + print(f"Model saved to {save_model_path}") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Convert ONNX model to Keras') + parser.add_argument('onnx_path', type=str, help='Path to the input ONNX model') + parser.add_argument('transform_input_output', type=bool, help='Whether to transform input and output data format') + args = parser.parse_args() + + # Convert input_shape string to tuple of integers + + convert_onnx_to_keras(args.onnx_path, args.transform_input_output) \ No newline at end of file diff --git a/onnx2kerastl/converter.py b/onnx2kerastl/converter.py new file mode 100644 index 
00000000..346adf40 --- /dev/null +++ b/onnx2kerastl/converter.py @@ -0,0 +1,432 @@ +""" +The ONNX to keras converter module +""" +import importlib.util +import inspect +import logging +import uuid +from dataclasses import dataclass +from typing import Optional + +import keras +import keras.backend +import tensorflow as tf +from keras.models import Model + +from .customonnxlayer import onnx_custom_objects_map +from .exceptions import UnsupportedLayer, OnnxUnsupported +from .layers import AVAILABLE_CONVERTERS +import re + +onnx_imported = False +package_name = 'onnx' +spec = importlib.util.find_spec(package_name) +if spec is not None: + from onnx import numpy_helper + + onnx_imported = True + + +@dataclass +class ConvertedResponse: + converted_model: Model + error_info: Optional[str] = None + + +def onnx_node_attributes_to_dict(args): + """ + Parse ONNX attributes to Python dictionary + :param args: ONNX attributes object + :return: Python dictionary + """ + + def onnx_attribute_to_dict(onnx_attr): + """ + Parse ONNX attribute + :param onnx_attr: ONNX attribute + :return: Python data type + """ + if onnx_attr.HasField('t'): + return numpy_helper.to_array(getattr(onnx_attr, 't')) + + for attr_type in ['f', 'i', 's']: + if onnx_attr.HasField(attr_type): + return getattr(onnx_attr, attr_type) + + for attr_type in ['floats', 'ints', 'strings']: + if getattr(onnx_attr, attr_type): + return list(getattr(onnx_attr, attr_type)) + + return {arg.name: onnx_attribute_to_dict(arg) for arg in args} + + +def flatten_onnx_nodes(onnx_nodes): + onnx_list = [] + for node in onnx_nodes: + if node.op_type == "If": + onnx_list += flatten_onnx_nodes(node.attribute[0].g.node) + onnx_list += flatten_onnx_nodes(node.attribute[1].g.node) + onnx_list.append(node) + return onnx_list + + +def onnx_to_keras(onnx_model, input_names, name_policy=None, verbose=True, change_ordering=False, input_types=None, + allow_partial_compilation=True) \ + -> ConvertedResponse: + """ + Convert ONNX graph to Keras model format + :param onnx_model: loaded ONNX model + :param input_names: list with input names + :param name_policy: override layer names. None, "short", "renumerate" or "attach_weights_name" (last 2 are experimental) + :param verbose: verbose output + :param change_ordering: change ordering to HWC (experimental) + :return: Keras model + """ + if not onnx_imported: + raise OnnxUnsupported() + # Use channels first format by default. 
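# Remember the caller's image_data_format so it can be restored once the graph is
# built; ONNX weights are laid out channels-first (NCHW), so conversion itself runs
# with channels_first.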
+ keras_fmt = keras.backend.image_data_format() + keras.backend.set_image_data_format('channels_first') + + if verbose: + logging.basicConfig(level=logging.DEBUG) + + logger = logging.getLogger('onnx2keras') + + logger.info('Converter is called.') + + onnx_weights = onnx_model.graph.initializer + onnx_inputs = onnx_model.graph.input + onnx_outputs = [i.name for i in onnx_model.graph.output] + onnx_nodes = flatten_onnx_nodes(onnx_model.graph.node) # Pulls graph out of If nodes + logger.debug('List inputs:') + for i, input in enumerate(onnx_inputs): + logger.debug('Input {0} -> {1}.'.format(i, input.name)) + + logger.debug('List outputs:') + for i, output in enumerate(onnx_outputs): + logger.debug('Output {0} -> {1}.'.format(i, output)) + + logger.debug('Gathering weights to dictionary.') + weights = {} + for onnx_w in onnx_weights: + try: + if len(onnx_w.ListFields()) < 4: + onnx_extracted_weights_name = onnx_w.ListFields()[1][1] + else: + onnx_extracted_weights_name = onnx_w.ListFields()[2][1] + weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) + except: + onnx_extracted_weights_name = onnx_w.ListFields()[3][1] + weights[onnx_extracted_weights_name] = numpy_helper.to_array(onnx_w) + + logger.debug('Found weight {0} with shape {1}.'.format( + onnx_extracted_weights_name, + weights[onnx_extracted_weights_name].shape)) + + layers = dict() + lambda_funcs = dict() + keras_outputs = [] + keras_inputs = [] + + for i, input_name in enumerate(input_names): + for onnx_i in onnx_inputs: + if onnx_i.name == input_name: + dtype = None if input_types is None else input_types[i] + input_shape = [i.dim_value for i in onnx_i.type.tensor_type.shape.dim] + input_shape = [shape if shape != 0 else None for shape in input_shape] + if len(input_shape) <= 1: + input_tensor = keras.layers.InputLayer(input_shape=input_shape, name=input_name, dtype=dtype).output + layers[input_name] = input_tensor[0] + keras_inputs.append(input_tensor) + + else: + batch_size = input_shape[0] + input_shape = input_shape[1:] + if batch_size is None: + layers[input_name] = keras.layers.InputLayer( + input_shape=input_shape, name=input_name, dtype=dtype).output + else: + layers[input_name] = keras.layers.InputLayer( + input_shape=input_shape, name=input_name, dtype=dtype, batch_size=batch_size).output + + keras_inputs.append(layers[input_name]) + + logger.debug('Found input {0} with shape {1}'.format(input_name, input_shape)) + + keras_middle_outputs = {} + error_info = None + try: + # Convert every operation separable + node_names = [] + embedding_weights_mapping = {} + for node_index, node in enumerate(onnx_nodes): + node_type = node.op_type + node_params = onnx_node_attributes_to_dict(node.attribute) + # Add global converter info: + node_params['change_ordering'] = change_ordering + node_params['name_policy'] = name_policy + + node_name = str(node.output[0]) + keras_names = [] + for output_index, output in enumerate(node.output): + if name_policy == 'short': + keras_name = keras_name_i = str(output)[:8] + suffix = 1 + while keras_name_i in node_names: + keras_name_i = keras_name + '_' + str(suffix) + suffix += 1 + keras_names.append(keras_name_i) + elif name_policy == 'renumerate': + postfix = node_index if len(node.output) == 1 else "%s_%s" % (node_index, output_index) + keras_names.append('LAYER_%s' % postfix) + elif name_policy == 'attach_weights_name': + attached_weights_names = [] + for node_input in node.input: + if node_input in weights: + weight_name = ".".join(node_input.split(".")[:-1]) + 
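# Drop the trailing parameter suffix so the layer name carries the parent module's
# name (e.g. a hypothetical initializer named 'conv1.weight' would contribute
# 'conv1' to the generated Keras layer name).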
attached_weights_names.append(weight_name) + set_weights_names = set(attached_weights_names) + set_weights_names = "__".join(set_weights_names) + layer_name = output.replace(":", "_") + while not (str.isalpha(layer_name[0]) or str.isdigit(layer_name[0]) or layer_name[0] == "."): + layer_name = layer_name[1:] + + if layer_name == "": + layer_name = str(uuid.uuid4())[:10] + + if set_weights_names: + layer_name = f"{layer_name}__{set_weights_names}" + + keras_names.append(layer_name) + else: + output = output.replace(":", "_") + keras_names.append(output) + keras_names = [k.lstrip("/") for k in keras_names] + if len(node.output) != 1: + logger.warning('Trying to convert multi-output node') + node_params['_outputs'] = list(node.output) + node_names.extend(keras_names) + else: + keras_names = keras_names[0] + node_names.append(keras_names) + pattern = r'[#:@]' # Example pattern to match #, /, and : + cleaned_node_name = re.sub(pattern, '_', node.name.rstrip("/").lstrip("/")) + if len(cleaned_node_name) == 0: + cleaned_node_name = re.sub(pattern, '_', node_name.rstrip("/").lstrip("/")) + node_params['cleaned_name'] = f'{cleaned_node_name}_tl' + logger.debug('######') + logger.debug(f"{node_index/len(onnx_nodes):.1%} completed") + logger.debug('...') + logger.debug('Converting ONNX operation') + logger.debug('type: %s', node_type) + logger.debug('node_name: %s', node_name) + logger.debug('node_params: %s', node_params) + logger.debug('...') + + logger.debug('Check if all inputs are available:') + if len(node.input) == 0 and node_type != 'Constant': + raise AttributeError('Operation doesn\'t have an input. Aborting.') + for i, node_input in enumerate(node.input): + logger.debug('Check input %i (name %s).', i, node_input) + + # for case of weights sharing, map the shared weights to determine + # if a Gather layer is an embedding layer + if node_type == 'Identity' and node_input in weights: + embedding_weights_mapping[node_name] = node_input + + # check conditions for embedding layer + is_in_weights = node_input in weights # is this node input in weights + is_mapped_to_weights = embedding_weights_mapping.get(node_input, + '') in weights # is this node inputs weights are shared with other input + is_embedding = ( + is_in_weights or is_mapped_to_weights) and i == 0 # if either is true this layer is a possible embedding layer + + # if a layer is of type Gather and its input is in weights (or mapped to a weights input) + # it's an embedding layer + if node_type == "Gather" and is_embedding: + node_params['is_embedding'] = True + + if node_input not in layers: + logger.debug('The input not found in layers / model inputs.') + if node_input in weights: + logger.debug('Found in weights, add as a numpy constant.') + layers[node_input] = weights[node_input] + else: + if node_input == "" and node_type in ('Pad', 'Resize', 'Clip', 'LSTM', 'GRU'): + continue + else: + raise AttributeError('Current node is not in weights / model inputs / layers.') + else: + logger.debug('... 
found all, continue') + + keras.backend.set_image_data_format('channels_first') + try: + layer_converter_func = AVAILABLE_CONVERTERS[node_type] + except KeyError: + raise UnsupportedLayer(node_type) + layer_converter_func( + node, + node_params, + layers, + lambda_funcs, + node_name, + keras_names + ) + # remove node inputs + for inp in node.input: + keras_middle_outputs.pop(inp, None) + # add node to middle map + if tf.is_tensor(layers[node_name]): + keras_middle_outputs[node_name] = layers[node_name] + + if isinstance(keras_names, list): + keras_names = keras_names[0] + + try: + logger.debug('Output TF Layer -> ' + str(layers[keras_names])) + except KeyError: + pass + + # Check for terminal nodes + for layer in onnx_outputs: + if layer in layers: + keras_outputs.append(layers[layer]) + + model = keras.models.Model(inputs=keras_inputs, outputs=keras_outputs) + except Exception as e: + if not allow_partial_compilation: + raise + + if len(keras_middle_outputs) == 0: + raise e + + error_info = repr(e) + if isinstance(e, UnsupportedLayer): + error_info = e.layer_description + + keras_outputs = list(keras_middle_outputs.values()) + try: + model = keras.models.Model(inputs=keras_inputs, outputs=keras_outputs) + except: + raise e + + if change_ordering: + change_ord_axes_map = { + 3: 2, + 1: 3, + -1: 1 + } + + import numpy as np + conf = model.get_config() + + for layer in conf['layers']: + if layer['config'] and 'shared_axes' in layer['config']: + # TODO: check axes first (if it's not 4D tensor) + layer['config']['shared_axes'] = [1, 2] + + if layer['config'] and 'batch_input_shape' in layer['config']: + layer['config']['batch_input_shape'] = \ + tuple(np.reshape(np.array( + [ + [None] + + list(layer['config']['batch_input_shape'][2:][:]) + + [layer['config']['batch_input_shape'][1]] + ]), -1 + )) + if layer['config'] and 'target_shape' in layer['config']: + if len(list(layer['config']['target_shape'][1:][:])) > 0: + layer['config']['target_shape'] = \ + tuple(np.reshape(np.array( + list(layer['config']['target_shape'][1:]) + + [layer['config']['target_shape'][0]] + ), -1), ) + + if layer['config'] and 'data_format' in layer['config']: + layer['config']['data_format'] = 'channels_last' + if layer['config'] and 'axis' in layer['config']: + axis = layer['config']['axis'] + # BatchNorm wrap axis with ListWrapper instead single INT value + if isinstance(axis, (tuple, list)): + axis = axis[0] + layer['config']['axis'] = change_ord_axes_map.get(axis, layer['config']['axis']) + + for layer in conf['layers']: + if 'function' in layer['config'] and layer['config']['function'][1] is not None: + kerasf = list(layer['config']['function']) + dargs = list(kerasf[1]) + func = lambda_funcs.get(layer['name']) + + if func: + # ReduceSum operation has 'axis' param as array of ints. 
When onnx uses ReduceSum + # to reproduce SoftMax - dargs become something like [[1]] (list of lists) + # that why we handle collections.Iterable + if len(dargs) > 1 or isinstance(dargs[0], (tuple, list)): + params = inspect.signature(func).parameters + i = list(params.keys()).index('axes') if ('axes' in params) else -1 + + if i > 0: + i -= 1 + axes = list(range(len(dargs[i].shape))) + axes = axes[0:1] + axes[2:] + axes[1:2] + dargs[i] = np.transpose(dargs[i], axes) + + i = list(params.keys()).index('axis') if ('axis' in params) else -1 + + if i > 0: + i -= 1 + axis = np.array(dargs[i]) + axes_map = np.array([0, 3, 1, 2]) + # to list because some tf operations check only for core python types (e.g tf.norm) + dargs[i] = axes_map[axis].tolist() + else: + # if map exits will change else will remain the same + dargs[0] = change_ord_axes_map.get(dargs[0], dargs[0]) + + kerasf[1] = tuple(dargs) + layer['config']['function'] = tuple(kerasf) + + keras.backend.set_image_data_format('channels_last') + model_tf_ordering = keras.models.Model.from_config(conf, custom_objects=onnx_custom_objects_map) + + for dst_layer, src_layer, conf in zip(model_tf_ordering.layers, model.layers, conf['layers']): + W = src_layer.get_weights() + # TODO: check axes first (if it's not 4D tensor) + if conf['config'] and 'shared_axes' in conf['config']: + W[0] = W[0].transpose(1, 2, 0) + dst_layer.set_weights(W) + + model = model_tf_ordering + + keras.backend.set_image_data_format(keras_fmt) + + response = ConvertedResponse(model, error_info) + return response + + +def extract_op_node(node_graph, layers, lambda_funcs, keras_names, change_ordering, name_policy): + op_node = None + for node_i, node in enumerate(node_graph): + if node.op_type == 'Constant': + node_params = onnx_node_attributes_to_dict(node.attribute) + # Add global converter info: + node_params['change_ordering'] = change_ordering + node_params['name_policy'] = name_policy + node_name = str(node.output[0]) + + AVAILABLE_CONVERTERS[node.op_type]( + node, + node_params, + layers, + lambda_funcs, + node_name, + keras_names + ) + else: # op type + if op_node is not None: + raise NotImplementedError('Not Implemented: inner graph in If node with multiple operator nodes') + op_node = node + if op_node is None: + raise NotImplementedError('Something is off with If node') + return op_node diff --git a/onnx2kerastl/convolution_layers.py b/onnx2kerastl/convolution_layers.py new file mode 100644 index 00000000..82ec521e --- /dev/null +++ b/onnx2kerastl/convolution_layers.py @@ -0,0 +1,399 @@ +import logging +from functools import partial +from typing import List + +import keras +import numpy as np +import tensorflow as tf +from tensorflow.python.framework.ops import EagerTensor + +from .utils import ensure_tf_type, is_numpy +from .tfops_funcs import tf_transpose, tf_pad, tf_shape, tf_reshape + +def calculate_permute_values(n_dims: int, to_channel_first: bool) -> List[int]: + if to_channel_first: + return [n_dims - 1] + list(range(1, n_dims - 1)) + else: + return list(range(2, n_dims)) + [1] + + +def permute_wrap_conv_if_constant(partial_func, conv_input, is_constant, conv_channels, params): + if is_constant: + input_shape = tf_shape(conv_input, tf_name=f"{params['cleaned_name']}_conv_wrap_shape") + permuted = keras.layers.Permute(calculate_permute_values(len(input_shape), to_channel_first=False), + name=f"{params['cleaned_name']}_conv_wrap_permute_1")(conv_input) + conv_res = partial_func(data_format="channels_last")(permuted) + result = 
keras.layers.Permute(calculate_permute_values(len(input_shape), to_channel_first=True), + name=f"{params['cleaned_name']}_conv_wrap_permute_2")(conv_res) + else: + data_fmt = keras.backend.image_data_format() + conv = partial_func(data_format=data_fmt) + if data_fmt == 'channels_first': + channels_idx = 1 + else: + channels_idx = -1 + if conv_input.shape[channels_idx] is None: # This will not serialize well unless we reshape input + conv_input_shape = tf_shape(conv_input, tf_name=f"{params['cleaned_name']}_conv_wrap_shape_1") + conv_input = tf_reshape(conv_input, [*conv_input_shape[:channels_idx], conv_channels, + *conv_input_shape[channels_idx + 1:]], + tf_name=f"{params['cleaned_name']}_conv_wrap_reshape_2") + if conv_input.shape[-1] is None: + conv.build((None, conv_channels, *conv_input.shape[2:])) + result = conv(conv_input) + return result + + +def convert_conv(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert convolution layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.conv') + + if len(node.input) == 3: + logger.debug('Conv with bias') + # Has bias + has_bias = True + W = layers[node.input[1]] + bias = layers[node.input[2]] + + elif len(node.input) == 2: + logger.debug('Conv without bias') + has_bias = False + W = layers[node.input[1]] + bias = None + + else: + raise NotImplementedError('Not implemented') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor) + is_W_constant = is_numpy(W) or isinstance(W, EagerTensor) + n_groups = params['group'] if 'group' in params else 1 + dilation = params['dilations'][0] if 'dilations' in params else 1 + pads = params['pads'] if 'pads' in params else [0, 0, 0] + strides = params['strides'] if 'strides' in params else [1, 1, 1] + auto_pad = params.get('auto_pad',"".encode()).decode() + if "SAME" in auto_pad: + input_size = np.array(input_0.shape[2:]) #Assuming NCHW + if None in input_size: + raise Exception("Conv Layers currently does not currently support auto_pad with dynamic input shape") + else: + output_size = tf.math.ceil(input_size/np.array(strides)) + kernel_size = np.array(W.shape[2:]) + pads = np.maximum(0, (output_size - 1) * np.array(strides) + dilation * (kernel_size - 1) + 1 - input_size).astype(int) + pad_before = np.floor(pads/2).astype(int) + pad_after = pads-pad_before + if "LOWER" in auto_pad: + #SAME LOWER means you pad more before + pad_after, pad_before = pad_before, pad_after + pads = np.column_stack((pad_before, pad_after)).ravel() + if len(W.shape) == 5: # 3D conv + logger.debug('3D convolution') + if pads[0] > 0 or pads[1] > 0 or pads[2] > 0: + logger.debug('Paddings exist, add ZeroPadding layer') + padding_name = f"{params['cleaned_name']}_" + 'conv_pad' + padding_layer = keras.layers.ZeroPadding3D( + padding=(pads[0], pads[1], pads[2]), + name=padding_name + ) + layers[padding_name] = input_0 = padding_layer(input_0) + out_channels, channels_per_group, dimension, height, width = W.shape + W = W.transpose(2, 3, 4, 1, 0) + + if has_bias: + weights = [W, bias] + else: + weights = [W] + conv_args = {"filters": out_channels, + "kernel_size": (dimension, height, width), + "strides": (strides[0], strides[1], strides[2]), + "padding": 
'valid', + "weights": weights, + "use_bias": has_bias, + "activation": None, + "dilation_rate": dilation, + "name": f"{params['cleaned_name']}_" + 'conv', + "groups": n_groups} + partial_conv = partial(keras.layers.Conv3D, **conv_args) + layers[node_name] = permute_wrap_conv_if_constant(partial_conv, input_0, is_constant, weights[0].shape[-2]*n_groups, params) + + elif len(W.shape) == 4: # 2D conv + logger.debug('2D convolution') + + padding = None + if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): + padding = (pads[0], pads[1]) + elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): + padding = ((pads[0], pads[2]), (pads[1], pads[3])) + + if padding: + logger.debug('Paddings exist, add ZeroPadding layer') + padding_name = f"{params['cleaned_name']}_" + 'conv_pad_1' + padding_layer = keras.layers.ZeroPadding2D( + padding=padding, + name=padding_name, + data_format='channels_first' + ) + layers[padding_name] = input_0 = padding_layer(input_0) + + W = W.transpose(2, 3, 1, 0) if is_W_constant else tf.transpose(W, [2, 3, 1, 0]) + height, width, channels_per_group, out_channels = W.shape + + if has_bias: + weights = [W, bias] + else: + weights = [W] + if is_W_constant: + conv_args = {"filters": out_channels, + "kernel_size": (height, width), + "strides": (strides[0], strides[1]), + "padding": 'valid', + "weights": weights, + "use_bias": has_bias, + "activation": None, + "dilation_rate": dilation, + "name": f"{params['cleaned_name']}_" + 'conv', + "groups": n_groups} + + partial_conv = partial(keras.layers.Conv2D, **conv_args) + layers[node_name] = permute_wrap_conv_if_constant(partial_conv, input_0, is_constant, weights[0].shape[-2]*n_groups, params) + else: + input_0_nhwc = tf_transpose(input_0, [0, 2, 3, 1], + tf_name=f"{params['cleaned_name']}_" + 'conv_transpose_nhwc') + + # Perform the convolution in NHWC format + conv_nhwc = tf.nn.conv2d(input_0_nhwc, weights[0], strides=(strides[0], strides[1]), + dilations=dilation, + padding='VALID', data_format='NHWC', + name=f"{params['cleaned_name']}_" + 'conv') + + # Permute the result back to NCHW format + layers[node_name] = tf_transpose(conv_nhwc, [0, 3, 1, 2], + tf_name=f"{params['cleaned_name']}_" + 'conv_transpose_2_nchw') + else: + # 1D conv + W = W.transpose(2, 1, 0) + width, channels, n_filters = W.shape + print(width, channels, n_filters, has_bias) + + weights = [W] + conv_args = {"filters": n_filters, + "kernel_size": (width), + "strides": (strides[0]), + "weights": weights, + "use_bias": False, + "activation": None, + "dilation_rate": dilation, + "name": f"{params['cleaned_name']}_" + 'conv', + "groups": n_groups} + + padding = None + if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): + padding = (pads[0], pads[1]) + + if padding: + # find the dimension to pad and use the exact padding values + input_shape = np.asarray(keras.backend.int_shape(input_0)) + partitioned_dim = np.argwhere(input_shape == channels * n_groups)[0][0] + padding_dim = 2 if partitioned_dim == 1 else 1 + tf_padding = np.zeros((2, len(input_shape))).astype(int) + tf_padding[:, padding_dim] = [padding[0], padding[1]] + input_0 = tf_pad(input_0, tf.constant(list(tf_padding.transpose())), + tf_name=f"{params['cleaned_name']}_conv_pad_0") + else: + conv_args['padding'] = 'valid' + partial_conv = partial(keras.layers.Conv1D, **conv_args) + res = permute_wrap_conv_if_constant(partial_conv, input_0, is_constant, weights[0].shape[-2]*n_groups, params) + if has_bias: + res_shape = np.asarray(keras.backend.int_shape(res)) + bias_dim = 
np.argwhere(res_shape == bias.shape)[0][0] + expanded_dims = [dim for dim in range(len(res_shape)) if dim != bias_dim] + res = res + np.expand_dims(bias, expanded_dims) + + layers[node_name] = res + + +def convert_convtranspose(node, params, layers, + lambda_func, node_name, keras_name): + """ + Convert transposed convolution layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.convtranpose') + + if len(node.input) == 3: + logger.debug('ConvTranspose with bias') + # Has bias + has_bias = True + W = layers[node.input[1]] + bias = layers[node.input[2]] + + elif len(node.input) == 2: + logger.debug('ConvTranspose without bias') + has_bias = False + W = layers[node.input[1]] + bias = None + + else: + raise NotImplementedError('Not implemented') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + is_W_constant = is_numpy(W) or isinstance(W, EagerTensor) + n_groups = params['group'] if 'group' in params else 1 + dilation = params['dilations'][0] if 'dilations' in params else 1 + pads = params['pads'] if 'pads' in params else [0, 0] + strides = params['strides'] if 'strides' in params else [1, 1] + + if len(W.shape) == 5: # 3D conv + W = W.transpose(2, 3, 4, 1, 0) + height, width, depth, n_filters, channels = W.shape + + if has_bias: + weights = [W, bias] + else: + weights = [W] + + if n_groups > 1: + raise AttributeError('Cannot convert ConvTranspose2d with groups != 1') + + if dilation > 1: + raise AttributeError('Cannot convert ConvTranspose2d with dilation_rate != 1') + + conv = keras.layers.Conv3DTranspose( + filters=n_filters, + kernel_size=(height, width, depth), + strides=strides, + padding='valid', + output_padding=0, + weights=weights, + use_bias=has_bias, + activation=None, + dilation_rate=dilation, + name=f"{params['cleaned_name']}_convtranspose" + ) + + if 'output_shape' in params and 'pads' not in params: + logger.debug('!!!!! Paddings will be calculated automatically !!!!!') + pads = [strides[0] * (int(input_0.shape[2]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][0], + strides[1] * (int(input_0.shape[3]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][1]] + + layers[node_name] = input_0 = conv(input_0) + + # Magic ad-hoc. 
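+        # The transposed convolution above runs with padding='valid'; 'pads' (taken from the node or
+        # derived from stride, dilation, kernel size and the requested output_shape) measures how much
+        # that full output exceeds the ONNX output, and the Cropping layer below trims the excess.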
+ # See the Keras issue: https://github.com/keras-team/keras/issues/6777 + # input_0.set_shape(input_0.shape) + + if 'output_padding' in params and (params['output_padding'][0] > 0 or params['output_padding'][1] > 0): + raise AttributeError('Cannot convert ConvTranspose2d with output_padding != 0') + + if pads[0] > 0: + logger.debug('Add cropping layer for output padding') + assert (len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) + + crop = keras.layers.Cropping2D( + pads[:2], + name=f"{params['cleaned_name']}_convtranspose" + '_crop' + ) + layers[node_name] = crop(input_0) + + elif len(W.shape) == 4: # 2D conv + W = W.transpose(2, 3, 1, 0) if is_W_constant else tf.transpose(W, [2, 3, 1, 0]) + height, width, n_filters, channels = W.shape + + if has_bias: + weights = [W, bias] + else: + weights = [W] + + if n_groups > 1: + raise AttributeError('Cannot convert ConvTranspose2d with groups != 1') + + if dilation > 1: + raise AttributeError('Cannot convert ConvTranspose2d with dilation_rate != 1') + if is_W_constant: + conv = keras.layers.Conv2DTranspose( + filters=n_filters, + kernel_size=(height, width), + strides=strides, + padding='valid', + output_padding=0, + weights=weights, + use_bias=has_bias, + activation=None, + dilation_rate=dilation, + name=f"{params['cleaned_name']}_convtranspose" + ) + + if 'output_shape' in params and 'pads' not in params: + logger.debug('!!!!! Paddings will be calculated automatically !!!!!') + pads = [ + strides[0] * (int(input_0.shape[2]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][0], + strides[1] * (int(input_0.shape[3]) - 1) + 0 + (height - 1) * dilation - params['output_shape'][1]] + + layers[node_name] = input_0 = conv(input_0) + else: + input_0_nhwc = tf.transpose(input_0, [0, 2, 3, 1]) + + output_shape = infer_output_shape(input_shape=tf.shape(input_0_nhwc), filter_shape=tf.shape(W), + strides=strides, + padding='VALID') + conv_transpose_nhwc = tf.nn.conv2d_transpose(input_0_nhwc, weights[0], output_shape=output_shape, + strides=(strides[0], strides[1]), dilations=dilation, padding='VALID', + data_format='NHWC', + name=f"{params['cleaned_name']}_convtranspose_nhwc") + + # Permute the result back to NCHW format + layers[node_name] = tf_transpose(conv_transpose_nhwc, [0, 3, 1, 2], + tf_name=f"{params['cleaned_name']}_convtranspose") + + # Magic ad-hoc. 
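+        # When the kernel is not a compile-time constant it cannot be handed to Conv2DTranspose as
+        # fixed Keras weights, so the operation above runs through tf.nn.conv2d_transpose in NHWC with
+        # an output shape computed by infer_output_shape(), then is permuted back to NCHW.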
+ # See the Keras issue: https://github.com/keras-team/keras/issues/6777 + # input_0.set_shape(input_0.shape) + + if 'output_padding' in params and (params['output_padding'][0] > 0 or params['output_padding'][1] > 0): + raise AttributeError('Cannot convert ConvTranspose2d with output_padding != 0') + + if pads[0] > 0: + logger.debug('Add cropping layer for output padding') + assert (len(pads) == 2 or (pads[2] == pads[0] and pads[3] == pads[1])) + + crop = keras.layers.Cropping2D( + pads[:2], + name=f"{params['cleaned_name']}_convtranspose" + '_crop_1' + ) + layers[node_name] = crop(input_0) + else: + raise AttributeError('Layer is not supported for now') + + +def infer_output_shape(input_shape, filter_shape, strides, padding): + input_size_h, input_size_w = input_shape[1:3] + filter_size_h, filter_size_w = filter_shape[0:2] + + if padding == 'SAME': + pad_h = max((input_size_h - 1) * strides[0] + filter_size_h - input_size_h, 0) // 2 + pad_w = max((input_size_w - 1) * strides[1] + filter_size_w - input_size_w, 0) // 2 + elif padding == 'VALID': + pad_h = 0 + pad_w = 0 + else: + raise ValueError("Padding must be 'SAME' or 'VALID'") + + output_size_h = (input_size_h - 1) * strides[0] + filter_size_h - 2 * pad_h + output_size_w = (input_size_w - 1) * strides[1] + filter_size_w - 2 * pad_w + + return [input_shape[0], output_size_h, + output_size_w, filter_shape[2]] # [batch_size, output_channels, output_height, output_width] diff --git a/onnx2kerastl/customonnxlayer/__init__.py b/onnx2kerastl/customonnxlayer/__init__.py new file mode 100644 index 00000000..86d6e194 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/__init__.py @@ -0,0 +1,10 @@ +from onnx2kerastl.customonnxlayer.onnxeinsum import OnnxEinsumLayer +from onnx2kerastl.customonnxlayer.onnxlstm import OnnxLSTM + +onnx_custom_objects_map = { + "OnnxLSTM": OnnxLSTM, +} + +onnx_custom_layers = { + "OnnxEinsumLayer": OnnxEinsumLayer +} diff --git a/onnx2kerastl/customonnxlayer/onnxabs.py b/onnx2kerastl/customonnxlayer/onnxabs.py new file mode 100644 index 00000000..dae65ade --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxabs.py @@ -0,0 +1,11 @@ +from keras.layers import Layer +import tensorflow as tf + + +class OnnxAbs(Layer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, inputs, **kwargs): + x = tf.math.abs(inputs) + return x diff --git a/onnx2kerastl/customonnxlayer/onnxeinsum.py b/onnx2kerastl/customonnxlayer/onnxeinsum.py new file mode 100644 index 00000000..d8901977 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxeinsum.py @@ -0,0 +1,47 @@ +from typing import Any, Optional, List + +from keras.layers import Layer, TFOpLambda +import tensorflow as tf +import numpy as np + + +# this custom layer needed because of a tensorflow bug on einsum serielization +class OnnxEinsumLayer(Layer): + """ + + Args: + equation: str + constant_input: Optional[List[float]] + constant_place: Optional[int] + """ + + def __init__(self, equation: str, constant_input: Optional[List[float]], constant_place: Optional[int], **kwargs): + super().__init__(**kwargs) + self.equation = equation + if constant_input is not None: + if hasattr(constant_input, 'numpy'): + constant_input = constant_input.numpy() + if not isinstance(constant_input, np.ndarray): + constant_input = np.array(constant_input) + self.constant_input = constant_input + else: + self.constant_input = None + self.constant_place = constant_place + + def call(self, inputs, *args, **kwargs): + if self.constant_input is not None: + if self.constant_place == 1: + 
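+                # constant_place == 1 means the stored constant is the second einsum operand (the
+                # layer's runtime input stays first); any other value places the constant first. For an
+                # (assumed) equation "ij,jk->ik" with constant_place == 1 this computes einsum(eq, x, const).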
inputs = [inputs, self.constant_input] + else: + inputs = [self.constant_input, inputs] + + return tf.einsum(self.equation, *inputs) + + def get_config(self): + config = super().get_config() + config.update({ + "equation": self.equation, + "constant_input": self.constant_input, + "constant_place": self.constant_place + }) + return config diff --git a/onnx2kerastl/customonnxlayer/onnxerf.py b/onnx2kerastl/customonnxlayer/onnxerf.py new file mode 100644 index 00000000..932776f7 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxerf.py @@ -0,0 +1,11 @@ +from keras.layers import Layer +import tensorflow as tf + + +class OnnxErf(Layer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, inputs, **kwargs): + x = tf.math.erf(inputs) + return x diff --git a/onnx2kerastl/customonnxlayer/onnxhardsigmoid.py b/onnx2kerastl/customonnxlayer/onnxhardsigmoid.py new file mode 100644 index 00000000..67440cf8 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxhardsigmoid.py @@ -0,0 +1,23 @@ +from keras.layers import Layer +import tensorflow as tf + + +class OnnxHardSigmoid(Layer): + def __init__(self, alpha: float = 0.2, beta: float = 0.5, **kwargs): + super().__init__(**kwargs) + self.alpha = alpha + self.beta = beta + + def call(self, inputs, **kwargs): + x = tf.multiply(inputs, self.alpha) + x = tf.add(x, self.beta) + x = tf.clip_by_value(x, 0., 1.) + return x + + def get_config(self): + config = super().get_config() + config.update({ + "alpha": self.alpha, + "beta": self.beta, + }) + return config diff --git a/onnx2kerastl/customonnxlayer/onnxlstm.py b/onnx2kerastl/customonnxlayer/onnxlstm.py new file mode 100644 index 00000000..780564b0 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxlstm.py @@ -0,0 +1,46 @@ +from keras.layers import Layer +import tensorflow as tf + + +class OnnxLSTM(Layer): + """ + + Args: + units: int + return_sequences: bool + return_lstm_state: bool + **kwargs: + """ + + def __init__(self, units: int, return_sequences: bool, return_lstm_state: bool, **kwargs): + super().__init__(**kwargs) + self.lstm_layer = tf.keras.layers.LSTM(units, return_sequences=return_sequences, + return_state=return_lstm_state) + self.return_lstm_state = return_lstm_state + self.return_sequences = return_sequences + self.units = units + + def call(self, inputs, initial_h_state=None, initial_c_state=None, **kwargs): + if initial_h_state is not None and initial_c_state is not None: + initial_states = [initial_h_state, initial_c_state] + else: + initial_states = None + res = self.lstm_layer(inputs, initial_state=initial_states, **kwargs) + if self.return_lstm_state: + lstm_tensor, h_out, c_out = res + concat_output = tf.concat([tf.expand_dims(h_out, 1), lstm_tensor, tf.expand_dims(c_out, 1)], axis=1) + return concat_output + else: + return res + + def build(self, input_shape): + self.lstm_layer.build(input_shape) + + def get_config(self): + config = super().get_config() + config.update({ + "return_sequences": self.return_sequences, + "return_lstm_state": self.return_lstm_state, + "units": self.units + }) + return config diff --git a/onnx2kerastl/customonnxlayer/onnxreducemean.py b/onnx2kerastl/customonnxlayer/onnxreducemean.py new file mode 100644 index 00000000..e5059a63 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxreducemean.py @@ -0,0 +1,23 @@ +from typing import List + +import keras.backend as K +from keras.layers import Layer + + +class OnnxReduceMean(Layer): + def __init__(self, axes: List[int], keepdims: bool = True, **kwargs): + super().__init__(**kwargs) + 
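+        # 'axes' are the ONNX ReduceMean reduction axes and 'keepdims' mirrors the ONNX attribute;
+        # both are stored so get_config() can serialize the layer. A minimal (hypothetical) usage:
+        #   layer = OnnxReduceMean(axes=[2, 3], keepdims=True)
+        #   y = layer(x)  # x of shape (1, 8, 4, 4) -> y of shape (1, 8, 1, 1)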
self.axes = axes + self.keepdims = keepdims + + def call(self, inputs, **kwargs): + tensor = K.mean(inputs, keepdims=self.keepdims, axis=self.axes) + return tensor + + def get_config(self): + config = super().get_config() + config.update({ + "axes": self.axes, + "keepdims": self.keepdims, + }) + return config diff --git a/onnx2kerastl/customonnxlayer/onnxsqrt.py b/onnx2kerastl/customonnxlayer/onnxsqrt.py new file mode 100644 index 00000000..2c1b2b76 --- /dev/null +++ b/onnx2kerastl/customonnxlayer/onnxsqrt.py @@ -0,0 +1,11 @@ +import tensorflow as tf +from keras.layers import Layer + + +class OnnxSqrt(Layer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, inputs, **kwargs): + x = tf.math.sqrt(inputs) + return x diff --git a/onnx2kerastl/elementwise_layers.py b/onnx2kerastl/elementwise_layers.py new file mode 100644 index 00000000..efd6c239 --- /dev/null +++ b/onnx2kerastl/elementwise_layers.py @@ -0,0 +1,442 @@ +import numpy as np +import keras +import logging + +from .utils import is_numpy, ensure_tf_type +from .tfops_funcs import tf_tensor_scatter_nd_update, tf_maximum, tf_minimum, tf_cast, tf_expand_dims, tf_repeat,\ + tf_equal, tf_where, tf_round, tf_sign, tf_abs, tf_math_mod, tf_bitwise_left_shift, tf_bitwise_right_shift,\ + tf_logical_not, tf_add +import tensorflow as tf +from tensorflow.python.framework.ops import EagerTensor + + +def _is_integer_type(dtype) -> bool: + return dtype in (tf.int32, tf.int64, tf.int16, tf.int8, np.int32, np.int64, np.int16, np.int8) + + +def convert_elementwise_div(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert element-wise division + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.div') + + if len(node.input) != 2: + raise AttributeError('Number of inputs is not equal 2 for element-wise layer') + + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + try: + logger.debug('Divide numpy arrays.') + div = input_0 / input_1 + if _is_integer_type(input_0.dtype) and _is_integer_type(input_1.dtype): + div = tf_cast(div, input_0.dtype, tf_name=f"{params['cleaned_name']}_div_cast") + if hasattr(div, 'numpy'): + div = div.numpy() + layers[node_name] = div + + except (IndexError, ValueError): + logger.debug('Convert inputs to Keras/TF layers if needed.') + + def target_layer(x): + import tensorflow as tf + layer = tf.divide( + x[0], + x[1] + ) + return layer + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_div") + layers[node_name] = lambda_layer([input_0, input_1]) + lambda_func[keras_name] = target_layer + + +def convert_elementwise_add(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert element-wise add. 
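+    If both operands are compile-time constants the sum is computed at conversion time; if only one
+    is constant it is applied through a Lambda layer; otherwise keras.layers.Add (or tf.add when the
+    ranks differ) is used.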
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.add') + + if len(node.input) != 2: + raise AttributeError('Number of inputs is not equal to 2 for element-wise layer') + + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor) + input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor) + + try: + if not input_0_is_constant and not input_1_is_constant: + # Both inputs are variables + if len(input_0.shape) != len(input_1.shape): + # Use TensorFlow add to handle shape differences + layers[node_name] = tf_add(input_0, input_1, tf_name=f"{params['cleaned_name']}_add") + else: + # Use Keras Add layer + layers[node_name] = keras.layers.Add(name=f"{params['cleaned_name']}_add")([input_0, input_1]) + else: + raise ValueError('Operands are different.') + except (IndexError, ValueError): + logger.warning('Failed to use keras.layers.Add. Fallback to Lambda layer.') + + if input_0_is_constant and not input_1_is_constant: + # input_0 is constant, input_1 is variable + constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype)) + variable_input = input_1 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: x + const_val, + name=params['cleaned_name'] + )(variable_input) + else: + # Embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: x + constant_value, + name=params['cleaned_name'] + )(variable_input) + + elif not input_0_is_constant and input_1_is_constant: + # input_0 is variable, input_1 is constant + constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype)) + variable_input = input_0 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: x + const_val, + name=params['cleaned_name'] + )(variable_input) + else: + # Embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: x + constant_value, + name=params['cleaned_name'] + )(variable_input) + else: + # Both inputs are constants; compute the result now + layers[node_name] = input_0 + input_1 + + + +def convert_elementwise_mul(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert element-wise mul. 
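+    If both operands are compile-time constants the product is computed at conversion time; if only
+    one is constant it is applied through a Lambda layer; otherwise keras.layers.Multiply is used.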
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.mul') + + if len(node.input) != 2: + raise AttributeError('Number of inputs is not equal to 2 for element-wise layer') + + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor) + input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor) + + try: + if not input_0_is_constant and not input_1_is_constant: + mul = keras.layers.Multiply(name=f"{params['cleaned_name']}_mul") + layers[node_name] = mul([input_0, input_1]) + else: + raise ValueError('Operands are different.') + + except (IndexError, ValueError): + logger.warning('Failed to use keras.layers.Multiply. Fallback to Lambda layer.') + + if input_0_is_constant and not input_1_is_constant: + # input_0 is constant, input_1 is variable + constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype)) + variable_input = input_1 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: x * const_val, + name=params['cleaned_name'] + )(variable_input) + else: + # Cannot avoid embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: x * constant_value, + name=params['cleaned_name'] + )(variable_input) + + elif not input_0_is_constant and input_1_is_constant: + # input_0 is variable, input_1 is constant + constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype)) + variable_input = input_0 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: x * const_val, + name=params['cleaned_name'] + )(variable_input) + else: + # Cannot avoid embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: x * constant_value, + name=params['cleaned_name'] + )(variable_input) + else: + # Both inputs are constants; compute the result now + layers[node_name] = input_0 * input_1 + + +def convert_elementwise_sub(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert element-wise sub. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.sub') + + if len(node.input) != 2: + raise AttributeError('Number of inputs is not equal 2 for element-wise layer') + + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + input_0_is_constant = is_numpy(input_0) or isinstance(input_0, EagerTensor) + input_1_is_constant = is_numpy(input_1) or isinstance(input_1, EagerTensor) + + try: + if not input_0_is_constant and not input_1_is_constant: + sub = keras.layers.Subtract(name=f"{params['cleaned_name']}_sub") + layers[node_name] = sub([input_0, input_1]) + else: + raise ValueError('Operands are different.') + + except (IndexError, ValueError): + logger.warning('Failed to use keras.layers.Subtract. 
Fallback to Lambda layer.') + + if input_0_is_constant and not input_1_is_constant: + # input_0 is constant, input_1 is variable: constant - variable + constant_value = np.asarray(tf.cast(input_0, dtype=input_1.dtype)) + variable_input = input_1 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: const_val - x, + name=params['cleaned_name'] + )(variable_input) + else: + # Cannot avoid embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: constant_value - x, + name=params['cleaned_name'] + )(variable_input) + + elif not input_0_is_constant and input_1_is_constant: + # input_0 is variable, input_1 is constant: variable - constant + constant_value = np.asarray(tf.cast(input_1, dtype=input_0.dtype)) + variable_input = input_0 + + if np.all(constant_value == constant_value.flat[0]): + # Constant tensor has the same value throughout + const_val = constant_value.flat[0] + layers[node_name] = keras.layers.Lambda( + lambda x: x - const_val, + name=params['cleaned_name'] + )(variable_input) + else: + # Cannot avoid embedding the constant tensor + layers[node_name] = keras.layers.Lambda( + lambda x: x - constant_value, + name=params['cleaned_name'] + )(variable_input) + else: + # Both inputs are constants; compute the result now + layers[node_name] = input_0 - input_1 + + + +def convert_min(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Min layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) < 2: + raise AttributeError('Less than 2 inputs for min layer.') + + inputs = [ + ensure_tf_type(layers[inp], name="%s_const%i" % (keras_name, i + 1)) + for i, inp in enumerate(node.input) + ] + + # Broadcast the inputs to the same shape + input1, input2 = inputs + # Applying the minimum operation + min_output = tf_minimum(input1, input2, tf_name=f"{params['cleaned_name']}_min") + + layers[node_name] = min_output + + +def convert_max(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Max layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) < 2: + raise AttributeError('Less than 2 inputs for max layer.') + + inputs = [ + ensure_tf_type(layers[inp], name="%s_const%i" % (keras_name, i + 1)) + for i, inp in enumerate(node.input) + ] + + # Broadcast the inputs to the same shape + input1, input2 = inputs + # Applying the maximum operation + max_output = tf_maximum(input1, input2, tf_name=f"{params['cleaned_name']}_maximum") + + layers[node_name] = max_output + + +def convert_mean(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Mean layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + :TODO: Test if this supports multidirectional (i.e., Numpy-style) 
broadcasting as required + """ + if len(node.input) < 2: + raise AttributeError('Less than 2 inputs for mean layer.') + + inputs = list() + for i, inp in enumerate(node.input): + input_ = ensure_tf_type(layers[inp], layers[list(layers)[0]], name="%s_const%i" % (keras_name, i + 1)) + inputs.append(input_) + layers[node_name] = keras.layers.Average(name=f"{params['cleaned_name']}_mean")(inputs) + + + def convert_equal(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_equal(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_equal") + + + def convert_where(node, params, layers, lambda_func, node_name, keras_name): + if layers[node.input[0]].dtype != tf.bool: + casted = tf_cast(layers[node.input[0]], tf.bool, tf_name=f"{params['cleaned_name']}_cast") + else: + casted = layers[node.input[0]] + if layers[node.input[1]].dtype == np.int64 and is_numpy(layers[node.input[1]]): + # serialization doesn't work well for first argument if it is np array of type int64 + layers[node_name] = tf_where(tf_logical_not(casted, + tf_name=f"{params['cleaned_name']}_not" + ), + layers[node.input[2]], + layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_where_1") + else: + layers[node_name] = tf_where(casted, layers[node.input[1]], layers[node.input[2]], + tf_name=f"{params['cleaned_name']}_where_2") + + + def convert_scatter_nd(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ScatterND layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + :TODO: Test if this supports multidirectional (i.e., Numpy-style) broadcasting as required + """ + if len(node.input) < 3: + raise AttributeError('Less than 3 inputs') + + data = ensure_tf_type(layers[node.input[0]]) + indices = ensure_tf_type(layers[node.input[1]]) + updates = ensure_tf_type(layers[node.input[2]]) + layers[node_name] = tf_tensor_scatter_nd_update(data, indices, updates, + tf_name=f"{params['cleaned_name']}_scatter_nd") + + + def convert_round(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_round(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_round") + + + def convert_mod(node, params, layers, lambda_func, node_name, keras_name): + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + if params.get('fmod') == 1: + sign = tf_sign(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_mod_sign") + input_0 = tf_abs(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_abs_0") + input_1 = tf_abs(layers[node.input[1]], tf_name=f"{params['cleaned_name']}_abs_1") + layers[node_name] = tf_math_mod(input_0, input_1, tf_name=f"{params['cleaned_name']}_mod") * sign + else: + layers[node_name] = tf_math_mod(input_0, input_1, tf_name=f"{params['cleaned_name']}_mod") + + + def convert_bitshift(node, params, layers, lambda_func, node_name, keras_name): + direction = params.get("direction").decode() + if direction == "LEFT": + shifter_pointer = tf_bitwise_left_shift + elif direction == "RIGHT": + shifter_pointer = tf_bitwise_right_shift + else: + raise AttributeError("Onnx2Keras cannot convert the BitShift operator" + " since its 'direction' attribute is neither LEFT nor RIGHT") + layers[node_name] = shifter_pointer(tf_cast(layers[node.input[0]], tf.uint64, 
tf_name=f"{params['cleaned_name']}_bitshift_cast_0"), + tf_cast(layers[node.input[1]], tf.uint64, + tf_name=f"{params['cleaned_name']}_bitshift_cast_1"), + tf_name=f"{params['cleaned_name']}_bitshift") diff --git a/onnx2kerastl/exceptions.py b/onnx2kerastl/exceptions.py new file mode 100644 index 00000000..e7061dcb --- /dev/null +++ b/onnx2kerastl/exceptions.py @@ -0,0 +1,7 @@ +class UnsupportedLayer(Exception): + def __init__(self, layer_description: str): + self.layer_description = layer_description + + +class OnnxUnsupported(Exception): + pass diff --git a/onnx2kerastl/fft_layers.py b/onnx2kerastl/fft_layers.py new file mode 100644 index 00000000..33b1c7f5 --- /dev/null +++ b/onnx2kerastl/fft_layers.py @@ -0,0 +1,37 @@ +import tensorflow as tf +from .tfops_funcs import tf_signal_irfft, tf_signal_ifft, tf_signal_rfft, tf_signal_fft + + +def convert_dft(node, params, layers, lambda_func, node_name, keras_name): + # Currently, there's no way to run this - pytorch export not supported + Onnx Runtime version is too advanced. + raise AttributeError("DFT could not yet be converted - contact Tensorleap for support") + axis = params.get('axis', 1) + inverse = bool(params.get('inverse', 0)) + onesided = bool(params.get('onesided', 0)) + input_0 = layers[node.input[0]] + if len(node.input[0]) == 2: + fft_length = layers[node.input[0]] + else: + fft_length = None + rank_tensor = len(input_0.shape) + rotated_axis = False + if axis != rank_tensor-1 and axis != -1: # tf.fft works only on last dimension - permuting + if axis == 0: + raise AttributeError("FFT on the batch dimension isn't convertable") + output = tf.keras.layers.Permute((axis, rank_tensor-1), name=f"{params['cleaned_name']}_dft_permute_1")(input_0) + rotated_axis = True + else: + output = input_0 + if inverse and onesided: + output = tf_signal_irfft(input_0, fft_length=fft_length, tf_name=f"{params['cleaned_name']}_dft_irfft") + if inverse and not onesided: + output = tf_signal_ifft(input_0, fft_length=fft_length, tf_name=f"{params['cleaned_name']}_dft_ifft") + if onesided: + output = tf_signal_rfft(input_0, fft_length=fft_length, tf_name=f"{params['cleaned_name']}_dft_rfft") + if not onesided: + output = tf_signal_fft(input_0, fft_length=fft_length, tf_name=f"{params['cleaned_name']}_dft_fft") + if rotated_axis: + layers[node_name] = tf.keras.layers.Permute((rank_tensor-1, axis), + name=f"{params['cleaned_name']}_dft_permute_2")(output) + else: + layers[node_name] = output diff --git a/onnx2kerastl/layers.py b/onnx2kerastl/layers.py new file mode 100644 index 00000000..6c1378ed --- /dev/null +++ b/onnx2kerastl/layers.py @@ -0,0 +1,159 @@ +from .convolution_layers import convert_conv, convert_convtranspose +from .activation_layers import convert_relu, convert_elu, convert_lrelu, convert_selu, \ + convert_sigmoid, convert_tanh, convert_softmax, convert_prelu, convert_hard_sigmoid, convert_erf, convert_soft_plus, \ + convert_soft_sign, convert_mish, convert_gelu, convert_hard_swish +from .ltsm_layers import convert_lstm, convert_gru +from .operation_layers import convert_clip, convert_exp, convert_neg, convert_reduce_sum, convert_reduce_mean, \ + convert_log, convert_pow, convert_sqrt, convert_split, convert_cast, convert_floor, convert_identity, \ + convert_argmax, convert_reduce_l2, convert_reduce_max, convert_reciprocal, convert_abs, convert_not, convert_cosine, \ + convert_less, convert_less_equal, convert_and, convert_greater, convert_greater_equal, convert_xor, convert_or, \ + convert_trilu, convert_sign, convert_cosh, convert_sin, 
convert_sinh, convert_ceil, convert_acosh, convert_acos, \ + convert_asinh, convert_asin, convert_atanh, convert_atan, convert_bitwise_and, convert_argmin, convert_bitwise_xor, \ + convert_bitwise_or, convert_tan, convert_cumsum, convert_bitwise_not, convert_reduce_prod, convert_reduce_min, \ + convert_is_inf, convert_is_nan, convert_size, convert_non_zero, convert_gather_nd, convert_nms, convert_if, \ + convert_einsum +from .elementwise_layers import convert_elementwise_div, convert_elementwise_add, convert_elementwise_mul, \ + convert_elementwise_sub, convert_max, convert_min, convert_mean, convert_equal, convert_where, convert_scatter_nd, \ + convert_round, convert_mod, convert_bitshift +from .linear_layers import convert_gemm, convert_det +from .reshape_layers import convert_transpose, convert_shape, convert_gather, convert_unsqueeze, \ + convert_concat, convert_reshape, convert_flatten, convert_slice, convert_squeeze, convert_expand, convert_resize, \ + convert_tile, convert_gather_elements, col2im_onnx +from .constant_layers import convert_constant, convert_constant_of_shape, convert_one_hot +from .normalization_layers import convert_batchnorm, convert_instancenorm, convert_dropout, convert_lrn, convert_layernorm +from .pooling_layers import convert_avgpool, convert_global_max_pool, convert_maxpool, convert_global_avg_pool, convert_topk, convert_roi_align +from .padding_layers import convert_padding +from .upsampling_layers import convert_upsample +from .caffe2_layers import convert_alias_with_name, convert_resize_nearest +from .sampling_layers import convert_gridsample, convert_range, convert_unique +from .fft_layers import convert_dft + +AVAILABLE_CONVERTERS = { + 'Abs': convert_abs, + 'AliasWithName': convert_alias_with_name, + 'Conv': convert_conv, + 'ConvTranspose': convert_convtranspose, + 'Relu': convert_relu, + 'Resize': convert_resize, + 'Elu': convert_elu, + 'LeakyRelu': convert_lrelu, + 'Sigmoid': convert_sigmoid, + 'HardSigmoid': convert_hard_sigmoid, + 'Tanh': convert_tanh, + 'Selu': convert_selu, + 'Clip': convert_clip, + 'Exp': convert_exp, + 'Neg': convert_neg, + 'Log': convert_log, + 'Softmax': convert_softmax, + "ScatterND": convert_scatter_nd, + 'PRelu': convert_prelu, + 'ReduceMax': convert_reduce_max, + 'ReduceSum': convert_reduce_sum, + 'ReduceMean': convert_reduce_mean, + 'ReduceProd': convert_reduce_prod, + 'ReduceMin': convert_reduce_min, + 'Pow': convert_pow, + 'Slice': convert_slice, + 'Squeeze': convert_squeeze, + 'Expand': convert_expand, + 'Sqrt': convert_sqrt, + 'Split': convert_split, + 'Cast': convert_cast, + 'Floor': convert_floor, + 'Identity': convert_identity, + 'ArgMax': convert_argmax, + 'ReduceL2': convert_reduce_l2, + 'Max': convert_max, + 'Min': convert_min, + 'Mean': convert_mean, + 'Div': convert_elementwise_div, + 'Add': convert_elementwise_add, + 'Sum': convert_elementwise_add, + 'Mul': convert_elementwise_mul, + 'Sub': convert_elementwise_sub, + 'Gemm': convert_gemm, + 'MatMul': convert_gemm, + 'Transpose': convert_transpose, + 'Constant': convert_constant, + 'BatchNormalization': convert_batchnorm, + 'InstanceNormalization': convert_instancenorm, + 'Dropout': convert_dropout, + 'LRN': convert_lrn, + 'MaxPool': convert_maxpool, + 'GlobalMaxPool': convert_global_max_pool, + 'AveragePool': convert_avgpool, + 'GlobalAveragePool': convert_global_avg_pool, + 'Shape': convert_shape, + 'Gather': convert_gather, + 'Unsqueeze': convert_unsqueeze, + 'Concat': convert_concat, + 'Reshape': convert_reshape, + 'ResizeNearest': convert_resize_nearest, 
+ 'Pad': convert_padding, + 'Flatten': convert_flatten, + 'Upsample': convert_upsample, + 'Erf': convert_erf, + 'Reciprocal': convert_reciprocal, + 'ConstantOfShape': convert_constant_of_shape, + 'Equal': convert_equal, + 'Where': convert_where, + 'LSTM': convert_lstm, + 'GRU': convert_gru, + 'Tile': convert_tile, + 'GridSample': convert_gridsample, + 'Range': convert_range, + 'Not': convert_not, + 'Less': convert_less, + 'Sign': convert_sign, + 'Cosh': convert_cosh, + 'Sin': convert_sin, + 'Sinh': convert_sinh, + 'LessOrEqual': convert_less_equal, + "And": convert_and, + "Greater": convert_greater, + "GreaterOrEqual": convert_greater_equal, + "Xor": convert_xor, + "Or": convert_or, + 'Cos': convert_cosine, + "Trilu": convert_trilu, + "Ceil": convert_ceil, + "Acosh": convert_acosh, + "Acos": convert_acos, + "Asinh": convert_asinh, + "Asin": convert_asin, + "Atanh": convert_atanh, + "Atan": convert_atan, + "BitwiseAnd": convert_bitwise_and, + "BitwiseOr": convert_bitwise_or, + "BitwiseXor": convert_bitwise_xor, + "BitwiseNot": convert_bitwise_not, + "ArgMin": convert_argmin, + "OneHot": convert_one_hot, + "Round": convert_round, + "Tan": convert_tan, + "CumSum": convert_cumsum, + "IsInf": convert_is_inf, + "IsNaN": convert_is_nan, + "Size": convert_size, + "Det": convert_det, + "NonZero": convert_non_zero, + "GatherND": convert_gather_nd, + "Softplus": convert_soft_plus, + "Softsign": convert_soft_sign, + "Mish": convert_mish, + "Gelu": convert_gelu, + "HardSwish": convert_hard_swish, + "DFT": convert_dft, + "Mod": convert_mod, + "BitShift": convert_bitshift, + "TopK": convert_topk, + 'GatherElements': convert_gather_elements, + 'NonMaxSuppression': convert_nms, + 'Unique': convert_unique, + 'If': convert_if, + 'RoiAlign': convert_roi_align, + 'Einsum': convert_einsum, + 'LayerNormalization': convert_layernorm, + 'Col2Im': col2im_onnx, +} diff --git a/onnx2kerastl/linear_layers.py b/onnx2kerastl/linear_layers.py new file mode 100644 index 00000000..cf3c2fd2 --- /dev/null +++ b/onnx2kerastl/linear_layers.py @@ -0,0 +1,65 @@ +import keras +import logging +from .utils import is_numpy +from .tfops_funcs import tf_matmul, tf_shape, tf_concat, tf_reshape, tf_linalg_det, tf_linalg_matmul +import tensorflow as tf + + +def convert_gemm(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Linear / GEMM layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.gemm') + + # Check if Bias available + if len(node.input) == 3: + has_bias = True + keras_weights = [layers[node.input[1]], layers[node.input[2]]] + logger.debug('Convert GEMM with bias.') + elif len(node.input) == 2: + has_bias = False + keras_weights = [layers[node.input[1]]] + logger.debug('Convert GEMM without bias.') + else: + raise AttributeError('More than 3 or less than 2 inputs') + + # Linear can have additional flag to transpose weights + if 'transB' in params and params['transB'] == 1: + logger.debug('Transposing W matrix.') + keras_weights[0] = keras_weights[0].transpose() + # Estimate input/output neurons + input_channels, output_channels = keras_weights[0].shape[-2:] + logger.debug('Input units %s, output units %s.', input_channels, output_channels) + if len(layers[node.input[1]].shape) > 2: #N-dim tensor multipication Dense doesn't work + assert 
len(node.input) == 2 + layers[node_name] = tf_matmul(layers[node.input[0]], layers[node.input[1]], tf_name=f"{params['cleaned_name']}_matmul") + else: + if is_numpy(keras_weights[0]): + dense = keras.layers.Dense( + output_channels, + weights=keras_weights, name=f"{params['cleaned_name']}_gemm_dense", use_bias=has_bias + ) + + # The first input - always X + try: + layers[node_name] = dense(layers[node.input[0]]) + except ValueError: + mid_shape = tf_shape(layers[node.input[0]], out_type=tf.int32, tf_name=f"{params['cleaned_name']}_shape")[:-1] + reshape_shape = tf_concat([mid_shape, [input_channels]], axis=0, tf_name=f"{params['cleaned_name']}_concat") + reshaped_x = tf_reshape(layers[node.input[0]], reshape_shape, tf_name=f"{params['cleaned_name']}_reshape") + layers[node_name] = dense(reshaped_x) + + else: + #MatMul branch should point here. If there is a bug here - split GEMM from matmul + layers[node_name] = tf_linalg_matmul(layers[node.input[0]], layers[node.input[1]], tf_name=f"{params['cleaned_name']}_multiply") + + +def convert_det(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_linalg_det(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_det") diff --git a/onnx2kerastl/ltsm_layers.py b/onnx2kerastl/ltsm_layers.py new file mode 100644 index 00000000..cc63f7c9 --- /dev/null +++ b/onnx2kerastl/ltsm_layers.py @@ -0,0 +1,169 @@ +import logging + +import numpy as np +import tensorflow as tf + +from onnx2kerastl.customonnxlayer.onnxlstm import OnnxLSTM +from .exceptions import UnsupportedLayer +from .utils import ensure_tf_type +from .tfops_funcs import tf_cast, tf_squeeze, tf_transpose, tf_expand_dims + + +def convert_lstm(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert convolution layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.conv') + + if node.input[4] != '': + raise UnsupportedLayer('LSTM with non default sequence_lens') + if 'direction' in params: + direction = params['direction'] + if isinstance(direction, bytes): + direction = direction.decode("utf-8") + if direction != 'forward': + raise UnsupportedLayer(f"LSTM with {direction} direction") + should_return_state = len(node.output) == 3 + input_tensor = tf_transpose(ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name[0]), + perm=[1, 0, 2], + tf_name=f"{params['cleaned_name']}_lstm_first_transpose") + weights_w = layers[node.input[1]][0] + weights_r = layers[node.input[2]][0] + weights_b = layers[node.input[3]][0] + + initial_h_state = tf_cast(tf_squeeze(ensure_tf_type(layers[node.input[5]]), + axis=0, + tf_name=f"{params['cleaned_name']}_lstm_squeeze_h" + ), + input_tensor.dtype, + tf_name=f"{params['cleaned_name']}_lstm_cast_h") + initial_c_state = tf_cast( + tf_squeeze( + ensure_tf_type(layers[node.input[6]]), + axis=0, + tf_name=f"{params['cleaned_name']}_lstm_squeeze_c"), input_tensor.dtype, + tf_name=f"{params['cleaned_name']}_lstm_cast_c") + + tf.keras.backend.set_image_data_format("channels_last") + hidden_size = params['hidden_size'] + lstm_layer = OnnxLSTM(hidden_size, return_sequences=True, return_lstm_state=should_return_state) + res = lstm_layer(input_tensor, initial_h_state, initial_c_state) + # prepare the keras lstm weights from the onnx inputs: + w1 = 
np.concatenate([weights_w[0:hidden_size, :], weights_w[2 * hidden_size:3 * hidden_size, :], + weights_w[3 * hidden_size:4 * hidden_size, :], + weights_w[hidden_size:2 * hidden_size, :]]).transpose() + w2 = np.concatenate([weights_r[0:hidden_size, :], weights_r[2 * hidden_size:3 * hidden_size, :], + weights_r[3 * hidden_size:4 * hidden_size, :], + weights_r[hidden_size:2 * hidden_size, :]]).transpose() + weights_b_part1 = weights_b[:w2.shape[1]] + weights_b_part2 = weights_b[w2.shape[1]:] + bias1 = np.concatenate([weights_b_part1[0:hidden_size], weights_b_part1[2 * hidden_size:3 * hidden_size], + weights_b_part1[3 * hidden_size:4 * hidden_size], + weights_b_part1[hidden_size:2 * hidden_size]]).transpose() + bias2 = np.concatenate([weights_b_part2[0:hidden_size], weights_b_part2[2 * hidden_size:3 * hidden_size], + weights_b_part2[3 * hidden_size:4 * hidden_size], + weights_b_part2[hidden_size:2 * hidden_size]]).transpose() + bias = bias1 + bias2 + res.node.layer.set_weights([w1, w2, bias]) + tf.keras.backend.set_image_data_format("channels_first") + if should_return_state: + c_out = res[:, -1, :] + h_out = res[:, 0, :] + + # the shapes of the hidden and cell should be [num_directions, batch_size, hidden_size] + # for now we support only direction=forward so num_direction = 1 and we add directions dimension, + # if we support direction=bidirectional we should handle it well in the lstm layer and probably remove the + # expand dims here + c_out = tf.expand_dims(c_out, 0) + h_out = tf.expand_dims(h_out, 0) + + lstm_tensor = res[:, 1:-1, :] + else: + lstm_tensor = res + + # Add identical dense contains the lstm tensor for easy fetch of latent space + input_dim = int(lstm_tensor.shape[2]) + dense = tf.keras.layers.Dense( + units=input_dim, + use_bias=False, + kernel_initializer=tf.keras.initializers.Identity() + ) + + lstm_tensor_dense = dense(lstm_tensor) + + if should_return_state: + mul_o = lstm_tensor_dense[0, 0, 0] * 0 + c_out = tf.add(c_out, mul_o) + h_out = tf.add(h_out, mul_o) + + layers[node.output[1]] = h_out + layers[node.output[2]] = c_out + + lstm_tensor = lstm_tensor_dense + + lstm_tensor_in_onnx_order = tf_transpose(lstm_tensor, perm=[1, 0, 2], + tf_name=f"{params['cleaned_name']}_lstm_transpose") + lstm_tensor_in_onnx_order = tf_expand_dims(lstm_tensor_in_onnx_order, axis=1, + tf_name=f"{params['cleaned_name']}_lstm_expand_dims") + layers[node_name] = lstm_tensor_in_onnx_order + + +def convert_gru(node, params, layers, lambda_func, node_name, keras_name): + logger = logging.getLogger('onnx2keras.convert_gru') + if len(params["_outputs"]) > 1: + logger.warning( + "The GRU return hidden state is currently not supported. 
Accessing in deeper layers will raise Exception") + if params.get('activation_alpha') or params.get('activation_beta') or params.get('activations'): + raise NotImplementedError('Custom Activations in GRU not implemented') + if params.get('clip'): + raise NotImplementedError('Clip in GRU not implemented') + if params.get( + 'direction'): # After implementation - verify weights reshaping, and h default_size for all directions + raise NotImplementedError('direction in GRU not implemented') + else: + num_directions = 1 + if params.get('layout'): + raise NotImplementedError('GRU layout not supported (currently supporting opset 7)') + else: + layout = 0 + if node.input[4] != "": + raise NotImplementedError('GRU sequence_lens is not yet implemented') + hidden_size = params.get('hidden_size') + linear_before_reset = bool(params.get('linear_before_reset', 0)) + x = layers[node.input[0]] # [seq_length, batch_size, input_size] iff layout = 0 + w = layers[node.input[1]] + r = layers[node.input[2]] + b = layers.get(node.input[3], np.zeros((num_directions, 6 * hidden_size), dtype=np.float32)) + h = layers.get(node.input[5], np.zeros((1, x.shape[1] if x.shape[1] is not None else 1, hidden_size), dtype=np.float32)) + if isinstance(h, np.ndarray): + tensor_h = tf.convert_to_tensor(h) + else: + tensor_h = h + tf.keras.backend.set_image_data_format("channels_last") + gru_layer = tf.keras.layers.GRU(units=hidden_size, + reset_after=linear_before_reset, + return_sequences=True, + name=f"{params['cleaned_name']}_gru") + if layout == 0: + batch_first_x = tf_transpose(x, [1, 0, 2], tf_name=f"{params['cleaned_name']}_gru_transpose") + res = gru_layer(batch_first_x, initial_state=tf.convert_to_tensor(tensor_h[0])) + # gru_layer.build(tf.shape(batch_first_x)) + gru_layer.set_weights([w[0].swapaxes(0, 1), r[0].swapaxes(0, 1), b[0].reshape(-1, 3 * hidden_size)]) + # res = gru_layer(batch_first_x, initial_state=tf.convert_to_tensor(tensor_h[0])) + if num_directions == 1: + reshaped_res = tf_expand_dims(tf_transpose(res, + [1, 0, 2], + tf_name=f"{params['cleaned_name']}_gru_transpose"), + axis=1, + tf_name=f"{params['cleaned_name']}") + else: + raise NotImplementedError("GRU bidirectional output reshaping is not implemented") + layers[node_name] = reshaped_res + tf.keras.backend.set_image_data_format("channels_first") diff --git a/onnx2kerastl/main.py b/onnx2kerastl/main.py new file mode 100644 index 00000000..2864c599 --- /dev/null +++ b/onnx2kerastl/main.py @@ -0,0 +1,16 @@ +import onnx +from onnx2kerastl import onnx_to_keras + + +def main(): + onnx_file_path = "onnx_models/model.onnx" + + # Load ONNX model + onnx_model = onnx.load(onnx_file_path) + + # Call the converter (input - is the main model input name, can be different for your model) + k_model = onnx_to_keras(onnx_model, ['input_1']) + + +if __name__ == '__main__': + main() diff --git a/onnx2keras/normalization_layers.py b/onnx2kerastl/normalization_layers.py similarity index 58% rename from onnx2keras/normalization_layers.py rename to onnx2kerastl/normalization_layers.py index 76b9071e..f3f7b29e 100644 --- a/onnx2keras/normalization_layers.py +++ b/onnx2kerastl/normalization_layers.py @@ -1,8 +1,11 @@ -from tensorflow import keras -import tensorflow as tf -import tensorflow_addons as tfa import logging -from .utils import ensure_tf_type, ensure_numpy_type + +import keras +import numpy as np +import tensorflow as tf + +from .utils import ensure_tf_type +from .tfops_funcs import tf_math_reduce_mean, tf_math_reduce_variance, tf_sqrt, tf_rank, tf_concat, 
tf_reshape


 def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
@@ -17,20 +20,19 @@ def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
     :return: None
     """
     logger = logging.getLogger('onnx2keras.batchnorm2d')
-    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
+    input_0 = layers[node.input[0]]
     if len(node.input) == 5:
         weights = [
-            ensure_numpy_type(layers[node.input[1]]),
-            ensure_numpy_type(layers[node.input[2]]),
-            ensure_numpy_type(layers[node.input[3]]),
-            ensure_numpy_type(layers[node.input[4]])
+            layers[node.input[1]],
+            layers[node.input[2]],
+            layers[node.input[3]],
+            layers[node.input[4]]
         ]
     elif len(node.input) == 3:
         weights = [
-            ensure_numpy_type(layers[node.input[1]]),
-            ensure_numpy_type(layers[node.input[2]])
+            layers[node.input[1]],
+            layers[node.input[2]]
         ]
     else:
         raise AttributeError('Unknown arguments for batch norm')
@@ -38,19 +40,22 @@ def convert_batchnorm(node, params, layers, lambda_func, node_name, keras_name):
     eps = params['epsilon'] if 'epsilon' in params else 1e-05  # default epsilon
     momentum = params['momentum'] if 'momentum' in params else 0.9  # default momentum

+    if isinstance(keras_name, list):
+        keras_name = keras_name[0]
+
     if len(weights) == 2:
         logger.debug('Batch normalization without running averages')
         bn = keras.layers.BatchNormalization(
             axis=1, momentum=momentum, epsilon=eps,
             center=False, scale=False,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )
     else:
         bn = keras.layers.BatchNormalization(
             axis=1, momentum=momentum, epsilon=eps,
             weights=weights,
-            name=keras_name
+            name=f"{params['cleaned_name']}_bn"
         )

     layers[node_name] = bn(input_0)
@@ -67,26 +72,27 @@ def convert_instancenorm(node, params, layers, lambda_func, node_name, keras_nam
     :param keras_name: resulting layer name
     :return: None
     """
+    # based on https://github.com/onnx/onnx/blob/main/docs/Operators.md#InstanceNormalization
     logger = logging.getLogger('onnx2keras.instancenorm2d')
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)
     if len(node.input) == 3:
-        gamma = ensure_numpy_type(layers[node.input[1]])
-        beta = ensure_numpy_type(layers[node.input[2]])
+        scale = layers[node.input[1]]
+        bias = layers[node.input[2]]
     else:
         raise AttributeError('Unknown arguments for instance norm')

     epsilon = params['epsilon']
-
-    instance_norm = tfa.layers.InstanceNormalization(
-        axis=1,
-        epsilon=epsilon,
-        beta_initializer=tf.constant_initializer(beta),
-        gamma_initializer=tf.constant_initializer(gamma),
-        trainable=False
-    )
-    layers[node_name] = instance_norm(input_0)
+    dims_x = len(input_0.shape)
+    axis = list(range(2, dims_x))
+    var = tf_math_reduce_variance(input_0, axis=axis, keepdims=True, name=None, tf_name=f"{params['cleaned_name']}_var")
+    mean = tf_math_reduce_mean(input_0, axis=axis, keepdims=True, name=None, tf_name=f"{params['cleaned_name']}_mean")
+    dim_ones = (1,) * (dims_x - 2)
+    scale = np.reshape(scale, (-1, *dim_ones))
+    bias = np.reshape(bias, (-1, *dim_ones))
+    layers[node_name] = (input_0 - mean) * scale / tf_sqrt(var + epsilon, tf_name=f"{params['cleaned_name']}_sqrt")\
+                        + bias


 def convert_dropout(node, params, layers, lambda_func, node_name, keras_name):
@@ -109,7 +115,7 @@ def convert_dropout(node, params, layers, lambda_func, node_name, keras_name):
     input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

     ratio = params['ratio'] if 'ratio' in params else 0.0
-    lambda_layer = keras.layers.Dropout(ratio, name=keras_name)
+    lambda_layer = 
keras.layers.Dropout(ratio, name=f"{params['cleaned_name']}_dropout") layers[node_name] = lambda_layer(input_0) @@ -146,6 +152,42 @@ def target_layer(x, depth_radius=params['size'], bias=params['bias'], alpha=para return layer - lambda_layer = keras.layers.Lambda(target_layer, name=keras_name) + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_lrn") layers[node_name] = lambda_layer(input_0) lambda_func[keras_name] = target_layer + + +def convert_layernorm(node, params, layers, lambda_func, node_name, keras_name): + axis = params.get('axis', -1) + epsilon = params.get('epsilon', 1e-05) + stash_type = params.get('stash_type') + if stash_type is not None: + raise Exception("LayerNorm stash_type attribute is not implemented") + input_x = layers[node.input[0]] + weight = layers[node.input[1]] + if len(node.input) > 2: + bias = layers[node.input[2]] + else: + bias = None + center = True if bias is not None else False + layer_norm = tf.keras.layers.LayerNormalization( + axis=axis, + epsilon=epsilon, + center=center, + name=f"{params['cleaned_name']}_LayerNorm" + ) + input_shape = input_x.shape.as_list() + if input_shape[axis] is None: + # reshape input such that the axis dim would be non-None (set by weights) + tf_input_shape = tf.shape(input_x) + if axis < 0: + axis = tf_rank(input_x, tf_name=f"{params['cleaned_name']}_LayerNorm_rank")._inferred_value[0] + axis + tf_new_shape = tf_concat([tf_input_shape[:axis], [weight.shape[0]], tf_input_shape[axis+1:]], axis=-1, + tf_name=f"{params['cleaned_name']}_LayerNorm_new_shape") + input_x = tf_reshape(input_x, tf_new_shape, tf_name=f"{params['cleaned_name']}_LayerNorm_reshape_none_axis") + layer_norm.build(input_x.shape) + if center: + layer_norm.set_weights([weight, bias]) + else: + layer_norm.set_weights([weight]) + layers[node_name] = layer_norm(input_x) diff --git a/onnx2kerastl/operation_layers.py b/onnx2kerastl/operation_layers.py new file mode 100644 index 00000000..dd4455f8 --- /dev/null +++ b/onnx2kerastl/operation_layers.py @@ -0,0 +1,931 @@ +import logging + +import keras +import numpy as np +from keras import backend as K +import tensorflow as tf + +from .customonnxlayer.onnxeinsum import OnnxEinsumLayer +from .exceptions import UnsupportedLayer +from .utils import is_numpy, ensure_tf_type, ensure_float +from .tfops_funcs import tf_math_abs, tf_clip_by_value, tf_math_negative, K_mean, tf_math_reduce_prod, \ + tf_math_reduce_min, tf_math_pow, tf_math_sqrt, tf_cast, tf_argmax, tf_expand_dims, tf_math_reciprocal, \ + tf_logical_not, tf_math_sign, tf_math_sin, tf_math_cosh, tf_math_ceil, tf_math_acosh, tf_math_acos, \ + tf_math_asinh, tf_math_asin, tf_math_atanh, tf_math_tan, tf_math_atan, tf_math_sinh, tf_math_less_equal, \ + tf_bitwise_invert, tf_bitwise_bitwise_and, tf_bitwise_bitwise_or, tf_bitwise_bitwise_xor, tf_cos, \ + tf_math_greater, tf_math_greater, tf_math_greater_equal, tf_logical_and, tf_math_logical_xor, tf_math_logical_or, \ + tf_argmin, tf_math_is_inf, tf_math_is_nan, tf_size, tf_not_equal, tf_where, tf_transpose, tf_gather_nd, \ + tf_multiply, tf_image_non_max_suppression, tf_ones_like, tf_stack, tf_concat + +# Handle python 2.7 import error +try: + from collections.abc import Iterable +except ImportError: + from collections import Iterable + + +def convert_clip(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert clip layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for 
keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.clip') + if len(node.input) != 1: + assert AttributeError('More than 1 input for clip layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + clip_min = params.get('min') + clip_max = params.get('max') + if clip_min is None or clip_max is None: + if len(node.input) == 1: + raise UnsupportedLayer('Clip without max or min params') + if len(node.input) > 1 and node.input[1] != '': + clip_min = float(layers[node.input[1]]) + if len(node.input) == 3 and node.input[2] != '': + clip_max = float(layers[node.input[2]]) + + if clip_min is None and clip_max is None: + raise UnsupportedLayer('Clip without max or min params') + + if clip_min is None: + clip_min = tf.float32.min + + if clip_max is None: + clip_max = tf.float32.max + + if input_0.dtype == tf.int32: + clip_min = int(clip_min) + clip_max = int(clip_max) + + layers[node_name] = tf_clip_by_value(input_0, clip_min, clip_max, tf_name=f"{params['cleaned_name']}_clip") + + +def convert_log(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Log layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for log layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + def target_layer(x): + import keras.backend as K + return K.log(x) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_log") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + + +def convert_neg(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Neg layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for log layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + layers[node_name] = tf_math_negative(input_0, tf_name=f"{params['cleaned_name']}_neg") + + +def convert_exp(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Exp layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for log layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + def target_layer(x): + import keras.backend as K + return K.exp(x) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_exp") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + + +def convert_reduce_sum(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reduce sum. 
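+
+    Depending on the opset, the reduction axes arrive either as the `axes`
+    attribute or as the optional second input tensor; the body below checks
+    `params` first and falls back to `node.input[1]`. A rough standalone
+    equivalent of the reduction itself (illustrative only, assuming a static
+    `axis` value and `keep_dims` flag):
+
+        import keras.backend as K
+        summed = K.sum(x, axis=axis, keepdims=keep_dims)  # e.g. (2, 3) -> (2, 1) for axis=1
+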
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for reduce sum layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + if 'axes' not in params: + axis = layers[node.input[1]] + else: + axis = params['axes'] + + keep_dims = True + if 'keepdims' in params: + if params['keepdims'] == 0: + keep_dims = False + + def target_layer(x, axis=axis, keep_dims=keep_dims): + import keras.backend as K + return K.sum(x, keepdims=keep_dims, axis=axis) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_sum") + layers[node_name] = lambda_layer(input_0) + layers[node_name].set_shape(layers[node_name].shape) + lambda_func[keras_name] = target_layer + + +def convert_reduce_mean(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reduce mean. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for reduce mean layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + param_keepdims = params.get('keepdims', 1) + keepdims = param_keepdims == 1 + axes = params['axes'] + layers[node_name] = K_mean(input_0, keepdims=keepdims, axis=axes, tf_name=f"{params['cleaned_name']}_mean") + + +def convert_reduce_max(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reduce max. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for reduce max layer.') + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + def target_layer(x, axis=params.get('axes'), keepdims=params['keepdims']): + import keras.backend as K + return K.max(x, keepdims=(keepdims == 1), axis=axis) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_max") + layers[node_name] = lambda_layer(input_0) + layers[node_name].set_shape(layers[node_name].shape) + lambda_func[keras_name] = target_layer + + +def convert_reduce_min(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reduce max. 
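+
+    (This converter handles ONNX ReduceMin.) Axes may come either from the
+    `axes` attribute or from the optional second input; when
+    `noop_with_empty_axes` is set and the `axes` attribute is absent, the input
+    is passed through unchanged. Roughly equivalent, assuming static `axes` and
+    `keepdims`: `tf.math.reduce_min(x, axis=axes, keepdims=keepdims)`.
+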
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if params.get("axes") is not None: # opset 13 + axes = params.get("axes") + elif len(node.input) == 2: + axes = layers.get(node.input[1]) + else: + axes = None + noop_with_empty_axes = bool(params.get("noop_with_empty_axes", False)) + keepdims = params.get("keepdims", True) + if noop_with_empty_axes and params.get("axes") is None: + layers[node_name] = layers[node.input[0]] + else: + if axes is None: + layers[node_name] = tf_math_reduce_min(layers[node.input[0]], keepdims=keepdims, + tf_name=f"{params['cleaned_name']}_min") + else: + layers[node_name] = tf_math_reduce_min(layers[node.input[0]], axis=axes, keepdims=keepdims, + tf_name=f"{params['cleaned_name']}_min") + +def convert_reduce_prod(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reduce max. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if params.get("axes") is not None: # opset 13 + axes = params.get("axes") + elif len(node.input) == 2: + axes = layers.get(node.input[1]) + else: + axes = None # default is to reduce over all dimensions + noop_with_empty_axes = bool(params.get("noop_with_empty_axes", False)) + keepdims = bool(params.get("keepdims", True)) + if noop_with_empty_axes and params.get("axes") is None: + layers[node_name] = layers[node.input[0]] + else: + layers[node_name] = tf_math_reduce_prod(layers[node.input[0]], + axis=axes, + keepdims=keepdims, + tf_name=f"{params['cleaned_name']}_reduce") + + +def convert_pow(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Pow layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 2: + assert AttributeError('More than 2 inputs for pow layer.') + layers[node_name] = tf_math_pow(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_pow") + + +def convert_sqrt(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Sqrt layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for sqrt layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf_math_sqrt(input_0, tf_name=f"{params['cleaned_name']}_sqrt") + + +def convert_split(node, params, layers, lambda_func, node_name, keras_names): + """ + Convert Split layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ 
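+    # Reference sketch (hypothetical helper, not called by the converter): with a
+    # static list of split sizes, ONNX Split reduces to consecutive slices along
+    # `axis`, which is what the slicing loop below builds with `slice` objects.
+    def _split_sketch(x, split_sizes, axis=0):
+        outputs, start = [], 0
+        for size in split_sizes:
+            index = [slice(None)] * len(x.shape)
+            index[axis] = slice(start, start + size)
+            outputs.append(x[tuple(index)])
+            start += size
+        return outputs
+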
+ if len(node.input) != 1: + assert AttributeError('More than 1 input for split layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_names[0]) + axis = params.get("axis", 0) + try: # onnx opset12 + splits = params["split"] + except KeyError as e: # onnx opset 14 + if len(node.input) > 1: + splits = layers[node.input[1]] + else: + if layers[node.input[0]].shape[axis] % 2 != 0: + raise AttributeError("No splits supplied to the split block but there are uneven number of channels") + else: + splits = [layers[node.input[0]].shape[axis] // 2] * 2 + if not isinstance(splits, Iterable): + # This might not work if `split` is a tensor. + chunk_size = K.int_size(input_0)[axis] // splits + splits = (chunk_size,) * splits + cur = 0 + for i, split in enumerate(splits): + if len(splits) > 1: + node_name = params['_outputs'][i] + + def target_layer(x, axis=axis, start_i=cur, end_i=cur + split): + slices = [slice(None, None)] * len(K.int_shape(x)) + slices[axis] = slice(start_i, end_i) + return x[tuple(slices)] + + layers[node_name] = target_layer(input_0) + cur += split + + +def convert_cast(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Cast layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.cast') + + if len(node.input) != 1: + assert AttributeError('More than 1 input for cast layer.') + + if is_numpy(layers[node.input[0]]): + logger.debug('Cast numpy array') + + cast_map = { + 1: np.float32, + 2: np.uint8, + 3: np.int8, + 5: np.int16, + 6: np.int32, + 7: np.int64, + 9: np.bool, + 10: np.float16, + 11: np.double, + } + cast_result = layers[node.input[0]] + result = (layers[node.input[0]] == None) + if isinstance(result, (bool, np.bool_)) and not result: + cast_result = cast_map[params['to']](layers[node.input[0]]) + elif not isinstance(result, (bool, np.bool_)) and not np.any(result): + cast_result = cast_map[params['to']](layers[node.input[0]]) + layers[node_name] = cast_result + else: + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + check_cast_map = { + 1: tf.float32, + 2: tf.uint8, + 3: tf.int8, + 5: tf.int16, + 6: tf.int32, + 7: tf.int64, + 9: tf.bool, + 10: tf.float16, + 11: tf.double, + } + if input_0.dtype == check_cast_map[params['to']] and not isinstance(input_0, (tf.Tensor, np.ndarray)): + # casting a tensor to the same dtype create placeholder:0 tensor which does not process well in engine + # trying to ignore the conversion (since its identity) might result in wrong types due to the way + # keras changes types on serialization and deserialization. + # So we up-cast to the most informative type then downcast. + # I'm Sorry. 
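+        # (Why the double round-trip is safe here: bool, float16, float32 and
+        # int32 values are all exactly representable in float64's 53-bit
+        # mantissa, so casting up to tf.double and back is lossless for them;
+        # int64 values beyond 2**53 would be the exception, and a same-dtype
+        # tf.double input cannot use the trick at all, hence the
+        # NotImplementedError below.)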
+ if input_0.dtype != tf.double: + input_0 = tf_cast(input_0, tf.double, tf_name=f"{params['cleaned_name']}_precast") + else: + # We can add an If operation to the graph here if needed + raise NotImplementedError("Does not support tf.double casting into itself") + + def target_layer(x, dtype=params['to'], k_name=f"{params['cleaned_name']}"): + import tensorflow as tf + cast_map = { + 1: tf.float32, + 2: tf.uint8, + 3: tf.int8, + 5: tf.int16, + 6: tf.int32, + 7: tf.int64, + 9: tf.bool, + 10: tf.float16, + 11: tf.double, + } + return tf_cast(x, cast_map[dtype], tf_name=f'{k_name}_cast') + + layers[node_name] = target_layer(input_0) + + +def convert_floor(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Floor layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for floor layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + def target_layer(x): + # Floor is absent in keras.backend + import tensorflow as tf + return tf.floor(x) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_floor") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + + +def convert_abs(node, params, layers, lambda_func, node_name, keras_name): + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + res = tf_math_abs(input_0, tf_name=f'{params["cleaned_name"]}_abs') + layers[node_name] = res + + +def convert_identity(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Identity layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for itentity layer.') + + layers[node_name] = layers[node.input[0]] + + +def convert_argmax(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ArgMax layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for argmax layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + axis = params.get("axis", -1) + should_keep_dims = params.get("keepdims", True) + + argmax = tf_argmax(input_0, axis=axis, tf_name=f"{params['cleaned_name']}_argmax") + if should_keep_dims: + argmax = tf_expand_dims(argmax, axis=axis, tf_name=f"{params['cleaned_name']}_expand") + layers[node_name] = argmax + + +def convert_argmin(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ArgMax layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) 
!= 1: + assert AttributeError('More than 1 input for argmax layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + axis = params.get("axis", -1) + should_keep_dims = params.get("keepdims", True) + + argmin = tf_argmin(input_0, axis=axis, tf_name=f"{params['cleaned_name']}_argmin") + if should_keep_dims: + argmin = tf_expand_dims(argmin, axis=axis, tf_name=f"{params['cleaned_name']}_argmin_unsqueeze") + layers[node_name] = argmin + + +def convert_reduce_l2(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert ReduceL2 layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 1: + assert AttributeError('More than 1 input for reduce_l2 layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + axis = params.get("axes", [-1]) + keepdims = params.get("keepdims", 0) + + def target_layer(x, axis=axis, keepdims=keepdims): + import tensorflow as tf + if isinstance(axis, list) and len(axis) == 1: + axis = axis[0] + return tf.norm(x, axis=axis, keepdims=keepdims == 1) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_reduce_l2") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + + +def convert_reciprocal(node, params, layers, lambda_func, node_name, keras_name): + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf_math_reciprocal(input_0, tf_name=f"{params['cleaned_name']}_reciprocal") + + +def convert_not(node, params, layers, lambda_func, node_name, keras_name): + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf_logical_not(input_0, tf_name=f"{params['cleaned_name']}_not") + + +def convert_less(node, params, layers, lambda_func, node_name, keras_name): + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + if input_1.dtype == input_0.dtype and not isinstance(input_0, (tf.Tensor, np.ndarray)): + if input_0.dtype != tf.double: + # To see why this is needed, see inline comments on convert_cast + input_0 = tf_cast(input_0, dtype=tf.double, tf_name=f"{params['cleaned_name']}_less_cast") + else: + raise NotImplementedError("Casting a tensor to itself is not supported") + + def target_layer(y, x=input_0): + x = tf.cast(x, y.dtype) + return tf.math.less(x, y) + + lambda_less = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_less") + less_output = lambda_less(input_1) + layers[node_name] = less_output + + +def convert_sign(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_sign(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_sign") + + +def convert_sin(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_sin(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_sin") + + +def convert_cosh(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_cosh(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_cosh") + + +def convert_ceil(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_ceil(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_ceil") + + +def convert_acosh(node, params, layers, 
lambda_func, node_name, keras_name): + layers[node_name] = tf_math_acosh(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_acosh") + + +def convert_acos(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_acos(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_acos") + + +def convert_asinh(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_asinh(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_asinh") + + +def convert_asin(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_asin(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_asin") + + +def convert_atanh(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_atanh(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_atanh") + + +def convert_tan(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_tan(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_tan") + + +def convert_atan(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_atan(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_atan") + + +def convert_sinh(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_sinh(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_sinh") + + +def convert_less_equal(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_less_equal(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_less_equal") + + +def convert_bitwise_not(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_bitwise_invert(tf.cast(layers[node.input[0]], tf.int32), + tf_name=f"{params['cleaned_name']}_bitwise_not") + + +def convert_bitwise_and(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_bitwise_bitwise_and(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_bitwise_and") + + +def convert_bitwise_or(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_bitwise_bitwise_or(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_bitwise_or") + + +def convert_bitwise_xor(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_bitwise_bitwise_xor(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_bitwise_xor") + + +def convert_cosine(node, params, layers, lambda_func, node_name, keras_name): + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + layers[node_name] = tf_cos(input_0, tf_name=f"{params['cleaned_name']}_cos") + + +def convert_greater(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_greater(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_greater") + + +def convert_greater_equal(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_greater_equal(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_greater_equal") + + +def convert_and(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_logical_and(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_and") + + +def convert_xor(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = 
tf_math_logical_xor(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_xor") + + +def convert_or(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_logical_or(layers[node.input[0]], layers[node.input[1]], + tf_name=f"{params['cleaned_name']}_or") + + +def convert_trilu(node, params, layers, lambda_func, node_name, keras_name): + x = layers[node.input[0]] + k = 0 + if len(node.input) > 1: + k_tensor = layers[node.input[1]] + try: + k = int(tf.keras.backend.get_value(k_tensor)) + except: + k = 0 # fallback if symbolic + upper = params.get("upper", 1) + + # cannot use tf.experimental.numpy.tril/triu because this is not an eager tensor and we dont know the shape + def trilu_fn(tensor): + shape = tf.shape(tensor) + m, n = shape[-2], shape[-1] + row_idx = tf.range(m)[:, None] + col_idx = tf.range(n)[None, :] + if upper: + mask = row_idx <= (col_idx - k) + else: + mask = row_idx >= (col_idx - k) + mask = tf.cast(mask, tensor.dtype) + mask = tf.broadcast_to(mask, shape) + return tensor * mask + + result = tf.keras.layers.Lambda(trilu_fn, name=keras_name)(x) + layers[node_name] = result + + +def convert_cumsum(node, params, layers, lambda_func, node_name, keras_name): + exclusive = bool(params.get("exclusive", 0)) + reverse = bool(params.get("reverse", 0)) + layers[node_name] = tf.math.cumsum(layers[node.input[0]], layers[node.input[1]], + exclusive=exclusive, reverse=reverse) + + +def convert_is_inf(node, params, layers, lambda_func, node_name, keras_name): + if params.get("detect_negative") is not None or params.get("detect_negative") is not None: + raise AttributeError("Unsupported params detected in isInf conversion: detect_negative/detect_positive") + layers[node_name] = tf_math_is_inf(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_is_inf") + + +def convert_is_nan(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_math_is_nan(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_is_nan") + + +def convert_size(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_size(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_size") + + +def convert_non_zero(node, params, layers, lambda_func, node_name, keras_name): + input_tensor = layers[node.input[0]] + condition = tf_not_equal( + input_tensor, + tf.zeros_like(input_tensor), + tf_name=f"{params['cleaned_name']}_non_zero_unequal" + ) + nonzero_indices = tf_where(condition, tf_name=f"{params['cleaned_name']}_non_zero_where") + nonzero_result = tf_transpose(nonzero_indices, tf_name=f"{params['cleaned_name']}_non_zero_transpose") + nonzero_result = tf_cast(nonzero_result, tf.int32, tf_name=f"{params['cleaned_name']}_non_zero_cast") + layers[node_name] = nonzero_result + # tf.experimental.numpy.nonzero(layers[node.input[0]]) was not giving the right results + + +def convert_gather_nd(node, params, layers, lambda_func, node_name, keras_name): + input_tensor = layers[node.input[0]] + indices_tensor = layers[node.input[1]] + batch_dims = params.get("batch_dims", 0) + # tesnsorflow implementation of gather_nd, in any case it fails please try also the pseudo_gathernd function here + # instead. 
basically it flattens the params and use normal gather to simulate the result of gathernd + res = tf_gather_nd(input_tensor, indices_tensor, batch_dims=batch_dims, + tf_name=f"{params['cleaned_name']}_gather_nd") + layers[node_name] = res + + +def pseudo_gathernd(input_tensor, indices_tensor): + params_shape = input_tensor.shape + idx_shape = indices_tensor.shape + idx_dims = idx_shape[-1] + gather_shape = params_shape[idx_dims:] + params_flat = tf.reshape( + input_tensor, + tf.concat([[-1], gather_shape], axis=0), + ) + axis_step = tf.math.cumprod( + params_shape[:idx_dims], + exclusive=True, + reverse=True, + ) + + NUMPY_DTYPES_TO_TF_DTYPES = { + np.dtype('float16'): tf.float16, + np.dtype('float32'): tf.float32, + np.dtype('float64'): tf.float64, + + np.dtype('uint8'): tf.uint8, + np.dtype('uint16'): tf.uint16, + np.dtype('uint32'): tf.uint32, + np.dtype('uint64'): tf.uint64, + + np.dtype('int8'): tf.int8, + np.dtype('int16'): tf.int16, + np.dtype('int32'): tf.int32, + np.dtype('int64'): tf.int64, + + np.dtype('bool_'): tf.bool, + } + + mul = tf.math.multiply( + indices_tensor, + tf.cast( + axis_step, + dtype=NUMPY_DTYPES_TO_TF_DTYPES[indices_tensor.dtype] \ + if isinstance(indices_tensor.dtype, np.dtype) else indices_tensor.dtype, + ), + ) + indices_flat = tf.reduce_sum( + mul, + axis=-1, + ) + result_flat = tf.gather( + params_flat, + indices_flat, + ) + if len(idx_shape) > 0 and len(idx_shape[:-1]) > 0 and idx_shape[:-1][0] is not None: + pseudo_gathernd_res = tf.reshape( + result_flat, + tf.concat([idx_shape[:-1], gather_shape], axis=0), + ) + else: + pseudo_gathernd_res = result_flat + + return pseudo_gathernd_res + + +def convert_nms(node, params, layers, lambda_func, node_name, keras_name): + scores = layers[node.input[1]] + boxes = layers[node.input[0]] + + batch_size = boxes.shape[0] + + if batch_size is None: + raise AttributeError("Onnx2kerras: NMS conversion does not support dynamic batch." 
+ "Please change batch to static or remove NMS from model") + center_point_box = params.get("center_point_box", 0) + if center_point_box != 0: + raise AttributeError("Onnx2kerras: We do not support the center_point_box parameter") + + iou_threshold = 0 + score_threshold = float('-inf') + max_output_size = [2 ** 30] + if len(node.input) > 2: + max_output_size = [min(np.squeeze(layers.get(node.input[2], [2 ** 30])), 2 ** 30)] + if len(node.input) > 3: + iou_threshold = layers.get(node.input[3], [0]) + if len(node.input) > 4: + score_threshold = ensure_float(layers.get(node.input[4], float('-inf'))) + if isinstance(score_threshold, np.ndarray): + score_threshold = score_threshold[0] + num_classes = scores.shape[1] + all_results = [] + try: + iou_threshold = iou_threshold[0] + except IndexError: # iou threshold is already a scalar + pass + for batch in range(batch_size): + for c_class in range(num_classes): + indices = tf_image_non_max_suppression(boxes=boxes[batch], + scores=scores[batch, c_class], + max_output_size=tf.cast(max_output_size[0], tf.int32), + iou_threshold=iou_threshold, + score_threshold=score_threshold, + tf_name=f"{params['cleaned_name']}_nms_{batch}_{c_class}") + ones_indices = tf_ones_like(indices, tf_name=f"{params['cleaned_name']}_nms_ones_{batch}_{c_class}") + class_tensor = c_class * ones_indices + batch_tensor = batch * ones_indices + res = tf_stack([batch_tensor, class_tensor, indices], axis=-1 + , tf_name=f"{params['cleaned_name']}_nms_stack_{batch}_{c_class}") + all_results.append(res) + layers[node_name] = tf_cast(tf_concat(all_results, axis=0, tf_name=f"{params['cleaned_name']}_nms_concat"), + dtype=tf.int64, + tf_name=f"{params['cleaned_name']}_nms_cast") + + +def convert_if(node, params, layers, lambda_func, node_name, keras_name): + if len(layers[node.input[0]].shape) == 0: + cond = layers[node.input[0]] + else: + cond = layers[node.input[0]][0] + outputs = [layers[node.attribute[i].g.output[0].name] for i in range(2)] + outputs_dtypes = [output.dtype for output in outputs] + outputs_numpy_dtypes = [outputs_dtypes[i] if is_numpy(outputs[i]) else outputs_dtypes[i].as_numpy_dtype for i in + range(2)] + if outputs_numpy_dtypes[0] != outputs_numpy_dtypes[1]: + smallest_idx = np.argmin([np.iinfo(outputs_numpy_dtypes[i]).max for i in range(2)]) + if is_numpy(outputs[smallest_idx]): + outputs[smallest_idx] = outputs[smallest_idx].astype(outputs_numpy_dtypes[1 - smallest_idx]) + else: + outputs[smallest_idx] = tf_cast(outputs[smallest_idx], tf.as_dtype(outputs_dtypes[1 - smallest_idx]), + tf_name=f"{params['cleaned_name']}_if_cast") + in_vec = outputs[0] + if is_numpy(in_vec): # if this is a constant it would not be serialized well. connect it to input + # f_then = lambda x: in_vec + new_dtype = in_vec.dtype.type + + # The Tf conversion is required to pass args serialization in leap-model-parser + def get_empty_array(x, dtype=new_dtype, keras_name=keras_name): + return tf.convert_to_tensor(np.array([]), dtype=new_dtype, name=f'{params["cleaned_name"]}_if_empty_arr') + + if len(in_vec) == 0: # empty arrays does not serialize well in lambdas. 
+ then_lambda = get_empty_array + else: + then_lambda = lambda x: in_vec + lambda_layer = tf.keras.layers.Lambda(then_lambda, name=f"{params['cleaned_name']}_if_serizlize_arr_helper") + if not K.is_keras_tensor(cond): + raise NotImplementedError( + "We do not support an if where both the then branch and the in-vector are constants") + then_output = lambda_layer(cond) # this assumes + else: + then_output = outputs[0] + layers[node_name] = tf.keras.backend.switch(cond, then_output, outputs[1]) + + +def convert_einsum(node, params, layers, lambda_func, node_name, keras_name): + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + equation = params['equation'].decode('utf-8') + + is_input_0_constant = isinstance(input_0, (tf.Tensor, np.ndarray)) + is_input_1_constant = isinstance(input_1, (tf.Tensor, np.ndarray)) + if is_input_0_constant and is_input_1_constant: + layers[node_name] = tf.einsum(equation, *[input_0, input_1], name=keras_name) + elif is_input_0_constant: + layers[node_name] = OnnxEinsumLayer(equation, input_0, 0)(input_1, name=keras_name) + elif is_input_1_constant: + layers[node_name] = OnnxEinsumLayer(equation, input_1, 1)(input_0, name=keras_name) + else: + layers[node_name] = OnnxEinsumLayer(equation, None, None)([input_0, input_1], name=keras_name) diff --git a/onnx2kerastl/padding_layers.py b/onnx2kerastl/padding_layers.py new file mode 100644 index 00000000..cca51044 --- /dev/null +++ b/onnx2kerastl/padding_layers.py @@ -0,0 +1,115 @@ +import keras +import logging +from .utils import ensure_tf_type +from .utils import is_numpy +from .tfops_funcs import tf_pad +import tensorflow as tf + + +def convert_padding(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Constant layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + # It's binary by-default + logger = logging.getLogger("onnx2keras.padding") + if 'mode' in params: + params['mode'] = params['mode'].decode('ascii') + else: + params['mode'] = 'constant' + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + if 'pads' in params: + pads = params['pads'] + else: + pads = layers[node.input[1]] + + if (is_numpy(pads) or not keras.backend.is_keras_tensor(pads)) and not any(pads): + layers[node_name] = input_0 + return + + if params['mode'] == 'constant': + + if 'value' in params and params['value'] != 0.0: + raise AssertionError('Cannot convert non-zero padding') + if pads.shape[0] == 6 and len(layers[node.input[0]].shape) == 3: + layers[node_name] = tf_pad(input_0, [[pads[0], pads[3]], [pads[1], pads[4]], [pads[2], pads[5]]], + tf_name=f"{params['cleaned_name']}_pad") + # Magic ordering + else: + if isinstance(pads, keras.engine.keras_tensor.KerasTensor) and pads.shape[0] == 8: + padding_layer = lambda x: tf_pad(x, + [[pads[0], pads[4]], + [pads[1], pads[5]], + [pads[2], pads[6]], + [pads[3], pads[7]]], + tf_name=f"{params['cleaned_name']}_pad_3") + elif pads.shape[0] == 8: + padding_layer = keras.layers.ZeroPadding2D( + padding=((pads[2], pads[6]), (pads[3], pads[7])), + name=f"{params['cleaned_name']}_pad_0" + ) + elif pads.shape[0] == 12: # Check for rank 6 input + padding_layer = keras.layers.Lambda( + lambda x: tf.pad( + x, + [ + [0, 0], # Batch dimension + [0, 0], # Channels dimension + [pads[2], pads[8]], # d1 dimension + 
[pads[3], pads[9]], # d2 dimension + [pads[4], pads[10]], # d3 dimension + [pads[5], pads[11]], # d4 dimension + ], + mode='CONSTANT' + ), + name=f"{params['cleaned_name']}_pad_1" + ) + layers[node_name] = padding_layer(input_0) + else: + logger.warning("Caution - no test yet") + padding_layer = keras.layers.ZeroPadding3D( + padding=((pads[2], pads[7]), (pads[3], pads[8]), (pads[4], pads[9])), + name=f"{params['cleaned_name']}_pad_2" + ) + layers[node_name] = padding_layer(input_0) + elif params['mode'] == 'reflect': + if pads.shape[0] == 6: + result = tf_pad(input_0, [[pads[0], pads[3]], [pads[1], pads[4]], [pads[2], pads[5]]], mode='REFLECT', + tf_name=f"{params['cleaned_name']}_reflect_pad") + layers[node_name] = result + else: + def target_layer(x, pads=pads): + if pads.shape[0] == 8: + layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') + else: + logger.warning("Caution - no test yet") + layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'REFLECT') + return layer + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_reflect") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + elif params['mode'] == 'edge': + + def target_layer(x, pads=pads): + import tensorflow as tf + if pads.shape[0] == 8: # TODO not tested yet + layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'SYMMETRIC') + else: + logger.warning("Caution - no test yet") + layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[7]], [pads[3], pads[8]], [pads[4], pads[9]]], 'SYMMETRIC') + return layer + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_pad_edge") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + + else: + raise AttributeError('Unknown padding') diff --git a/onnx2kerastl/pooling_layers.py b/onnx2kerastl/pooling_layers.py new file mode 100644 index 00000000..3272bbb6 --- /dev/null +++ b/onnx2kerastl/pooling_layers.py @@ -0,0 +1,429 @@ +import keras +import logging + +from .utils import ensure_tf_type, is_numpy +from .tfops_funcs import tf_reshape, tf_rank, tf_concat, tf_shape, tf_cast, tf_image_crop_and_resize, tf_ones, \ + tf_nn_avg_pool, tf_nn_max_pool +import numpy as np +import string +import random +import tensorflow as tf +import keras.backend as K + + +def convert_maxpool(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert MaxPooling layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.maxpool') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + kernel_shape = params['kernel_shape'] + stride_shape = params['strides'] + + pads = params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] + pad = 'valid' + + if all([shape % 2 == 1 for shape in kernel_shape]) and \ + all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ + all([shape == 1 for shape in stride_shape]): + pad = 'same' + logger.debug('Use `same` padding parameters.') + else: + logger.warning('Unable to use `same` padding. 
Add ZeroPadding2D layer to fix shapes.') + padding_name = f"{params['cleaned_name']}_maxpool" + '_pad' + if len(kernel_shape) == 2: + padding = None + + if len(pads) == 2 and (pads[0] > 0 or pads[1] > 0): + padding = (pads[0], pads[1]) + elif len(pads) == 4 and (pads[0] > 0 or pads[1] > 0 or pads[2] > 0 or pads[3] > 0): + padding = ((pads[0], pads[2]), (pads[1], pads[3])) + + if padding is not None: + padding_layer = keras.layers.ZeroPadding2D( + padding=padding, + name=padding_name + ) + layers[padding_name] = input_0 = padding_layer(input_0) + else: # 3D padding + padding_layer = keras.layers.ZeroPadding3D( + padding=pads[:len(stride_shape)], + name=padding_name + ) + layers[padding_name] = input_0 = padding_layer(input_0) + if len(kernel_shape) == 2: + pooling = keras.layers.MaxPooling2D( + pool_size=kernel_shape, + strides=stride_shape, + padding=pad, + name=f"{params['cleaned_name']}_maxpool", + data_format='channels_first' + ) + else: + pooling = keras.layers.MaxPooling3D( + pool_size=kernel_shape, + strides=stride_shape, + padding=pad, + name=f"{params['cleaned_name']}_maxpool", + data_format='channels_first' + ) + ceil_mode = params.get('ceil_mode', False) + if ceil_mode: + if pad == 'valid': + output_shape = ((np.array(input_0.shape[-len(kernel_shape):]) - np.array(kernel_shape)) / np.array( + stride_shape)) + 1 + else: + output_shape = np.floor((np.array(input_0.shape[-len(kernel_shape):]) - 1) / np.array(stride_shape)) + 1 + if not np.array([output_shape[i].is_integer() for i in range(len(output_shape))]).all(): + padding = [0 if output_shape[i].is_integer() else stride_shape[i] for i in range(len(kernel_shape))] + rand_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3)) + if len(kernel_shape) == 2: + layers[node_name + "_pre_" + rand_string] = keras.layers.ZeroPadding2D( + ((0, padding[0]), (0, padding[1])), name=f"{params['cleaned_name']}_pre")(input_0) + else: + layers[node_name + "_pre_" + rand_string] = keras.layers.ZeroPadding3D( + ((0, padding[0]), (0, padding[1]), (0, padding[2])), name=f"{params['cleaned_name']}_pre")(input_0) + input_0 = layers[node_name + "_pre_" + rand_string] + layers[node_name] = pooling(input_0) + +def convert_global_max_pool(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert GlobalMaxPool layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + tensor_dim = len(input_0.shape) + if tensor_dim == 3: + global_pool = keras.layers.GlobalMaxPooling1D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_max_pool_3") + elif tensor_dim == 4: + global_pool = keras.layers.GlobalMaxPooling2D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_max_pool_4") + elif tensor_dim == 5: + global_pool = keras.layers.GlobalMaxPooling3D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_max_pool_5") + else: + raise NotImplementedError("Global max pooling of dims < 3 or dims > 5 is not supported") + input_0 = global_pool(input_0) + new_shape = input_0.shape.as_list() + new_shape = new_shape[1:] + new_shape.extend([1] * (tensor_dim - 2)) + reshape_layer = keras.layers.Reshape(new_shape, name=f"{params['cleaned_name']}_global_max_pool_reshape") + input_0 = 
reshape_layer(input_0) + + layers[node_name] = input_0 + + +def convert_avgpool(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert AvgPooling layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.avgpool') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + kernel_shape = params['kernel_shape'] + stride_shape = params['strides'] + + pads = params['pads'] if 'pads' in params else [0, 0, 0, 0, 0, 0] + + if not any(pads): + pad = 'valid' + + elif all([shape % 2 == 1 for shape in kernel_shape]) and \ + all([kernel_shape[i] // 2 == pads[i] for i in range(len(kernel_shape))]) and \ + all([shape == 1 for shape in stride_shape]): + pad = 'same' + logger.debug('Use `same` padding parameters.') + else: + pad = 'valid' + logger.warning('Unable to use `same` padding. Add ZeroPadding2D layer to fix shapes.') + padding_name = f"{params['cleaned_name']}_avgpool" + '_pad' + if len(kernel_shape) == 2: + padding_layer = keras.layers.ZeroPadding2D( + padding=pads[:len(stride_shape)], + name=padding_name + ) + else: # 3D padding + padding_layer = keras.layers.ZeroPadding3D( + padding=pads[:len(stride_shape)], + name=padding_name + ) + layers[padding_name] = input_0 = padding_layer(input_0) + + if len(kernel_shape) == 2: + pooling = keras.layers.AveragePooling2D( + pool_size=kernel_shape, + strides=stride_shape, + padding=pad, + name=f"{params['cleaned_name']}_avgpool", + data_format='channels_first' + ) + elif len(kernel_shape) == 1: + pooling = keras.layers.AveragePooling1D( + pool_size=kernel_shape, + strides=stride_shape, + padding=pad, + name=f"{params['cleaned_name']}_avgpool", + data_format='channels_first' + ) + else: + pooling = keras.layers.AveragePooling3D( + pool_size=kernel_shape, + strides=stride_shape, + padding=pad, + name=f"{params['cleaned_name']}_avgpool", + data_format='channels_first' + ) + layers[node_name] = pooling(input_0) + + +def convert_global_avg_pool(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert GlobalAvgPool layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + tensor_dim = len(input_0.shape) + if tensor_dim == 3: + global_pool = keras.layers.GlobalAveragePooling1D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_avg_pool_3", + keepdims=True) + elif tensor_dim == 4: + global_pool = keras.layers.GlobalAveragePooling2D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_avg_pool_4", + keepdims=True) + elif tensor_dim == 5: + global_pool = keras.layers.GlobalAveragePooling3D(data_format='channels_first', + name=f"{params['cleaned_name']}_global_avg_pool_5", + keepdims=True) + else: + raise NotImplementedError("Global average pooling of dims < 3 or dims > 5 is not supported") + input_0 = global_pool(input_0) + layers[node_name] = input_0 + + +def convert_topk(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert topk layer + :param node: current operation node + 
:param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + axis = params.get('axis', -1) + largest = bool(params.get('largest', 1)) + to_sort = bool(params.get('sorted', 1)) + x = layers[node.input[0]] + k = layers[node.input[1]][0] + if not is_numpy(k) and not K.is_keras_tensor(k): # Eager tensor does not serialize well + k = k.numpy().astype(np.int32) + if not largest: + in_tensor = -x + else: + in_tensor = x + + def target_layer(composed_input, to_sort=to_sort, axis=axis): + in_tensor = composed_input[..., :-1] + k = composed_input[..., -1] + for i in range(len(k.shape)): + k = k[0] + k = tf.cast(k, tf.int32) + rank = len(in_tensor.shape) + + if axis >= rank - 1 or axis == -1: + permuted = in_tensor + else: + ord_permute = np.arange(rank) + ord_permute[axis] = rank - 1 + ord_permute[-1] = axis + permuted = tf.transpose(in_tensor, ord_permute) + + topk_res = tf.math.top_k(permuted, k=k, sorted=to_sort) + values_pre_permute = topk_res[0] + indices_pre_permute = topk_res[1] + topk_concat = tf.stack([values_pre_permute, tf.cast(indices_pre_permute, tf.float32)]) + if axis >= rank - 1 or axis == -1: + out = topk_concat + else: + ord_permute = [0] + (ord_permute + 1).tolist() + out = tf.transpose(topk_concat, ord_permute) + return out + in_shape = tf_shape(in_tensor, tf_name=f"{params['cleaned_name']}_topk_in_shape") + k_needed_shape_possible_keras_tensor = tf_concat( + [(in_shape)[:-1],[1]], axis=-1,tf_name=f"{params['cleaned_name']}_topk_k_needed_shape") + + if hasattr(k_needed_shape_possible_keras_tensor, "_inferred_value"): #is keras tensor + k_needed_shape = k_needed_shape_possible_keras_tensor._inferred_value + else: + k_needed_shape = k_needed_shape_possible_keras_tensor + k_unsqueezed = tf_ones(k_needed_shape, tf_name=f"{params['cleaned_name']}_topk_k_shape")*\ + tf_cast(k, tf.float32, tf_name=f"{params['cleaned_name']}_topk_k_cast") + k_reshaped = tf_cast(k_unsqueezed, in_tensor.dtype, tf_name=f"{params['cleaned_name']}_topk_k_reshaped") + composed_input = tf_concat([in_tensor, k_reshaped], axis=-1, + tf_name=f"{params['cleaned_name']}_topk_k_concat") + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_topk") + result = lambda_layer(composed_input) + pos_axis = axis if axis > 0 else in_shape.shape[0]-1 + new_shape = tf_concat([in_shape[:pos_axis], [k], in_shape[pos_axis+1:]], axis=-1, + tf_name=f"{params['cleaned_name']}_topk_output_shape") + values = tf_reshape(result[0], new_shape, + tf_name=f"{params['cleaned_name']}_topk_values_reshape") + indices = tf_reshape(tf_cast(result[1], + tf.int32, + tf_name=f"{params['cleaned_name']}_topk_indices_cast"), + new_shape, + tf_name=f"{params['cleaned_name']}_topk_indices_reshape") + if not largest: + out_tensor = -values + else: + out_tensor = values + layers[params['_outputs'][0]] = out_tensor + layers[params['_outputs'][1]] = indices + + +def convert_roi_align(node, params, layers, lambda_func, node_name, keras_name): + # extract params + output_height = params.get('output_height', 1) + output_width = params.get('output_width', 1) + sampling_ratio = params.get('sampling_ratio', 0) + spatial_scale = params.get('spatial_scale', 1.0) + mode = params.get('mode', 'avg') + if isinstance(mode, bytes): + mode = mode.decode('utf-8') + + feature_map = layers[node.input[0]] + rois = layers[node.input[1]] + batch_indices = 
layers[node.input[2]] + + adaptive_ratio = False + if sampling_ratio <= 0: + sampling_ratio = int((output_height + output_width) / 2) + adaptive_ratio = True + + rois = rois * spatial_scale + box_ind = tf_cast(batch_indices, tf.int32, tf_name=f"{params['cleaned_name']}_roi_cast_batch") + if keras.backend.image_data_format() == 'channels_first': + fm_shape = tf_shape(feature_map, tf_name=f"{params['cleaned_name']}_roi_hw")[2:] # H, W + else: + raise NotImplementedError("To support channels_last in RoiAlign - need to remove permutes") + # extract inputs + x0 = rois[:, 0:1] + y0 = rois[:, 1:2] + x1 = rois[:, 2:3] + y1 = rois[:, 3:4] + if not adaptive_ratio: + crop_shape = ( + output_height * sampling_ratio, + output_width * sampling_ratio, + ) + spacing_w = (x1 - x0) / tf_cast(crop_shape[1], dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop1") + spacing_h = (y1 - y0) / tf_cast(crop_shape[0], dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop0") + nx0 = (x0 + spacing_w / 2) / tf_cast(fm_shape[1] - 1, dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_fm_1") + ny0 = (y0 + spacing_h / 2) / tf_cast(fm_shape[0] - 1, dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_fm_0") + + nw = spacing_w * tf_cast( + crop_shape[1] - 1, + dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop_2" + ) / tf_cast( + fm_shape[1] - 1, + dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop_3" + ) + nh = spacing_h * tf_cast( + crop_shape[0] - 1, + dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop_3" + ) / tf_cast( + fm_shape[0] - 1, + dtype=tf.float32, + tf_name=f"{params['cleaned_name']}_roi_cast_crop_4" + ) + else: + roi_width = x1 - x0 + roi_height = y1 - y0 + fm_shape_1 = tf_cast(fm_shape[1] - 1, dtype=tf.float32, tf_name=f"{params['cleaned_name']}_roi_cast_fm_1") + fm_shape_0 = tf_cast(fm_shape[0] - 1, dtype=tf.float32, tf_name=f"{params['cleaned_name']}_roi_cast_fm_0") + nx0 = x0 / fm_shape_1 + ny0 = y0 / fm_shape_0 + nw = roi_width / fm_shape_1 + nh = roi_height / fm_shape_0 + + boxes = tf_concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1, + tf_name=f"{params['cleaned_name']}_roi_concat" + ) + + permuted_features = keras.layers.Permute([2, 3, 1], + name=f"{params['cleaned_name']}_roi_channels_last" + )(feature_map) # move to channels last + cropped_tensor = tf_image_crop_and_resize( + permuted_features, + boxes, + tf.cast(box_ind, dtype=tf.int32), + crop_size=( + output_height * sampling_ratio, + output_width * sampling_ratio, + ), + method='bilinear', + tf_name=f"{params['cleaned_name']}_roi_crop_resize" + ) + + pooled_tensor = None + if mode.lower() == 'avg': + pooled_tensor = tf_nn_avg_pool( + input=cropped_tensor, + ksize=[1, sampling_ratio, sampling_ratio, 1], + strides=[1, sampling_ratio, sampling_ratio, 1], + padding='SAME', + tf_name=f"{params['cleaned_name']}_roi_avg_pool", + data_format='NHWC' + ) + elif mode.lower() == 'max': + pooled_tensor = tf_nn_max_pool( + input=cropped_tensor, + ksize=[1, sampling_ratio, sampling_ratio, 1], + strides=[1, sampling_ratio, sampling_ratio, 1], + padding='SAME', + tf_name=f"{params['cleaned_name']}_roi_max_pool", + data_format='NHWC' + ) + else: + raise ValueError(f"Unknown pooling mode: {mode}") + layers[node_name] = keras.layers.Permute([3, 1, 2], + name=f"{params['cleaned_name']}_roi_channels_first")(pooled_tensor) diff --git a/onnx2kerastl/reshape_layers.py b/onnx2kerastl/reshape_layers.py new file mode 100644 index 00000000..c6e5c7a7 --- /dev/null +++ 
b/onnx2kerastl/reshape_layers.py @@ -0,0 +1,884 @@ +import logging + +import keras +import numpy as np +import tensorflow as tf +from keras import backend as K +from keras.engine.keras_tensor import KerasTensor +from keras.layers import SlicingOpLambda, Lambda +from typing import Union +from .utils import is_numpy, ensure_tf_type, unsqueeze_tensors_of_rank_one +from .tfops_funcs import tf_reshape, tf_shape, tf_cast, tf_stack, tf_image_resize, tf_strided_slice,\ + tf_squeeze, tf_transpose, tf_where, tf_gather, tf_range, tf_reduce_sum, tf_abs, tf_expand_dims, tf_concat, \ + tf_shape, tf_tile, tf_fill, tf_gather_nd, tf_reduce_sum, tf_zeros_like, tf_multiply, tf_tensor_scatter_nd_update,\ + tf_ones + + +def convert_transpose(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert transpose. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.transpose') + input_name = node.input[0] + + if params['perm'][0] != 0: + logger.warning('Can\'t permute batch dimension. Result may be wrong.') + if is_numpy(layers[input_name]): + logger.warning('Transposing numpy array.') + layers[node_name] = np.transpose(layers[input_name], axes=params['perm']) + else: + layers[node_name] = tf_transpose(layers[input_name], perm=params['perm'], + tf_name=f"{params['cleaned_name']}_transpose") + else: + permute = keras.layers.Permute(params['perm'][1:], name=f"{params['cleaned_name']}_transpose") + layers[node_name] = permute(layers[input_name]) + + +def convert_shape(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert shape. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.shape') + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + logger.debug('Actual shape:') + logger.debug(np.array(input_0.shape)) + is_unknown_tensor = input_0.shape == None + if not is_unknown_tensor and ( + not K.is_keras_tensor(input_0) or not any([input_0.shape[i] == None for i in range(len(input_0.shape))])): + shapes = [] + for i in input_0.shape: + if i is not None: + shapes.append(i) + else: + shapes.append(None) + layers[node_name] = np.array(shapes) + else: + layers[node_name] = tf_shape(input_0, out_type=tf.int64, tf_name=f"{params['cleaned_name']}_shape") + + +def optimize_constant_array_for_serialization(input_0: tf.Tensor, params, indices: Union[np.ndarray, tf.Tensor], logger): + """ + Some models contain repetition in the constant array tf.gather gathers from. + In such cases, serialization takes a long time. serializing 3000 elements take ~3 sec (and scale linearly). + To optimize for this case, we detect the repetition, take the modulu of the repetition from indices, + and making the param array shorter. + This is especially useful for models containing pre-top-k elements. 
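+    Example (illustrative sketch, not part of the original docstring):
+        const = np.array([0, 1, 2, 3, 0, 1, 2, 3])  # repeating "columns" pattern
+        idx = np.array([5, 6])
+        # const[idx] == [1, 2]; the shortened pair gives the same result:
+        # np.array([0, 1, 2, 3])[idx % 4] == [1, 2], so only 4 elements get serialized.
+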
+ Args: + input_0: the array to gather on + indices: the indices to gather + + Returns: + input_0: the new array to gather on + indices: the new indices to gather + """ + # This Optimization is a must for models with Pre-Top-K needs + logger.debug('onnx2keras.gather - Encountered long gather. ' + 'Trying to shorten it for easy serialization') + max_inp = tf.reduce_max(input_0) + min_inp = tf.reduce_min(input_0) + if len(input_0) % (max_inp - min_inp + 1) == 0: + range_inp = tf_range(min_inp, max_inp + 1, dtype=input_0.dtype, + tf_name=f"{params['cleaned_name']}_optimize_range") + reshape_columns = tf_reshape(input_0, [-1, max_inp + 1], tf_name=f"{params['cleaned_name']}_optimize_reshape") + if tf_reduce_sum(tf_abs((reshape_columns - range_inp[None, :]), + tf_name=f"{params['cleaned_name']}_optimize_abs"), + tf_name=f"{params['cleaned_name']}_optimize_sum") == 0: + # This tests for a long constant array that has a repeating series like [0,1,2,3,0,1,2,3] + logger.debug('onnx2keras.gather - Shortening sequence - columns') + indices = indices % (max_inp + 1) + input_0 = range_inp + else: + repetition_len = np.argmin(input_0 == input_0[0]) + if repetition_len > 0 and len(input_0) % repetition_len == 0: + # This tests for a long constant array that has a repeating series like [0,0,0,1,1,1,2,2,2] + reshaped_rows = tf_reshape(input_0, [-1, repetition_len], + tf_name=f"{params['cleaned_name']}_optimize_reshape") + first_row = reshaped_rows[:, 0] + if tf_reduce_sum(tf_abs(reshaped_rows - first_row[:, None], + tf_name=f"{params['cleaned_name']}_optimize_abs_2"), + tf_name=f"{params['cleaned_name']}_optimize_sum_2") == 0: + logger.debug('onnx2keras.gather - Shortening sequence - rows') + indices = indices // repetition_len + input_0 = first_row + return input_0, indices + + +def convert_gather(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert gather. 
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.gather') + OPTIMIZE_ARRAY_LENGTH = 50000 + axis = params.get('axis', 0) + if is_numpy(layers[node.input[0]]) and is_numpy(layers[node.input[1]]) and not 'is_embedding' in params: + logger.debug('Gather from numpy array') + if axis == 0: + gathered = np.array(layers[node.input[0]][layers[node.input[1]]]) + elif axis == 1: + gathered = np.array(layers[:, node.input[0]][layers[node.input[1]]]) + elif axis == 2: + gathered = np.array(layers[:, :, node.input[0]][layers[node.input[1]]]) + elif axis == 3: + gathered = np.array(layers[:, :, :, node.input[0]][layers[node.input[1]]]) + else: + raise AttributeError('Can\'t gather by axis more than 3.') + + if gathered.dtype == np.object0: + try: + gathered = gathered.astype(np.int32) + except TypeError: + pass + layers[node_name] = gathered + else: + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + if isinstance(layers[node.input[1]], (np.integer, int)) or \ + (not isinstance(layers[node.input[1]], np.ndarray) and \ + K.is_keras_tensor(layers[node.input[1]])): + #indices are keras tensor or int + indices = layers[node.input[1]] + else: #indices are numpy/tf.eager + indices = layers[node.input[1]] + if not is_numpy(layers[node.input[1]]): + indices = indices.numpy() + indices = indices.tolist() + if "is_embedding" in params: + if len(input_0.shape) == 2: + emb = tf.keras.layers.Embedding(input_0.shape[0], input_0.shape[1], weights=[layers[node.input[0]]], + name=f"{params['cleaned_name']}_gather_emb") + if isinstance(indices, list): + layers[node_name] = emb(np.array(indices)) + else: + layers[node_name] = emb(indices) + else: + raise AttributeError("Cannot transform gather into embedding with non 2D array") + else: + if tf.is_tensor(indices) and indices.dtype not in [tf.int16, tf.int32, tf.int64]: + indices = tf_cast(indices, tf.int32, tf_name=f"{params['cleaned_name']}_gather_cast_indices") + + if type(indices) == int: + out_type = tf.int32 + else: + if isinstance(indices, list): + out_type = np.array(indices).dtype + else: + out_type = indices.dtype + + dim_len = tf_shape(input_0, out_type=out_type, + tf_name=f"{params['cleaned_name']}_gather_input_shape")[axis] # support None + if isinstance(indices, list): + for i in range(len(indices)): + try: + if indices[i] < 0: + indices[i] = int(indices[i]) + dim_len + except TypeError: + pass + + + if isinstance(indices, (int, np.integer)) and indices < 0: + indices += dim_len + if tf.is_tensor(indices): + indices = tf_where(indices < 0, indices + dim_len, indices, + tf_name=f"{params['cleaned_name']}_gather_indices_where") + if isinstance(input_0, np.ndarray) or not K.is_keras_tensor(input_0): + if len(input_0) > OPTIMIZE_ARRAY_LENGTH: + input_0, indices = optimize_constant_array_for_serialization(input_0, params, indices, logger) + layers[node_name] = tf_gather(input_0, indices, axis=axis, tf_name=f"{params['cleaned_name']}_gather") + + +def convert_concat(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert concat. 
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.concat') + + layer_input = [layers[node.input[i]] for i in range(len(node.input))] + + if all([is_numpy(layers[node.input[i]]) for i in range(len(node.input))]): + logger.debug('Concat numpy arrays.') + layers[node_name] = np.concatenate(layer_input, axis=params['axis']) + else: + logger.debug('Concat Keras layers.') + if len(layer_input) > 1: + if not np.array([tf.is_tensor(layer_input[i]) and K.is_keras_tensor(layer_input[i]) for i in + range(len(layer_input))]).all() or any( + [layer_input[i].shape == None for i in range(len(layer_input))]): + try: + layers[node_name] = tf_concat(layer_input, axis=params['axis'], + tf_name=f"{params['cleaned_name']}_concat") + except Exception as ex: + # might be due to type mismatch between different inputs of tf.concat + raise + + else: + layer_input = unsqueeze_tensors_of_rank_one(layer_input, axis=params['axis'], name=params['cleaned_name']) + layers[node_name] = keras.layers.concatenate(inputs=layer_input, + axis=params['axis'], + name=f"{params['cleaned_name']}_concat_2") + else: + layers[node_name] = layer_input[0] + + +def convert_reshape(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert reshape. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.reshape') + + input_0 = layers[node.input[0]] + input_1 = layers[node.input[1]] + + logger.debug(f'input_0: {str(input_0)}') + logger.debug(f'input_1: {str(input_1)}') + + if is_numpy(input_1): + dims_to_set_as_zero = None + dims_to_keep_unchanged = None + allow_zero = params.get('allowzero', False) + contains_zero_dim = np.isin(input_1, 0).any() + contains_infer_dim = np.isin(input_1, -1).any() + if allow_zero: + if contains_infer_dim and contains_zero_dim: + raise ValueError( + "Reshape parameter 'allowzero' is set and reshaping argument contains both '0' dim and '-1'" + "which is not allowed" + f"node name: {node_name}") + elif contains_zero_dim: + dims_to_set_as_zero = np.argwhere(input_1 == 0) + elif not allow_zero and contains_zero_dim: + dims_to_keep_unchanged = np.squeeze(np.argwhere(input_1 == 0)) + + logger.debug('The second argument is numpy array.') + if is_numpy(input_0): + logger.debug('The first argument is numpy array. Apply np.reshape.') + layers[node_name] = np.reshape(input_0, np.int32(input_1)) + else: + if params['change_ordering']: + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + # Fix critical issue with NHWC + if input_1[0] is None and input_1[1] == -1: + logger.warning('!!! 
IMPORTANT INFORMATION !!!') + logger.warning('The target shape if [None, -1] that means flatten.') + logger.warning('But the target ordering is NHWC, so we cant simply perform flatten') + logger.warning('The layer will be converted as lambda with tf.transpose') + logger.warning('---') + + def target_layer(x): + import tensorflow as tf + x = tf.transpose(x, [0, 3, 1, 2]) + return x + + lambda_layer = keras.layers.Lambda(target_layer, + name="%s_CHW" % f"{params['cleaned_name']}_reshape_lambda") + layers[node_name] = lambda_layer(input_0) + lambda_func[keras_name] = target_layer + else: + layers[node_name] = input_0 + + reshape = keras.layers.Reshape(np.int32(input_1[1:]), + name=f"{params['cleaned_name']}_reshape_input_2_1") # keras reshape ignores batch dimension but onnx axis do not + layers[node_name] = reshape(layers[node_name]) + + else: + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + input_0_shape = input_0.shape + if len(input_0_shape) > 0: + first_mismatch = np.argmin(np.array(input_0_shape[:len(input_1)]) == input_1) + else: # does not need to reshape dynamicalyy (skip next section) + first_mismatch = 0 + if (input_1 == None).any() and (np.array(input_0_shape) == None).any() and len(input_1) < len( + input_0_shape) \ + and input_1[first_mismatch] == -1: # reshape end + end_match_arr = np.array(input_0_shape[-len(input_1):]) == input_1 + end_idx_match = np.argmax((np.array(input_0_shape[-len(input_1):]) == input_1)) + end_idx_match = end_idx_match + len(input_0_shape) - len(input_1) if end_idx_match > first_mismatch \ + and end_match_arr[ + end_idx_match] else len( + input_0_shape) + 1 + ten_shape = tf_shape(input_0, tf_name=f"{params['cleaned_name']}_shape") + layers[node_name] = tf_reshape(input_0, + [*ten_shape[:first_mismatch], -1, + *ten_shape[end_idx_match:]], + tf_name=f"{params['cleaned_name']}_reshape") + else: + logger.debug('The first argument is Keras/tf layer. Apply keras.Reshape.') + logger.debug('Target shape :') + logger.debug(np.int32(input_1[1:])) + if len(np.int32(input_1[1:])) == 1 and np.int32(input_1[1:])[0] == -1: + if input_0.shape.rank == 1: + input_0 = tf_expand_dims(input_0, 0, tf_name=f"{params['cleaned_name']}_expand_dims") + logger.debug('The first argument is Keras/tf layer. Apply keras.Flatten.') + flatten = keras.layers.Reshape(target_shape=input_1[1:], + name=f"{params['cleaned_name']}_reshape_input_2_2") + layers[node_name] = flatten(input_0) + elif len(input_1) == 1 and input_1[0] == -1: + layers[node_name] = tf_reshape(input_0, [-1], tf_name=f"{params['cleaned_name']}_reshape_1") + else: + if len(input_0.shape) == 0 or ( + input_0.shape[0] != input_1[0] and input_1[0] != 0): # keras reshape don't work + new_shape = input_1.copy() + if dims_to_set_as_zero is not None: + new_shape[dims_to_set_as_zero] = 0 + elif dims_to_keep_unchanged is not None: + new_shape[dims_to_keep_unchanged] = np.array(input_0.shape)[dims_to_keep_unchanged] + layers[node_name] = tf_reshape(input_0, new_shape, + tf_name=f"{params['cleaned_name']}_reshape_2") + else: + reshape = keras.layers.Reshape(np.int32(input_1[1:]), + name=f"{params['cleaned_name']}_reshape_input_2_3") + layers[node_name] = reshape(input_0) + else: # dynamic reshape + layers[node_name] = tf_reshape(input_0, input_1, tf_name=f"{params['cleaned_name']}_reshape_3") + + +def convert_unsqueeze(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert unsqueeze. 
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.unsqueeze') + + if len(node.input) != 1: + if len(node.input) == 2: + params['axes'] = layers[node.input[1]] + else: + raise AttributeError('Number of inputs is not equal 1 for unsqueeze layer') + + if len(np.unique(params['axes'])) < len(params['axes']): + raise AttributeError(f"The specified axes contains duplicates values: {params['axes']}") + + if is_numpy(layers[node.input[0]]): + logger.debug('Work with numpy types.') + layers[node_name] = layers[node.input[0]] + for axis in params['axes']: + layers[node_name] = np.expand_dims(layers[node_name], axis) + else: + unsqueezed_input = layers[node.input[0]] + for axis in params['axes']: + unsqueezed_input = tf_expand_dims(unsqueezed_input, axis, + tf_name=f"{params['cleaned_name']}_expand_dims_ax_{str(axis)}") + + layers[node_name] = unsqueezed_input + + +def convert_flatten(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert flatten. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.flatten') + + if len(node.input) != 1: + raise AttributeError('Number of inputs is not equal 1 for flatten layer') + + logger.debug('Convert inputs to Keras/TF layers if needed.') + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + input_dims = tf_shape(input_0, tf_name=f"{params['cleaned_name']}_shape") + flatten_axis = params.get('axis', 1) + reshaped_input = tf_reshape(input_0, [tf.reduce_prod(input_dims[:flatten_axis]), + tf.reduce_prod(input_dims[flatten_axis:])], + tf_name=f"{params['cleaned_name']}_flatten") + layers[node_name] = reshaped_input + + +def convert_slice(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert slice. 
+ :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.slice') + max_ends_val = np.iinfo(np.int32).max + + if params['change_ordering']: + raise NotImplementedError("change_ordering for Slice is not implemented") + if 'axes' in params: + axes = list(params["axes"]) + ends = list(params["ends"]) + starts = list(params["starts"]) + steps = list(params.get("steps", [None] * len(axes))) + else: + starts = list(layers[node.input[1]]) + ends = list(layers[node.input[2]]) + try: + axes = list(layers[node.input[3]]) + except: + input_rank = len(layers[node.input[0]].shape) + axes = list(range(input_rank)) + try: + steps = list(layers[node.input[4]]) + except IndexError: + steps = list(params.get("steps", [None] * len(axes))) + + # when the 'ends' value is the int64 maximum, probably happen because [idx:] sets large end num in conversion + if not isinstance(ends[0], KerasTensor): + for i in range(len(ends)): + if hasattr(ends[i], 'dtype'): + if ends[i].dtype == np.int64 and ends[i] > max_ends_val: + ends[i] = np.int32(max_ends_val) + elif isinstance(ends[i], int) and ends[i] > max_ends_val: + ends[i] = np.int32(max_ends_val) + try: + max_len = len(layers[node.input[0]].shape) + axes_positives = [axis if axis >= 0 else max_len + axis for axis in axes] + except ValueError as e: + if layers[node.input[0]].shape == None: # tensor with unknown shape (not the same as dynamic) + max_len = max(axes) + 1 + if any([axis < 0 for axis in axes]): + raise NotImplementedError("For a tensor with unknown shape, can't use negative axis") + else: + axes_positives = axes + else: + raise NotImplementedError(f"Couldn't transform the axis in a slice layer {node_name}") + slice_spec_param = [] + is_dynamic = False + for i in range(len(starts)): + for index_li in [starts, steps, ends]: + if index_li[i] is not None and not isinstance(index_li[i], int) and not is_numpy( + index_li[i]) and K.is_keras_tensor(index_li[i]): + is_dynamic = True + if not is_dynamic: + for axis in range(max_len): + if axis in axes_positives: + axis_index = axes_positives.index(axis) + start = starts[axis_index] + end = ends[axis_index] + step = steps[axis_index] + slice_spec_param.append({'start': start, 'step': step, 'stop': end}) + else: + slice_spec_param.append({'start': None, 'step': None, 'stop': None}) + if is_numpy(layers[node.input[0]]) and np.array([_shape is None for _shape in layers[node.input[0]]]).any() \ + and len(layers[node.input[0]].shape) == 1: # slice numpy array which is a shape + sliced = layers[node.input[0]][start:end:step] + else: + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + slicing_layer = SlicingOpLambda(tf.__operators__.getitem) + sliced = slicing_layer(input_0, slice_spec=slice_spec_param) + if is_numpy(layers[node.input[0]]) and not K.is_keras_tensor(sliced): + sliced = sliced.numpy() + layers[node_name] = sliced + else: + try: + steps = list(layers[node.input[4]]) + except IndexError: + steps = list(params.get("steps", [1] * len(axes))) + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + keras_shape = tf_shape(layers[node.input[0]], tf_name=f"{params['cleaned_name']}_shape") + start_vec = [0] * max_len + end_vec = [keras_shape[i] for i in range(max_len)] + step_vec = [1] * max_len + for axis 
in range(max_len): + if axis in axes_positives: + axis_index = axes_positives.index(axis) + for res_list, input_list in zip([start_vec, step_vec, end_vec], [starts, steps, ends]): + slice_index = input_list[axis_index] + if input_list[axis_index] is not None and not isinstance(slice_index, int) and not is_numpy( + input_list[axis_index]) and input_list[axis_index].dtype != tf.int32: + slice_index = tf_cast(slice_index, tf.int32, tf_name=f"{params['cleaned_name']}_cast") + res_list[axis] = slice_index + layers[node_name] = tf_strided_slice(input_0, + tf.concat([start_vec], axis=0), + tf.concat([end_vec], axis=0), + tf.concat([step_vec], axis=0), + tf_name=f"{params['cleaned_name']}_strided_slice") + + +def convert_squeeze(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Squeeze layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + + axis = None + if 'axes' in params: + axis = params['axes'][0] + + if len(node.input) == 2: + axis = layers[node.input[1]].tolist() + layers[node_name] = tf_squeeze(input_0, axis=axis, tf_name=f"{params['cleaned_name']}_squeeze") + + +def convert_resize(node, params, layers, lambda_func, node_name, keras_name): + logger = logging.getLogger('onnx2keras.reshape') + input_tensor = layers[node.input[0]] + roi = None if len(node.input[1]) == 0 else layers[node.input[1]] + scales = [] if len(node.input[2]) == 0 else layers[node.input[2]] + sizes = None + nearest_mode = params.get('nearest_mode', b'round_prefer_floor') + if len(node.input) == 4: + sizes = layers[node.input[3]] + if roi: + raise Exception("Resize with roi not supported") + if params['mode'] == b'nearest': + resize_method = tf.image.ResizeMethod.NEAREST_NEIGHBOR + elif params['mode'] == b'cubic': + resize_method = tf.image.ResizeMethod.BICUBIC + elif params['mode'] == b'linear': + resize_method = tf.image.ResizeMethod.BILINEAR + else: + raise Exception("unsupported resize method") + + rank = len(input_tensor.shape) + axes = params.get('axes', list(range(rank))) # Default to resizing all axes + + # Validate axes + axes = [a if a >= 0 else a + rank for a in axes] # Convert negative axes to positive + if any(a < 0 or a >= rank for a in axes): # check that all axes values are within input rank + raise ValueError("Invalid axes value") + to_channel_last = keras.layers.Permute((2, 3, 1), + name=f"{params['cleaned_name']}_resize_channels_last")(input_tensor) # (B, W, H, C) + shape = tf_cast(tf_shape(input_tensor, tf_name=f"{params['cleaned_name']}_resize_shape"), tf.int32, + tf_name=f"{params['cleaned_name']}_resize_cast") + if shape.shape != 4: + raise Exception("resize layer for input tensor with rank != 4 is not supported") + if isinstance(sizes, KerasTensor) or isinstance(scales, KerasTensor): + tf_resize_shapes = tf_zeros_like(shape, tf_name=f"{params['cleaned_name']}_resize_zeros_like") + + if len(scales) > 0: + for i, axis in enumerate(axes): + indices = tf.constant([[axis]]) + update = tf_cast(tf_multiply(scales[i], + tf_cast(shape[axis], + tf.float32, + tf_name=f"{params['cleaned_name']}_resize_cast_2_ax_{i}" + ), + tf_name=f"{params['cleaned_name']}_resize_multiply_ax_{i}"), + tf.int32, + tf_name=f"{params['cleaned_name']}_resize_cast_3_ax_{i}") + updates = 
tf_reshape(update, (1,), tf_name=f"{params['cleaned_name']}_resize_reshape_1_ax_{i}") + tf_resize_shapes = tf_tensor_scatter_nd_update(tf_resize_shapes, + indices, + updates, + tf_name=f"{params['cleaned_name']}_resize_scatter_ax_{i}") + + else: + for i, axis in enumerate(axes): + indices = tf.constant([[axis]]) + # The value to update at the specified index + update = tf_cast(sizes[i], + tf.int32, + tf_name=f"{params['cleaned_name']}_resize_cast_4_ax_{i}") + updates = tf_reshape(update, + (1,), + tf_name=f"{params['cleaned_name']}_resize_reshape_1_ax_{i}") + # Apply the update using tf.scatter_nd + curr_name_str = f"{params['cleaned_name']}_resize_scatter_1_ax_{i}" + tf_resize_shapes = tf_tensor_scatter_nd_update(tf_resize_shapes, + indices, + updates, + tf_name=curr_name_str) + resize_size = tf_stack(tf_gather(tf_resize_shapes, + [2, 3], + tf_name=f"{params['cleaned_name']}_resize_gather"), + axis=0, + tf_name=f"{params['cleaned_name']}_resize_stack") + else: + tf_resize_shapes = [shape[i] for i in range(2, 4)] # (W, H) for input tensor of shape [B, C, W, H] + if len(scales) > 0: + for i, axis in enumerate(axes): + if scales[i] != 1: + tf_resize_shapes[axis - 2] = tf_cast(scales[i] * tf_cast(tf_resize_shapes[axis - 2], + tf.float32, + tf_name=f"{params['cleaned_name']}_resize_cast_5_ax_{i}"), + tf.int32, + tf_name=f"{params['cleaned_name']}_resize_cast_6_ax_{i}") + else: + for i, axis in enumerate(axes): + if sizes[i] != input_tensor.shape[axis]: + tf_resize_shapes[axis - 2] = int(sizes[i]) + resize_size = tf_stack(tf_resize_shapes, + axis=0, + tf_name=f"{params['cleaned_name']}_resize_stack_1") + + if ( + resize_method == tf.image.ResizeMethod.NEAREST_NEIGHBOR + and isinstance(resize_size, keras.engine.keras_tensor.KerasTensor) + and nearest_mode.decode() == "floor" + ): + logger.warning("floor nearest mode will result in faulty conversion") + if resize_method == tf.image.ResizeMethod.NEAREST_NEIGHBOR and nearest_mode.decode() == 'floor'\ + and not isinstance(resize_size, keras.engine.keras_tensor.KerasTensor): + if not isinstance(resize_size, np.ndarray) : + resize_size = np.array(resize_size) + + def target_layer(x, resize_size=resize_size): + from tensorflow.python.ops.image_ops import resize_nearest_neighbor + + return resize_nearest_neighbor(x, resize_size, half_pixel_centers=False) + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_resize_lambda") + resized = lambda_layer(to_channel_last) + else: + resized = tf_image_resize(to_channel_last, + size=resize_size, + method=resize_method, + tf_name=f"{params['cleaned_name']}_image_resize") + to_channel_first = keras.layers.Permute((3, 1, 2), + name=f"{params['cleaned_name']}_resize_channels_first")(resized) + layers[node_name] = to_channel_first + + +def convert_expand(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert Expand layer + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + if len(node.input) != 2: + assert AttributeError('More than 2 input for expand layer.') + + input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name) + input_1 = layers[node.input[1]] + was_converted_from_bool = False + if input_0.dtype.is_bool: + input_0 = tf_cast(input_0, dtype='int32', tf_name=f"{params['cleaned_name']}_bool_to_int") + 
was_converted_from_bool = True + multiply_res = input_0 * tf_ones(shape=input_1, dtype=input_0.dtype, + tf_name=f"{params['cleaned_name']}_expand_use_ones") + # input_0.dtype == np.int32 since we can't serialize constants as int64, need to cast to true type + if layers[node.input[0]].dtype == np.int64: + multiply_res = tf_cast(multiply_res, tf.int64, tf_name=f"{params['cleaned_name']}_to_int64") + if was_converted_from_bool: + multiply_res = tf_cast(multiply_res, tf.bool, tf_name=f"{params['cleaned_name']}_int_to_bool") + layers[node_name] = multiply_res + + +def convert_tile(node, params, layers, lambda_func, node_name, keras_name): + layers[node_name] = tf_tile(layers[node.input[0]], layers[node.input[1]], tf_name=f"{params['cleaned_name']}_tile") + + +def convert_gather_elements(node, params, layers, lambda_func, node_name, keras_name): + """ + Convert gather. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + logger = logging.getLogger('onnx2keras.gather_elements') + axis = params.get('axis', 0) + data_input = layers[node.input[0]] + indices_input = layers[node.input[1]] + + def torch_gather(x, indices, gather_axis): + all_indices = tf_where(tf_fill(indices.shape, True, tf_name=f"{params['cleaned_name']}_gather_fill"), + tf_name=f"{params['cleaned_name']}_gather_where") + gather_locations = tf_reshape(indices, [indices.shape.num_elements()], + tf_name=f"{params['cleaned_name']}_gather_reshape") + + gather_indices = [] + for axis in range(len(indices.shape)): + if axis == gather_axis: + gather_indices.append(tf_cast(gather_locations, dtype=tf.int64, + tf_name=f"{params['cleaned_name']}_gather_cast_loc")) + else: + gather_indices.append(tf_cast(all_indices[:, axis], dtype=tf.int64, + tf_name=f"{params['cleaned_name']}_gather_cast_all")) + + gather_indices = tf_stack(gather_indices, axis=-1, tf_name=f"{params['cleaned_name']}_gather_indices") + gathered = tf_gather_nd(x, gather_indices, tf_name=f"{params['cleaned_name']}_gather_nd") + reshaped = tf_reshape(gathered, indices.shape, tf_name=f"{params['cleaned_name']}_reshape") + return reshaped + + layers[node_name] = torch_gather(data_input, indices_input, axis) + + +def col2im_onnx(node, params, layers, lambda_func, node_name, keras_name): + + input_cols = layers[node.input[0]] # (N, C*kH*kW, L) KerasTensor + image_shape = layers[node.input[1]] # constant [H_img, W_img] + block_shape = layers[node.input[2]] # constant [kH, kW] + + dilations = params.get('dilations', [1, 1]) + pads = params.get('pads', [0, 0, 0, 0]) # [pt, pl, pb, pr] + strides = params.get('strides', [1, 1]) + + def _col2im_lambda(x): + """x is input_cols inside Lambda → symbolic tf.Tensor""" + + input_cols_sym = x # (N, C*kH*kW, L) + + # image_shape / block_shape come from ONNX initializers + H_img = int(image_shape[0]) + W_img = int(image_shape[1]) + + kH = int(block_shape[0]) + kW = int(block_shape[1]) + + dH, dW = dilations + pt, pl, pb, pr = pads + sH, sW = strides + + # ----------------------------- + # Shapes + # ----------------------------- + N = tf.shape(input_cols_sym)[0] + Ck = tf.shape(input_cols_sym)[1] # C * kH * kW + L = tf.shape(input_cols_sym)[2] + + C = Ck // (kH * kW) # recover C + + # Effective kernel with dilation + kH_eff = kH + (kH - 1) * (dH - 1) + kW_eff = kW + (kW - 1) * (dW - 1) + + # Padded output spatial size + H_pad 
= H_img + pt + pb + W_pad = W_img + pl + pr + + # Sliding-window grid (ONNX spec) + H_out = (H_pad - kH_eff) // sH + 1 + W_out = (W_pad - kW_eff) // sW + 1 + # Optional sanity check: + # tf.debugging.assert_equal(L, H_out * W_out) + + # ----------------------------- + # Reshape ONNX (N,C*kH*kW,L) → (N,C,kH,kW,H_out,W_out) + # ----------------------------- + cols = tf.reshape(input_cols_sym, (N, C, kH, kW, L)) + cols = tf.reshape(cols, (N, C, kH, kW, H_out, W_out)) + + # ----------------------------- + # Prepare accumulation buffer (no normalization) + # ----------------------------- + out_pad = tf.zeros((N, C, H_pad, W_pad), dtype=cols.dtype) + + # Precompute sliding indices (int32) + h_idx_base = tf.range(H_out, dtype=tf.int32)[None, :, None] # (1,H_out,1) + w_idx_base = tf.range(W_out, dtype=tf.int32)[None, None, :] # (1,1,W_out) + + # ----------------------------- + # Main accumulation loop (sum overlaps like nn.Fold) + # ----------------------------- + for kh in range(kH): + for kw in range(kW): + # Offset inside effective kernel + h_off = kh * dH + w_off = kw * dW + + # Target pixel coordinates for this kernel position + H_idx = h_off + h_idx_base * sH # (1,H_out,1), int32 + W_idx = w_off + w_idx_base * sW # (1,1,W_out), int32 + + # Values: (N,C,H_out,W_out) + patch_vals = cols[:, :, kh, kw] + pv_flat = tf.reshape(patch_vals, [-1]) + + # ------------------------- + # Build index tensor (N*C*H_out*W_out, 4) + # All int32 to avoid concat/type issues + # ------------------------- + # N indices + NN = tf.repeat( + tf.range(N, dtype=tf.int32), + C * H_out * W_out + )[:, None] # (?,1) + + # C indices + CC = tf.tile( + tf.repeat( + tf.range(C, dtype=tf.int32), + H_out * W_out + )[None], + [N, 1] + ) + CC = tf.reshape(CC, [-1, 1]) # (?,1) + + # H indices + HH_base = tf.repeat(tf.reshape(H_idx, [-1]), W_out) # (H_out*W_out,) + HH = tf.tile(HH_base[None], [N * C, 1]) + HH = tf.reshape(HH, [-1, 1]) # (?,1) + HH = tf.cast(HH, tf.int32) + + # W indices + WW_base = tf.reshape(W_idx, [-1]) # (W_out,) + WW = tf.tile(WW_base, [N * C * H_out]) # (N*C*H_out*W_out,) + WW = tf.reshape(WW, [-1, 1]) # (?,1) + WW = tf.cast(WW, tf.int32) + + idx = tf.concat([NN, CC, HH, WW], axis=1) # (?,4), int32 + + # ------------------------- + # Scatter-add into padded output (SUM, no normalization) + # ------------------------- + out_pad = tf.tensor_scatter_nd_add(out_pad, idx, pv_flat) + + # Crop padding, keep NCHW (matches torch.nn.Fold) + out_nchw = out_pad[:, :, pt:pt + H_img, pl:pl + W_img] + # If you later want NHWC for Keras, uncomment: + # out_nhwc = tf.transpose(out_nchw, (0, 2, 3, 1)) + # return out_nhwc + + return out_nchw + + # Wrap in a Lambda and return a KERASTENSOR + out = tf.keras.layers.Lambda(_col2im_lambda, name=keras_name)(input_cols) + layers[node_name] = out + diff --git a/onnx2kerastl/sampling_layers.py b/onnx2kerastl/sampling_layers.py new file mode 100644 index 00000000..5e98f297 --- /dev/null +++ b/onnx2kerastl/sampling_layers.py @@ -0,0 +1,196 @@ +import keras +import tensorflow as tf +from .tfops_funcs import tf_shape, tf_expand_dims, tf_cast, tf_reshape,\ + tf_math_minimum, tf_math_maximum, tf_range, tf_gather, tf_size, tf_math_floor, tf_concat + + +def convert_range(node, params, layers, lambda_func, node_name, keras_name): + start_range = layers[node.input[0]] + limit_range = layers[node.input[1]] + delta_range = layers[node.input[2]] + layers[node_name] = tf_range(start_range, limit_range, delta_range, tf_name=f"{params['cleaned_name']}_range") + + +def convert_gridsample(node, params, 
layers, lambda_func, node_name, keras_name): + """ + Convert gridsample. + :param node: current operation node + :param params: operation attributes + :param layers: available keras layers + :param lambda_func: function for keras Lambda layer + :param node_name: internal converter name + :param keras_name: resulting layer name + :return: None + """ + assert params['mode'].decode('ascii') == 'bilinear' + assert params['padding_mode'].decode('ascii') == 'zeros' + params['mode'] = params['mode'].decode('ascii') + params['padding_mode'] = params['padding_mode'].decode('ascii') + img = layers[node.input[0]] + sample_grid = layers[node.input[1]] + torch_shape = tf_shape(img, tf_name=f"{params['cleaned_name']}_gridsample_img_shape") + max_xy = tf_expand_dims( + tf_expand_dims(tf_expand_dims(tf.convert_to_tensor([torch_shape[3] - 1, torch_shape[2] - 1]), + 0, + tf_name=f"{params['cleaned_name']}_gridsample_max_xy_expand_1"), + 0, + tf_name=f"{params['cleaned_name']}_gridsample_max_xy_expand_2"), + 0, + tf_name=f"{params['cleaned_name']}_gridsample_max_xy_expand_3") + max_xy = tf_cast(max_xy, tf.float32, tf_name=f"{params['cleaned_name']}_gridsample_cast") + + if params['align_corners'] == 1: + # Case when align_corners is 1 + grid_index_coords = 0.5 * (sample_grid + 1.) * max_xy + grid_index_coords = grid_index_coords + 1 + else: + # Case when align_corners is 0 + grid_index_coords = ((sample_grid + 1.) * max_xy - 1) / 2 + + orig_query_shape = tf_shape(grid_index_coords, + tf_name=f"{params['cleaned_name']}_gridsample_coords_shape") + query_points = tf_reshape(grid_index_coords, [orig_query_shape[0], -1, 2], + tf_name=f"{params['cleaned_name']}_gridsample_coords_reshape") + padded_img = tf.keras.layers.ZeroPadding2D(padding=(1, 1), data_format="channels_first", + name=f"{params['cleaned_name']}_gridsample_pad_img")(img) + grid = tf.keras.layers.Permute((2, 3, 1), + name=f"{params['cleaned_name']}_gridsample_rotate")(padded_img) + indexing = 'ji' + grid_shape = tf_shape(grid, tf_name=f"{params['cleaned_name']}_gridsample_grid_shape") + query_shape = tf_shape(query_points, tf_name=f"{params['cleaned_name']}_query_shape") + batch_size, height, width, channels = ( + grid_shape[0], + grid_shape[1], + grid_shape[2], + grid_shape[3], + ) + num_queries = query_shape[1] + + query_type = query_points.dtype + grid_type = grid.dtype + + alphas = [] + floors = [] + ceils = [] + index_order = [0, 1] if indexing == "ij" else [1, 0] + # unstacked_query_points = tf.unstack(query_points, axis=2, num=2) + + for i, dim in enumerate(index_order): + queries = query_points[:, :, dim] + # queries = unstacked_query_points[dim] + + size_in_indexing_dimension = grid_shape[i + 1] + + # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1 + # is still a valid index into the grid. 
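+        # Illustrative note (added for clarity, not in the original converter): with a
+        # zero-padded grid of width 6, floor is clamped to [0, 4]; a query coordinate of
+        # 4.7 then blends columns 4 and 5 with alpha = 0.7, while a query of 7.3 is
+        # clamped to floor 4 and saturates at alpha = 1.0.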
+ max_floor = tf_cast(size_in_indexing_dimension - 2, query_type, + tf_name=f"{params['cleaned_name']}_gridsample_cast_max") + min_floor = tf.constant(0.0, dtype=query_type) + floor = tf_math_minimum( + tf_math_maximum(min_floor, + tf_math_floor(queries, tf_name=f"{params['cleaned_name']}_gridsample_floor"), + tf_name=f"{params['cleaned_name']}_gridsample_max"), + max_floor, + tf_name=f"{params['cleaned_name']}_gridsample_min" + ) + int_floor = tf_cast(floor, tf.dtypes.int32, tf_name=f"{params['cleaned_name']}_gridsample_cast_floor") + floors.append(int_floor) + ceil = int_floor + 1 + ceils.append(ceil) + + # alpha has the same type as the grid, as we will directly use alpha + # when taking linear combinations of pixel values from the image. + alpha = tf_cast(queries - floor, grid_type, tf_name=f"{params['cleaned_name']}_gridsample_alpha") + min_alpha = tf.constant(0.0, dtype=grid_type) + max_alpha = tf.constant(1.0, dtype=grid_type) + alpha = tf_math_minimum(tf_math_maximum(min_alpha, + alpha, + tf_name=f"{params['cleaned_name']}_gridsample_alpha_max"), + max_alpha, + tf_name=f"{params['cleaned_name']}_gridsample_alpha_min") + + # Expand alpha to [b, n, 1] so we can use broadcasting + # (since the alpha values don't depend on the channel). + alpha = tf_expand_dims(alpha, 2, tf_name=f"{params['cleaned_name']}_gridsample_expand_dim") + alphas.append(alpha) + + flattened_grid = tf_reshape(grid, [batch_size * height * width, channels], + tf_name=f"{params['cleaned_name']}_gridsample_flatenned_reshape") + batch_offsets = tf_reshape( + tf_range(batch_size, tf_name=f"{params['cleaned_name']}_gridsample_range") * height * width, [batch_size, 1], + tf_name=f"{params['cleaned_name']}_gridsample_flatenned_offset" + ) + + # This wraps tf.gather. We reshape the image data such that the + # batch, y, and x coordinates are pulled into the first dimension. + # Then we gather. Finally, we reshape the output back. It's possible this + # code would be made simpler by using tf.gather_nd. 
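+    # Illustrative note (added for clarity, not in the original converter): after
+    # flattening the grid to [batch_size * height * width, channels], the pixel at
+    # (b, y, x) sits at linear index b * height * width + y * width + x, which is
+    # exactly the batch_offsets + y_coords * width + x_coords computed in gather() below.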
+ def gather(y_coords, x_coords, name=None): + linear_coordinates = batch_offsets + y_coords * width + x_coords + gathered_values = tf_gather(flattened_grid, linear_coordinates, + tf_name=f"{params['cleaned_name']}_gridsample_gather_{name}") + return tf_reshape(gathered_values, [batch_size, num_queries, channels], + tf_name=f"{params['cleaned_name']}_gridsample_reshape_{name}") + + # grab the pixel values in the 4 corners around each query point + top_left = gather(floors[0], floors[1], "top_left") + top_right = gather(floors[0], ceils[1], "top_right") + bottom_left = gather(ceils[0], floors[1], "bottom_left") + bottom_right = gather(ceils[0], ceils[1], "bottom_right") + + interp_top = alphas[1] * (top_right - top_left) + top_left + interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left + interp = alphas[0] * (interp_bottom - interp_top) + interp_top + tf_reshaped_results = tf_reshape(interp, + tf_concat([orig_query_shape[:-1], torch_shape[1:2]], axis=0, + tf_name=f"{params['cleaned_name']}_gridsample_concat"), + tf_name=f"{params['cleaned_name']}_gridsample_reshape_res") + ret = tf.keras.layers.Permute((3, 1, 2), + name=f"{params['cleaned_name']}_gridsample_last_permute")(tf_reshaped_results) + layers[node_name] = ret + + +def convert_unique(node, params, layers, lambda_func, node_name, keras_name): + to_sort = params.get('sorted', 1) == 1 + axis = params.get('axis') + if axis is not None: + raise AttributeError("Onnx2kerras: Unique does not currently support an operation on a non-flattened array") + lambda_input = layers[node.input[0]] + rev_idx_length = tf_size(lambda_input, tf_name=f"{params['cleaned_name']}_unique_size_1") + + def target_layer(x): + input_keras = x + if axis is None: + input_final = tf.reshape(input_keras, [-1]) + res, rev_idx, count = tf.unique_with_counts(input_final) + idx = tf.math.unsorted_segment_min(tf.range(tf.shape(rev_idx)[0]), rev_idx, tf.shape(res)[0]) + if to_sort: + linspace = tf.range(tf.shape(count)[0]) + argsorted = tf.argsort(res) + lookup_table = tf.lookup.StaticHashTable(tf.lookup.KeyValueTensorInitializer(linspace, argsorted), + default_value=-1) + + rev_idx_sorted = lookup_table.lookup(rev_idx) + + res_sorted = tf.scatter_nd(tf.expand_dims(argsorted, -1), res, tf.shape(res)) + count_sorted = tf.scatter_nd(tf.expand_dims(argsorted, -1), count, tf.shape(res)) + idx_sorted = tf.scatter_nd(tf.expand_dims(argsorted, -1), idx, tf.shape(res)) + return tf.concat([tf.cast(rev_idx_sorted, tf.float32), res_sorted, tf.cast(idx_sorted, tf.float32), + tf.cast(count_sorted, tf.float32)], + axis=0) + else: + return tf.concat([tf.cast(rev_idx, tf.float32), res, tf.cast(idx, tf.float32), tf.cast(count, tf.float32)], + axis=0) + + lambda_layer = keras.layers.Lambda(target_layer, name=f"{params['cleaned_name']}_unique") + lambda_res = lambda_layer(lambda_input) + rev_idx = lambda_res[:rev_idx_length] + lambda_length = tf_size(lambda_res, tf_name=f"{params['cleaned_name']}_unique_size_2") + remainder = tf_cast((lambda_length - rev_idx_length) / 3, tf.int32, tf_name=f"{params['cleaned_name']}_unique_cast1") # not working need to fix + count = tf_cast(lambda_res[-remainder:], tf.int32, tf_name=f"{params['cleaned_name']}_unique_cast2") + idx = tf_cast(lambda_res[-2 * remainder:-remainder], tf.int32, tf_name=f"{params['cleaned_name']}_unique_cast3") + res = tf_cast(lambda_res[-3 * remainder:-2 * remainder], tf.int32, tf_name=f"{params['cleaned_name']}_unique_cast4") + layers[keras_name[0]] = res + layers[keras_name[1]] = idx + layers[keras_name[2]] = 
rev_idx + layers[keras_name[3]] = count diff --git a/onnx2kerastl/tfops_funcs.py b/onnx2kerastl/tfops_funcs.py new file mode 100644 index 00000000..6e8b7d4f --- /dev/null +++ b/onnx2kerastl/tfops_funcs.py @@ -0,0 +1,128 @@ +import numpy as np +import tensorflow as tf +from typing import Callable +from keras import backend as K +import logging +logger = logging.getLogger('onnx2keras.tfops_funcs') + +layer_names_counter = {} + + +def named_tfop(func: Callable): + def wrapped_function(*args, tf_name=None, **kwargs): + result = func(*args, **kwargs) + if tf_name is None or tf_name=="" or not isinstance(tf_name, str): + raise ValueError(f"The layer {result.node.layer} with name" + f" {result.node.layer.name} was provided with an empty or None Name") + if not isinstance(result, (tf.Tensor, np.ndarray)): + if tf_name not in layer_names_counter: + layer_names_counter[tf_name] = 0 + else: + layer_names_counter[tf_name] = layer_names_counter[tf_name] + 1 + tf_name = tf_name + f"_{layer_names_counter[tf_name]}" + if not isinstance(result, (tf.Tensor, np.ndarray)): + logger.debug(f"The op {result.node.layer.symbol} with name" + f"{result.node.layer.name} has a duplicate name {tf_name}") + result.node.layer._name = tf_name + return result + return wrapped_function + + +tf_cast = named_tfop(tf.cast) +tf_shape = named_tfop(tf.shape) +tf_math_abs = named_tfop(tf.math.abs) +tf_reshape = named_tfop(tf.reshape) +tf_stack = named_tfop(tf.stack) +tf_add = named_tfop(tf.add) +tf_image_resize = named_tfop(tf.image.resize) +tf_multiply = named_tfop(tf.multiply) +tf_clip_by_value = named_tfop(tf.clip_by_value) +tf_math_negative = named_tfop(tf.math.negative) +tf_tensor_scatter_nd_update = named_tfop(tf.tensor_scatter_nd_update) +K_mean = named_tfop(K.mean) +tf_math_reduce_prod = named_tfop(tf.math.reduce_prod) +tf_math_reduce_min = named_tfop(tf.math.reduce_min) +tf_math_pow = named_tfop(tf.math.pow) +tf_math_sqrt = named_tfop(tf.math.sqrt) +tf_strided_slice = named_tfop(tf.strided_slice) +tf_squeeze = named_tfop(tf.squeeze) +tf_argmax = named_tfop(tf.argmax) +tf_expand_dims = named_tfop(tf.expand_dims) +tf_maximum = named_tfop(tf.maximum) +tf_minimum = named_tfop(tf.minimum) +tf_repeat = named_tfop(tf.repeat) +tf_matmul = named_tfop(tf.matmul) +tf_concat = named_tfop(tf.concat) +tf_transpose = named_tfop(tf.transpose) +tf_math_reduce_variance = named_tfop(tf.math.reduce_variance) +tf_math_reduce_mean = named_tfop(tf.math.reduce_mean) +tf_sqrt = named_tfop(tf.sqrt) +tf_where = named_tfop(tf.where) +tf_gather = named_tfop(tf.gather) +tf_range = named_tfop(tf.range) +tf_abs = named_tfop(tf.abs) +tf_reduce_sum = named_tfop(tf.reduce_sum) +tf_pad = named_tfop(tf.pad) +tf_math_erf = named_tfop(tf.math.erf) +tf_math_reciprocal = named_tfop(tf.math.reciprocal) +tf_logical_not = named_tfop(tf.logical_not) +tf_equal = named_tfop(tf.equal) +tf_tile = named_tfop(tf.tile) +tf_math_minimum = named_tfop(tf.math.minimum) +tf_math_maximum = named_tfop(tf.math.maximum) +tf_math_sign = named_tfop(tf.math.sign) +tf_math_sin = named_tfop(tf.math.sin) +tf_math_cosh = named_tfop(tf.math.cosh) +tf_math_ceil = named_tfop(tf.math.ceil) +tf_math_acosh = named_tfop(tf.math.acosh) +tf_math_acos = named_tfop(tf.math.acos) +tf_math_asinh = named_tfop(tf.math.asinh) +tf_math_asin = named_tfop(tf.math.asin) +tf_math_atanh = named_tfop(tf.math.atanh) +tf_math_tan = named_tfop(tf.math.tan) +tf_math_atan = named_tfop(tf.math.atan) +tf_math_sinh = named_tfop(tf.math.sinh) +tf_math_less_equal = named_tfop(tf.math.less_equal) +tf_bitwise_invert = 
named_tfop(tf.bitwise.invert) +tf_bitwise_bitwise_and = named_tfop(tf.bitwise.bitwise_and) +tf_bitwise_bitwise_or = named_tfop(tf.bitwise.bitwise_or) +tf_bitwise_bitwise_xor = named_tfop(tf.bitwise.bitwise_xor) +tf_cos = named_tfop(tf.cos) +tf_math_greater = named_tfop(tf.math.greater) +tf_math_greater_equal = named_tfop(tf.math.greater_equal) +tf_logical_and = named_tfop(tf.logical_and) +tf_math_logical_xor = named_tfop(tf.math.logical_xor) +tf_math_logical_or = named_tfop(tf.math.logical_or) +tf_argmin = named_tfop(tf.argmin) +tf_one_hot = named_tfop(tf.one_hot) +tf_round = named_tfop(tf.round) +tf_math_cumsum = named_tfop(tf.math.cumsum) +tf_math_is_inf = named_tfop(tf.math.is_inf) +tf_math_is_nan = named_tfop(tf.math.is_nan) +tf_size = named_tfop(tf.size) +tf_linalg_det = named_tfop(tf.linalg.det) +tf_not_equal = named_tfop(tf.not_equal) +tf_gather_nd = named_tfop(tf.gather_nd) +tf_math_softplus = named_tfop(tf.math.softplus) +tf_math_tanh = named_tfop(tf.math.tanh) +tf_signal_irfft = named_tfop(tf.signal.irfft) +tf_signal_ifft = named_tfop(tf.signal.ifft) +tf_signal_rfft = named_tfop(tf.signal.rfft) +tf_signal_fft = named_tfop(tf.signal.fft) +tf_sign = named_tfop(tf.sign) +tf_abs = named_tfop(tf.abs) +tf_math_mod = named_tfop(tf.math.mod) +tf_bitwise_left_shift = named_tfop(tf.bitwise.left_shift) +tf_bitwise_right_shift = named_tfop(tf.bitwise.right_shift) +tf_rank = named_tfop(tf.rank) +tf_fill = named_tfop(tf.fill) +tf_image_non_max_suppression = named_tfop(tf.image.non_max_suppression) +tf_ones_like = named_tfop(tf.ones_like) +tf_image_crop_and_resize = named_tfop(tf.image.crop_and_resize) +tf_ones = named_tfop(tf.ones) +tf_math_floor = named_tfop(tf.math.floor) +tf_zeros_like = named_tfop(tf.zeros_like) +tf_tensor_scatter_nd_update = named_tfop(tf.tensor_scatter_nd_update) +tf_nn_avg_pool = named_tfop(tf.nn.avg_pool) +tf_nn_max_pool = named_tfop(tf.nn.max_pool) +tf_linalg_matmul = named_tfop(tf.linalg.matmul) \ No newline at end of file diff --git a/onnx2keras/upsampling_layers.py b/onnx2kerastl/upsampling_layers.py similarity index 58% rename from onnx2keras/upsampling_layers.py rename to onnx2kerastl/upsampling_layers.py index 586e9405..1d2e3a18 100644 --- a/onnx2keras/upsampling_layers.py +++ b/onnx2kerastl/upsampling_layers.py @@ -1,4 +1,4 @@ -from tensorflow import keras +import keras import numpy as np import logging @@ -27,12 +27,18 @@ def convert_upsample(node, params, layers, lambda_func, node_name, keras_name): # Upsample since opset version 9 uses input[1] as 'scales' instead of attributes. scale = np.uint8(layers[node.input[1]][-2:]) - if params['mode'].decode('utf-8') != 'nearest': - logger.error('Cannot convert non-nearest upsampling.') - raise AssertionError('Cannot convert non-nearest upsampling') + interpolation_mode = params['mode'].decode('utf-8') + if interpolation_mode == 'nearest': + interpolation = "nearest" + elif interpolation_mode in ['bilinear', 'linear']: + interpolation = "bilinear" + elif interpolation_mode in "cubic": + interpolation = "bicubic" + else: + logger.error(f'Cannot convert upsampling. interpolation mode: {interpolation_mode} is not supported') + raise AssertionError(f'Cannot convert upsampling. 
interpolation mode: {interpolation_mode} is not supported') - upsampling = keras.layers.UpSampling2D( - size=scale, name=keras_name - ) + upsampling = keras.layers.UpSampling2D(size=scale, name=f"{params['cleaned_name']}_upsample_2d", + interpolation=interpolation) layers[node_name] = upsampling(layers[node.input[0]]) diff --git a/onnx2kerastl/utils.py b/onnx2kerastl/utils.py new file mode 100644 index 00000000..f430aa41 --- /dev/null +++ b/onnx2kerastl/utils.py @@ -0,0 +1,190 @@ +from typing import List, Union, Callable + +import numpy as np +import keras +from keras.engine.keras_tensor import KerasTensor +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +from.tfops_funcs import tf_reshape + +ONNX_ELEM_TO_TF_TYPE = { + 1: tf.float32, + 2: tf.uint8, + 3: tf.int8, + 4: tf.uint16, + 5: tf.int16, + 6: tf.int32, + 7: tf.int64, + 8: tf.string, + 9: tf.bool, + 10: tf.float16, + 11: tf.double, + 12: tf.uint32, + 13: tf.uint64, + 14: tf.complex64, + 15: tf.complex128, + 16: tf.bfloat16 +} + + +def is_numpy(obj): + """ + Check of the type is instance of numpy array + :param obj: object to check + :return: True if the object is numpy-type array. + """ + return isinstance(obj, (np.ndarray, np.generic)) + + +def ensure_tf_type(obj, name="Const"): + import numpy as np + import tensorflow as tf + """ + Convert to Keras Constant if needed + :param obj: numpy / tf type + :param fake_input_layer: fake input layer to add constant + :return: tf type + """ + if is_numpy(obj): # TF < v1.16 assumes all ints are int32 and all floats are float32 + if obj.dtype == np.int64: + obj = np.int32(obj) + return tf.constant(obj, name=name) + else: + return obj + + +def check_torch_keras_error(model, k_model, input_np, epsilon=1e-5, change_ordering=False, + should_transform_inputs=False): + """ + Check difference between Torch and Keras models + :param model: torch model + :param k_model: keras model + :param input_np: input data as numpy array or list of numpy array + :param epsilon: allowed difference + :param change_ordering: change ordering for keras input + :param should_transform_inputs: default False, set to True for converting channel first inputs to channel last format + :return: actual difference + + """ + from torch.autograd import Variable + import torch + + if isinstance(input_np, np.ndarray): + input_np = [input_np.astype(np.float32)] + + input_var = [Variable(torch.FloatTensor(i)) for i in input_np] + pytorch_output = model(*input_var) + if isinstance(pytorch_output, dict): + pytorch_output = [p.data.numpy() for p in list(pytorch_output.values())] + elif isinstance(pytorch_output, (tuple, list)): + pytorch_output = [p.data.numpy() for p in pytorch_output] + else: + pytorch_output = [pytorch_output.data.numpy()] + + if change_ordering: + # change image data format + + # to proper work with Lambda layers that transpose weights based on image_data_format + keras.backend.set_image_data_format("channels_last") + + _input_np = [] + for i in input_np: + axes = list(range(len(i.shape))) + axes = axes[0:1] + axes[2:] + axes[1:2] + _input_np.append(np.transpose(i, axes)) + input_np = _input_np + + # run keras model + keras_output = k_model.predict(input_np) + if not isinstance(keras_output, list): + keras_output = [keras_output] + + # change image data format if output shapes are different (e.g. 
the same for global_avgpool2d) + _koutput = [] + for i, k in enumerate(keras_output): + if k.shape != pytorch_output[i].shape: + axes = list(range(len(k.shape))) + axes = axes[0:1] + axes[-1:] + axes[1:-1] + k = np.transpose(k, axes) + _koutput.append(k) + keras_output = _koutput + else: + inputs_to_transpose = [] + if should_transform_inputs: + inputs_to_transpose = [k_input.name for k_input in k_model.inputs] + + _input_np = [] + for i in input_np: + axes = list(range(len(i.shape))) + axes = axes[0:1] + axes[2:] + axes[1:2] + _input_np.append(np.transpose(i, axes)) + input_np = _input_np + + k_model = convert_channels_first_to_last(k_model, inputs_to_transpose) + if len(input_np) == 1: + input_np = input_np[0] + keras_output = k_model(input_np) + if not isinstance(keras_output, list): + keras_output = [keras_output] + + _koutput = [] + for i, k in enumerate(keras_output): + if k.shape != pytorch_output[i].shape: + axes = list(range(len(k.shape))) + axes = axes[0:1] + axes[-1:] + axes[1:-1] + k = np.transpose(k, axes) + _koutput.append(k) + keras_output = _koutput + + max_error = 0 + for p, k in zip(pytorch_output, keras_output): + error = np.max(np.abs(p - k)) + np.testing.assert_allclose(p, k, atol=epsilon, rtol=0.0) + if error > max_error: + max_error = error + + return max_error + + +def unsqueeze_tensors_of_rank_one(tensor_list, axis: int, name: str): + """ + Adjusts the ranks of tensors of rank 1 in a given list to match the maximum rank by adding dummy dimensions + e.g., for input tensors shapes [(2,), (1, 4)] the unsqueezed tensors are [(1, 2), (1, 4)] + + Args: + tensor_list (list): A list of tensors. + axis (int): Position of the original dimension in the unsqueezed rank-1 tensors; dummy dimensions are added before and after it. + name (str): Base name used for the generated reshape operations. + + Returns: + list: A new list of tensors with adjusted ranks to match the maximum rank. + If all tensors in the input list already have the same rank, the original list is returned. + """ + ranks = [tensor.shape.rank for tensor in tensor_list] + max_rank = max(ranks) + + if len(set(ranks)) == 1: + return tensor_list + elif len(set(ranks)) > 2: + raise ValueError(f"More than 2 different ranks detected, broadcasting is ambiguous.\n" + f"Check the outputs of layers: \n" + '\n'.join([tensor.name for tensor in tensor_list])) + + unsqueezed_tensors = [] + for tensor in tensor_list: + tensor_rank = tensor.shape.rank + if tensor_rank == 1: + rank_diff = max_rank - 1 + new_shape = [1] * axis + list(tensor.shape) + [1] * (rank_diff - axis) + unsqueezed_tensor = tf_reshape(tensor, new_shape, tf_name=f"{name}_rank_one_reshape") + unsqueezed_tensors.append(unsqueezed_tensor) + else: + unsqueezed_tensors.append(tensor) + + return unsqueezed_tensors + + +def ensure_float(value): + if isinstance(value, (list, np.ndarray)): + return float(value[0]) + elif isinstance(value, tf.Tensor): + return float(value.numpy().item()) + else: + return float(value) diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..ed21d5df --- /dev/null +++ b/poetry.lock @@ -0,0 +1,5937 @@ +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. + +[[package]] +name = "absl-py" +version = "2.1.0" +description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
+optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"}, + {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.4" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, + {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, +] + +[[package]] +name = "aiohttp" +version = "3.10.11" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, + {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, + {file = "aiohttp-3.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffbfde2443696345e23a3c597049b1dd43049bb65337837574205e7368472177"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20b3d9e416774d41813bc02fdc0663379c01817b0874b932b81c7f777f67b217"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b943011b45ee6bf74b22245c6faab736363678e910504dd7531a58c76c9015a"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48bc1d924490f0d0b3658fe5c4b081a4d56ebb58af80a6729d4bd13ea569797a"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e12eb3f4b1f72aaaf6acd27d045753b18101524f72ae071ae1c91c1cd44ef115"}, + {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f14ebc419a568c2eff3c1ed35f634435c24ead2fe19c07426af41e7adb68713a"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:72b191cdf35a518bfc7ca87d770d30941decc5aaf897ec8b484eb5cc8c7706f3"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ab2328a61fdc86424ee540d0aeb8b73bbcad7351fb7cf7a6546fc0bcffa0038"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa93063d4af05c49276cf14e419550a3f45258b6b9d1f16403e777f1addf4519"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:30283f9d0ce420363c24c5c2421e71a738a2155f10adbb1a11a4d4d6d2715cfc"}, + {file = 
"aiohttp-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e5358addc8044ee49143c546d2182c15b4ac3a60be01c3209374ace05af5733d"}, + {file = "aiohttp-3.10.11-cp310-cp310-win32.whl", hash = "sha256:e1ffa713d3ea7cdcd4aea9cddccab41edf6882fa9552940344c44e59652e1120"}, + {file = "aiohttp-3.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:778cbd01f18ff78b5dd23c77eb82987ee4ba23408cbed233009fd570dda7e674"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:80ff08556c7f59a7972b1e8919f62e9c069c33566a6d28586771711e0eea4f07"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c8f96e9ee19f04c4914e4e7a42a60861066d3e1abf05c726f38d9d0a466e695"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fb8601394d537da9221947b5d6e62b064c9a43e88a1ecd7414d21a1a6fba9c24"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea224cf7bc2d8856d6971cea73b1d50c9c51d36971faf1abc169a0d5f85a382"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db9503f79e12d5d80b3efd4d01312853565c05367493379df76d2674af881caa"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0f449a50cc33f0384f633894d8d3cd020e3ccef81879c6e6245c3c375c448625"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82052be3e6d9e0c123499127782a01a2b224b8af8c62ab46b3f6197035ad94e9"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20063c7acf1eec550c8eb098deb5ed9e1bb0521613b03bb93644b810986027ac"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:489cced07a4c11488f47aab1f00d0c572506883f877af100a38f1fedaa884c3a"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ea9b3bab329aeaa603ed3bf605f1e2a6f36496ad7e0e1aa42025f368ee2dc07b"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ca117819d8ad113413016cb29774b3f6d99ad23c220069789fc050267b786c16"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2dfb612dcbe70fb7cdcf3499e8d483079b89749c857a8f6e80263b021745c730"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9b615d3da0d60e7d53c62e22b4fd1c70f4ae5993a44687b011ea3a2e49051b8"}, + {file = "aiohttp-3.10.11-cp311-cp311-win32.whl", hash = "sha256:29103f9099b6068bbdf44d6a3d090e0a0b2be6d3c9f16a070dd9d0d910ec08f9"}, + {file = "aiohttp-3.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:236b28ceb79532da85d59aa9b9bf873b364e27a0acb2ceaba475dc61cffb6f3f"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7480519f70e32bfb101d71fb9a1f330fbd291655a4c1c922232a48c458c52710"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f65267266c9aeb2287a6622ee2bb39490292552f9fbf851baabc04c9f84e048d"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7400a93d629a0608dc1d6c55f1e3d6e07f7375745aaa8bd7f085571e4d1cee97"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f34b97e4b11b8d4eb2c3a4f975be626cc8af99ff479da7de49ac2c6d02d35725"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7b825da878464a252ccff2958838f9caa82f32a8dbc334eb9b34a026e2c636"}, + 
{file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9f92a344c50b9667827da308473005f34767b6a2a60d9acff56ae94f895f385"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f1ab987a27b83c5268a17218463c2ec08dbb754195113867a27b166cd6087"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1dc0f4ca54842173d03322793ebcf2c8cc2d34ae91cc762478e295d8e361e03f"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7ce6a51469bfaacff146e59e7fb61c9c23006495d11cc24c514a455032bcfa03"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aad3cd91d484d065ede16f3cf15408254e2469e3f613b241a1db552c5eb7ab7d"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f4df4b8ca97f658c880fb4b90b1d1ec528315d4030af1ec763247ebfd33d8b9a"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2e4e18a0a2d03531edbc06c366954e40a3f8d2a88d2b936bbe78a0c75a3aab3e"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6ce66780fa1a20e45bc753cda2a149daa6dbf1561fc1289fa0c308391c7bc0a4"}, + {file = "aiohttp-3.10.11-cp312-cp312-win32.whl", hash = "sha256:a919c8957695ea4c0e7a3e8d16494e3477b86f33067478f43106921c2fef15bb"}, + {file = "aiohttp-3.10.11-cp312-cp312-win_amd64.whl", hash = "sha256:b5e29706e6389a2283a91611c91bf24f218962717c8f3b4e528ef529d112ee27"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:703938e22434d7d14ec22f9f310559331f455018389222eed132808cd8f44127"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9bc50b63648840854e00084c2b43035a62e033cb9b06d8c22b409d56eb098413"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f0463bf8b0754bc744e1feb61590706823795041e63edf30118a6f0bf577461"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6c6dec398ac5a87cb3a407b068e1106b20ef001c344e34154616183fe684288"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcaf2d79104d53d4dcf934f7ce76d3d155302d07dae24dff6c9fffd217568067"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25fd5470922091b5a9aeeb7e75be609e16b4fba81cdeaf12981393fb240dd10e"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbde2ca67230923a42161b1f408c3992ae6e0be782dca0c44cb3206bf330dee1"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:249c8ff8d26a8b41a0f12f9df804e7c685ca35a207e2410adbd3e924217b9006"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:878ca6a931ee8c486a8f7b432b65431d095c522cbeb34892bee5be97b3481d0f"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8663f7777ce775f0413324be0d96d9730959b2ca73d9b7e2c2c90539139cbdd6"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6cd3f10b01f0c31481fba8d302b61603a2acb37b9d30e1d14e0f5a58b7b18a31"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e8d8aad9402d3aa02fdc5ca2fe68bcb9fdfe1f77b40b10410a94c7f408b664d"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", 
hash = "sha256:38e3c4f80196b4f6c3a85d134a534a56f52da9cb8d8e7af1b79a32eefee73a00"}, + {file = "aiohttp-3.10.11-cp313-cp313-win32.whl", hash = "sha256:fc31820cfc3b2863c6e95e14fcf815dc7afe52480b4dc03393c4873bb5599f71"}, + {file = "aiohttp-3.10.11-cp313-cp313-win_amd64.whl", hash = "sha256:4996ff1345704ffdd6d75fb06ed175938c133425af616142e7187f28dc75f14e"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:74baf1a7d948b3d640badeac333af581a367ab916b37e44cf90a0334157cdfd2"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:473aebc3b871646e1940c05268d451f2543a1d209f47035b594b9d4e91ce8339"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c2f746a6968c54ab2186574e15c3f14f3e7f67aef12b761e043b33b89c5b5f95"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d110cabad8360ffa0dec8f6ec60e43286e9d251e77db4763a87dcfe55b4adb92"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0099c7d5d7afff4202a0c670e5b723f7718810000b4abcbc96b064129e64bc7"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0316e624b754dbbf8c872b62fe6dcb395ef20c70e59890dfa0de9eafccd2849d"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a5f7ab8baf13314e6b2485965cbacb94afff1e93466ac4d06a47a81c50f9cca"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c891011e76041e6508cbfc469dd1a8ea09bc24e87e4c204e05f150c4c455a5fa"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9208299251370ee815473270c52cd3f7069ee9ed348d941d574d1457d2c73e8b"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:459f0f32c8356e8125f45eeff0ecf2b1cb6db1551304972702f34cd9e6c44658"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:14cdc8c1810bbd4b4b9f142eeee23cda528ae4e57ea0923551a9af4820980e39"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:971aa438a29701d4b34e4943e91b5e984c3ae6ccbf80dd9efaffb01bd0b243a9"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9a309c5de392dfe0f32ee57fa43ed8fc6ddf9985425e84bd51ed66bb16bce3a7"}, + {file = "aiohttp-3.10.11-cp38-cp38-win32.whl", hash = "sha256:9ec1628180241d906a0840b38f162a3215114b14541f1a8711c368a8739a9be4"}, + {file = "aiohttp-3.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:9c6e0ffd52c929f985c7258f83185d17c76d4275ad22e90aa29f38e211aacbec"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc493a2e5d8dc79b2df5bec9558425bcd39aff59fc949810cbd0832e294b106"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b3e70f24e7d0405be2348da9d5a7836936bf3a9b4fd210f8c37e8d48bc32eca6"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968b8fb2a5eee2770eda9c7b5581587ef9b96fbdf8dcabc6b446d35ccc69df01"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deef4362af9493d1382ef86732ee2e4cbc0d7c005947bd54ad1a9a16dd59298e"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:686b03196976e327412a1b094f4120778c7c4b9cff9bce8d2fdfeca386b89829"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3bf6d027d9d1d34e1c2e1645f18a6498c98d634f8e373395221121f1c258ace8"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:099fd126bf960f96d34a760e747a629c27fb3634da5d05c7ef4d35ef4ea519fc"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c73c4d3dae0b4644bc21e3de546530531d6cdc88659cdeb6579cd627d3c206aa"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0c5580f3c51eea91559db3facd45d72e7ec970b04528b4709b1f9c2555bd6d0b"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fdf6429f0caabfd8a30c4e2eaecb547b3c340e4730ebfe25139779b9815ba138"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d97187de3c276263db3564bb9d9fad9e15b51ea10a371ffa5947a5ba93ad6777"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:0acafb350cfb2eba70eb5d271f55e08bd4502ec35e964e18ad3e7d34d71f7261"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c13ed0c779911c7998a58e7848954bd4d63df3e3575f591e321b19a2aec8df9f"}, + {file = "aiohttp-3.10.11-cp39-cp39-win32.whl", hash = "sha256:22b7c540c55909140f63ab4f54ec2c20d2635c0289cdd8006da46f3327f971b9"}, + {file = "aiohttp-3.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:7b26b1551e481012575dab8e3727b16fe7dd27eb2711d2e63ced7368756268fb"}, + {file = "aiohttp-3.10.11.tar.gz", hash = "sha256:9dc2b8f3dcab2e39e0fa309c8da50c3b55e6f34ab25f1a71d3288f24924d33a7"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.12.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiohttp" +version = "3.13.2" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2372b15a5f62ed37789a6b383ff7344fc5b9f243999b0cd9b629d8bc5f5b4155"}, + {file = "aiohttp-3.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f8659a48995edee7229522984bd1009c1213929c769c2daa80b40fe49a180c"}, + {file = "aiohttp-3.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:939ced4a7add92296b0ad38892ce62b98c619288a081170695c6babe4f50e636"}, + {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6315fb6977f1d0dd41a107c527fee2ed5ab0550b7d885bc15fee20ccb17891da"}, + {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e7352512f763f760baaed2637055c49134fd1d35b37c2dedfac35bfe5cf8725"}, + {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e09a0a06348a2dd73e7213353c90d709502d9786219f69b731f6caa0efeb46f5"}, + {file = "aiohttp-3.13.2-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a09a6d073fb5789456545bdee2474d14395792faa0527887f2f4ec1a486a59d3"}, + {file = 
"aiohttp-3.13.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b59d13c443f8e049d9e94099c7e412e34610f1f49be0f230ec656a10692a5802"}, + {file = "aiohttp-3.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:20db2d67985d71ca033443a1ba2001c4b5693fe09b0e29f6d9358a99d4d62a8a"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:960c2fc686ba27b535f9fd2b52d87ecd7e4fd1cf877f6a5cba8afb5b4a8bd204"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6c00dbcf5f0d88796151e264a8eab23de2997c9303dd7c0bf622e23b24d3ce22"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fed38a5edb7945f4d1bcabe2fcd05db4f6ec7e0e82560088b754f7e08d93772d"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:b395bbca716c38bef3c764f187860e88c724b342c26275bc03e906142fc5964f"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:204ffff2426c25dfda401ba08da85f9c59525cdc42bda26660463dd1cbcfec6f"}, + {file = "aiohttp-3.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:05c4dd3c48fb5f15db31f57eb35374cb0c09afdde532e7fb70a75aede0ed30f6"}, + {file = "aiohttp-3.13.2-cp310-cp310-win32.whl", hash = "sha256:e574a7d61cf10351d734bcddabbe15ede0eaa8a02070d85446875dc11189a251"}, + {file = "aiohttp-3.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:364f55663085d658b8462a1c3f17b2b84a5c2e1ba858e1b79bff7b2e24ad1514"}, + {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0"}, + {file = "aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb"}, + {file = "aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592"}, + {file = "aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab"}, + {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30"}, + {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40"}, + {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948"}, + {file = 
"aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf"}, + {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782"}, + {file = "aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8"}, + {file = "aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec"}, + {file = "aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c"}, + {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b"}, + {file = "aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc"}, + {file = "aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e"}, + {file = "aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169"}, + {file = "aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248"}, + {file = "aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e"}, + {file = "aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45"}, + {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be"}, + {file = "aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742"}, + {file = "aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e"}, + {file = "aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476"}, + {file = "aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23"}, + {file = "aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254"}, + {file = "aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a"}, + {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b"}, + {file = "aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61"}, + {file = "aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4"}, + {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b"}, + {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694"}, + {file = 
"aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906"}, + {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9"}, + {file = "aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011"}, + {file = "aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4"}, + {file = "aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a"}, + {file = "aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940"}, + {file = "aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4"}, + {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673"}, + {file = "aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd"}, + {file = "aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e"}, + {file = "aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", 
hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be"}, + {file = "aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c"}, + {file = "aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734"}, + {file = "aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f"}, + {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7fbdf5ad6084f1940ce88933de34b62358d0f4a0b6ec097362dcd3e5a65a4989"}, + {file = "aiohttp-3.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7c3a50345635a02db61792c85bb86daffac05330f6473d524f1a4e3ef9d0046d"}, + {file = "aiohttp-3.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e87dff73f46e969af38ab3f7cb75316a7c944e2e574ff7c933bc01b10def7f5"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2adebd4577724dcae085665f294cc57c8701ddd4d26140504db622b8d566d7aa"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e036a3a645fe92309ec34b918394bb377950cbb43039a97edae6c08db64b23e2"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:23ad365e30108c422d0b4428cf271156dd56790f6dd50d770b8e360e6c5ab2e6"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f9b2c2d4b9d958b1f9ae0c984ec1dd6b6689e15c75045be8ccb4011426268ca"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a92cf4b9bea33e15ecbaa5c59921be0f23222608143d025c989924f7e3e0c07"}, + {file = "aiohttp-3.13.2-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:070599407f4954021509193404c4ac53153525a19531051661440644728ba9a7"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:29562998ec66f988d49fb83c9b01694fa927186b781463f376c5845c121e4e0b"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4dd3db9d0f4ebca1d887d76f7cdbcd1116ac0d05a9221b9dad82c64a62578c4d"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d7bc4b7f9c4921eba72677cd9fedd2308f4a4ca3e12fab58935295ad9ea98700"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:dacd50501cd017f8cccb328da0c90823511d70d24a323196826d923aad865901"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8b2f1414f6a1e0683f212ec80e813f4abef94c739fd090b66c9adf9d2a05feac"}, + {file = "aiohttp-3.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04c3971421576ed24c191f610052bcb2f059e395bc2489dd99e397f9bc466329"}, + {file = 
"aiohttp-3.13.2-cp39-cp39-win32.whl", hash = "sha256:9f377d0a924e5cc94dc620bc6366fc3e889586a7f18b748901cf016c916e2084"}, + {file = "aiohttp-3.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:9c705601e16c03466cb72011bd1af55d68fa65b045356d8f96c216e5f6db0fa5"}, + {file = "aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.5.0" +aiosignal = ">=1.4.0" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "backports.zstd ; platform_python_implementation == \"CPython\" and python_version < \"3.14\"", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "aiosignal" +version = "1.4.0" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e"}, + {file = "aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" +typing-extensions = {version = ">=4.2", markers = "python_version < \"3.13\""} + +[[package]] +name = "astunparse" +version = "1.6.3" +description = "An AST unparser for Python" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, + {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, +] + +[package.dependencies] +six = ">=1.6.1,<2.0" +wheel = ">=0.23.0,<1.0" + +[[package]] +name = "async-timeout" +version = "5.0.1" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, + {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, +] + +[[package]] +name = "attrs" +version = "25.3.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, + {file = "attrs-25.3.0.tar.gz", hash = 
"sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] + +[[package]] +name = "attrs" +version = "25.4.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373"}, + {file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"}, +] + +[[package]] +name = "audioread" +version = "3.0.1" +description = "Multi-library, cross-platform audio decoding." +optional = false +python-versions = ">=3.6" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "audioread-3.0.1-py3-none-any.whl", hash = "sha256:4cdce70b8adc0da0a3c9e0d85fb10b3ace30fbdf8d1670fd443929b61d117c33"}, + {file = "audioread-3.0.1.tar.gz", hash = "sha256:ac5460a5498c48bdf2e8e767402583a4dcd13f4414d286f42ce4379e8b35066d"}, +] + +[package.extras] +test = ["tox"] + +[[package]] +name = "audioread" +version = "3.1.0" +description = "Multi-library, cross-platform audio decoding." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "audioread-3.1.0-py3-none-any.whl", hash = "sha256:b30d1df6c5d3de5dcef0fb0e256f6ea17bdcf5f979408df0297d8a408e2971b4"}, + {file = "audioread-3.1.0.tar.gz", hash = "sha256:1c4ab2f2972764c896a8ac61ac53e261c8d29f0c6ccd652f84e18f08a4cab190"}, +] + +[package.extras] +gi = ["pygobject (>=3.54.2,<4.0.0)"] +mad = ["pymad[mad] (>=0.11.3,<0.12.0)"] +test = ["pytest (>=8.4.2)", "pytest-cov (>=7.0.0)"] + +[[package]] +name = "boto3" +version = "1.36.13" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "boto3-1.36.13-py3-none-any.whl", hash = "sha256:20d97739cea1b0f549e9096c453ac727a350da28bd0451098714260b655a85ea"}, + {file = "boto3-1.36.13.tar.gz", hash = "sha256:c8031aa1c4a7c331081b2d86c49a362654b86e0b89d0a41fa166a68b226f4aba"}, +] + +[package.dependencies] +botocore = ">=1.36.13,<1.37.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.11.0,<0.12.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.36.13" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "botocore-1.36.13-py3-none-any.whl", hash = "sha256:d644a814440bf8d55f4e29b1c0e6f021e2573b7784e0c91f55f4d9d689e08005"}, + {file = "botocore-1.36.13.tar.gz", hash = "sha256:50a3ff292f8dfdde21074b5c916afe847b01e074ab16d9c9fe71b34960c77134"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = [ + {version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""}, + {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}, +] + +[package.extras] +crt = ["awscrt (==0.23.8)"] + +[[package]] +name = "cachetools" +version = "5.5.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "cachetools-5.5.1-py3-none-any.whl", hash = "sha256:b76651fdc3b24ead3c648bbdeeb940c1b04d365b38b4af66788f9ec4a81d42bb"}, + {file = "cachetools-5.5.1.tar.gz", hash = "sha256:70f238fbba50383ef62e55c6aff6d9673175fe59f7c6782c7a0b9e38f4a9df95"}, +] + +[[package]] +name = "certifi" +version = "2025.1.31" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cffi" +version = "2.0.0" +description = "Foreign Function 
Interface for Python calling C code." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = 
"sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, +] + +[package.dependencies] +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = 
"charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = 
"sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} + +[[package]] +name = "coloredlogs" +version = "15.0.1" +description = "Colored terminal output for Python's logging module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] +files = [ + {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, + {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, +] + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "datasets" +version = "2.21.0" +description = "HuggingFace community-driven open-source library of datasets" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "datasets-2.21.0-py3-none-any.whl", hash = "sha256:25e4e097110ce28824b746a107727ada94024cba11db8bc588d468414692b65a"}, + {file = "datasets-2.21.0.tar.gz", hash = "sha256:998f85a8460f1bd982e5bd058f8a0808eef424249e3df1e8cdd594ccd0dc8ba2"}, +] + +[package.dependencies] +aiohttp = "*" +dill = ">=0.3.0,<0.3.9" +filelock = "*" +fsspec = {version = ">=2023.1.0,<=2024.6.1", extras = ["http"]} +huggingface-hub = ">=0.21.2" +multiprocess = "*" +numpy = ">=1.17" +packaging = "*" +pandas = "*" +pyarrow = ">=15.0.0" +pyyaml = ">=5.1" +requests = ">=2.32.2" +tqdm = ">=4.66.3" +xxhash = "*" + +[package.extras] +apache-beam = ["apache-beam (>=2.26.0)"] +audio = ["librosa", "soundfile (>=0.12.1)", "soxr (>=0.4.0) ; python_version >= \"3.9\""] +benchmarks = ["tensorflow 
(==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] +dev = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "ruff (>=0.3.0)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0) ; python_version >= \"3.9\"", "sqlalchemy", "tensorflow (>=2.16.0) ; python_version >= \"3.10\"", "tensorflow (>=2.6.0)", "tensorflow (>=2.6.0) ; python_version < \"3.10\"", "tiktoken", "torch", "torch (>=2.0.0)", "transformers", "transformers (>=4.42.0)", "typing-extensions (>=4.6.1)", "zstandard"] +docs = ["s3fs", "tensorflow (>=2.6.0)", "torch", "transformers"] +jax = ["jax (>=0.3.14)", "jaxlib (>=0.3.14)"] +metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk (<3.8.2)", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] +quality = ["ruff (>=0.3.0)"] +s3 = ["s3fs"] +tensorflow = ["tensorflow (>=2.6.0)"] +tensorflow-gpu = ["tensorflow (>=2.6.0)"] +tests = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.8.0.post1)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0) ; python_version >= \"3.9\"", "sqlalchemy", "tensorflow (>=2.16.0) ; python_version >= \"3.10\"", "tensorflow (>=2.6.0) ; python_version < \"3.10\"", "tiktoken", "torch (>=2.0.0)", "transformers (>=4.42.0)", "typing-extensions (>=4.6.1)", "zstandard"] +tests-numpy2 = ["Pillow (>=9.4.0)", "absl-py", "decorator", "elasticsearch (<8.0.0)", "jax (>=0.3.14) ; sys_platform != \"win32\"", "jaxlib (>=0.3.14) ; sys_platform != \"win32\"", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "moto[server]", "polars[timezone] (>=0.20.0)", "protobuf (<4.0.0)", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "soxr (>=0.4.0) ; python_version >= \"3.9\"", "sqlalchemy", "tiktoken", "torch (>=2.0.0)", "typing-extensions (>=4.6.1)", "zstandard"] +torch = ["torch"] +vision = ["Pillow (>=9.4.0)"] + +[[package]] +name = "decorator" +version = "5.2.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, + {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = 
"sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, + {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "filelock" +version = "3.20.0" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2"}, + {file = "filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4"}, +] + +[[package]] +name = "flatbuffers" +version = "25.1.24" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +groups = ["main", "dev"] +files = [ + {file = "flatbuffers-25.1.24-py2.py3-none-any.whl", hash = "sha256:1abfebaf4083117225d0723087ea909896a34e3fec933beedb490d595ba24145"}, + {file = "flatbuffers-25.1.24.tar.gz", hash = "sha256:e0f7b7d806c0abdf166275492663130af40c11f89445045fbef0aa3c9a8643ad"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[[package]] +name = "frozenlist" +version = "1.5.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = 
"frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, +] + +[[package]] +name = "frozenlist" +version = "1.8.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "frozenlist-1.8.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b37f6d31b3dcea7deb5e9696e529a6aa4a898adc33db82da12e4c60a7c4d2011"}, + {file = "frozenlist-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef2b7b394f208233e471abc541cc6991f907ffd47dc72584acee3147899d6565"}, + {file = "frozenlist-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a88f062f072d1589b7b46e951698950e7da00442fc1cacbe17e19e025dc327ad"}, + {file = "frozenlist-1.8.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f57fb59d9f385710aa7060e89410aeb5058b99e62f4d16b08b91986b9a2140c2"}, + {file = "frozenlist-1.8.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:799345ab092bee59f01a915620b5d014698547afd011e691a208637312db9186"}, + {file = "frozenlist-1.8.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c23c3ff005322a6e16f71bf8692fcf4d5a304aaafe1e262c98c6d4adc7be863e"}, + {file = "frozenlist-1.8.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8a76ea0f0b9dfa06f254ee06053d93a600865b3274358ca48a352ce4f0798450"}, + {file = "frozenlist-1.8.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c7366fe1418a6133d5aa824ee53d406550110984de7637d65a178010f759c6ef"}, + {file = 
"frozenlist-1.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13d23a45c4cebade99340c4165bd90eeb4a56c6d8a9d8aa49568cac19a6d0dc4"}, + {file = "frozenlist-1.8.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4a3408834f65da56c83528fb52ce7911484f0d1eaf7b761fc66001db1646eff"}, + {file = "frozenlist-1.8.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42145cd2748ca39f32801dad54aeea10039da6f86e303659db90db1c4b614c8c"}, + {file = "frozenlist-1.8.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e2de870d16a7a53901e41b64ffdf26f2fbb8917b3e6ebf398098d72c5b20bd7f"}, + {file = "frozenlist-1.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:20e63c9493d33ee48536600d1a5c95eefc870cd71e7ab037763d1fbb89cc51e7"}, + {file = "frozenlist-1.8.0-cp310-cp310-win32.whl", hash = "sha256:adbeebaebae3526afc3c96fad434367cafbfd1b25d72369a9e5858453b1bb71a"}, + {file = "frozenlist-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:667c3777ca571e5dbeb76f331562ff98b957431df140b54c85fd4d52eea8d8f6"}, + {file = "frozenlist-1.8.0-cp310-cp310-win_arm64.whl", hash = "sha256:80f85f0a7cc86e7a54c46d99c9e1318ff01f4687c172ede30fd52d19d1da1c8e"}, + {file = "frozenlist-1.8.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:09474e9831bc2b2199fad6da3c14c7b0fbdd377cce9d3d77131be28906cb7d84"}, + {file = "frozenlist-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:17c883ab0ab67200b5f964d2b9ed6b00971917d5d8a92df149dc2c9779208ee9"}, + {file = "frozenlist-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa47e444b8ba08fffd1c18e8cdb9a75db1b6a27f17507522834ad13ed5922b93"}, + {file = "frozenlist-1.8.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2552f44204b744fba866e573be4c1f9048d6a324dfe14475103fd51613eb1d1f"}, + {file = "frozenlist-1.8.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e7c38f250991e48a9a73e6423db1bb9dd14e722a10f6b8bb8e16a0f55f695"}, + {file = "frozenlist-1.8.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8585e3bb2cdea02fc88ffa245069c36555557ad3609e83be0ec71f54fd4abb52"}, + {file = "frozenlist-1.8.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:edee74874ce20a373d62dc28b0b18b93f645633c2943fd90ee9d898550770581"}, + {file = "frozenlist-1.8.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c9a63152fe95756b85f31186bddf42e4c02c6321207fd6601a1c89ebac4fe567"}, + {file = "frozenlist-1.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6db2185db9be0a04fecf2f241c70b63b1a242e2805be291855078f2b404dd6b"}, + {file = "frozenlist-1.8.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f4be2e3d8bc8aabd566f8d5b8ba7ecc09249d74ba3c9ed52e54dc23a293f0b92"}, + {file = "frozenlist-1.8.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c8d1634419f39ea6f5c427ea2f90ca85126b54b50837f31497f3bf38266e853d"}, + {file = "frozenlist-1.8.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1a7fa382a4a223773ed64242dbe1c9c326ec09457e6b8428efb4118c685c3dfd"}, + {file = "frozenlist-1.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11847b53d722050808926e785df837353bd4d75f1d494377e59b23594d834967"}, + {file = "frozenlist-1.8.0-cp311-cp311-win32.whl", hash = "sha256:27c6e8077956cf73eadd514be8fb04d77fc946a7fe9f7fe167648b0b9085cc25"}, + {file = "frozenlist-1.8.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:ac913f8403b36a2c8610bbfd25b8013488533e71e62b4b4adce9c86c8cea905b"}, + {file = "frozenlist-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:d4d3214a0f8394edfa3e303136d0575eece0745ff2b47bd2cb2e66dd92d4351a"}, + {file = "frozenlist-1.8.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78f7b9e5d6f2fdb88cdde9440dc147259b62b9d3b019924def9f6478be254ac1"}, + {file = "frozenlist-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:229bf37d2e4acdaf808fd3f06e854a4a7a3661e871b10dc1f8f1896a3b05f18b"}, + {file = "frozenlist-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f833670942247a14eafbb675458b4e61c82e002a148f49e68257b79296e865c4"}, + {file = "frozenlist-1.8.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:494a5952b1c597ba44e0e78113a7266e656b9794eec897b19ead706bd7074383"}, + {file = "frozenlist-1.8.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96f423a119f4777a4a056b66ce11527366a8bb92f54e541ade21f2374433f6d4"}, + {file = "frozenlist-1.8.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3462dd9475af2025c31cc61be6652dfa25cbfb56cbbf52f4ccfe029f38decaf8"}, + {file = "frozenlist-1.8.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4c800524c9cd9bac5166cd6f55285957fcfc907db323e193f2afcd4d9abd69b"}, + {file = "frozenlist-1.8.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d6a5df73acd3399d893dafc71663ad22534b5aa4f94e8a2fabfe856c3c1b6a52"}, + {file = "frozenlist-1.8.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:405e8fe955c2280ce66428b3ca55e12b3c4e9c336fb2103a4937e891c69a4a29"}, + {file = "frozenlist-1.8.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:908bd3f6439f2fef9e85031b59fd4f1297af54415fb60e4254a95f75b3cab3f3"}, + {file = "frozenlist-1.8.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:294e487f9ec720bd8ffcebc99d575f7eff3568a08a253d1ee1a0378754b74143"}, + {file = "frozenlist-1.8.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:74c51543498289c0c43656701be6b077f4b265868fa7f8a8859c197006efb608"}, + {file = "frozenlist-1.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:776f352e8329135506a1d6bf16ac3f87bc25b28e765949282dcc627af36123aa"}, + {file = "frozenlist-1.8.0-cp312-cp312-win32.whl", hash = "sha256:433403ae80709741ce34038da08511d4a77062aa924baf411ef73d1146e74faf"}, + {file = "frozenlist-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:34187385b08f866104f0c0617404c8eb08165ab1272e884abc89c112e9c00746"}, + {file = "frozenlist-1.8.0-cp312-cp312-win_arm64.whl", hash = "sha256:fe3c58d2f5db5fbd18c2987cba06d51b0529f52bc3a6cdc33d3f4eab725104bd"}, + {file = "frozenlist-1.8.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8d92f1a84bb12d9e56f818b3a746f3efba93c1b63c8387a73dde655e1e42282a"}, + {file = "frozenlist-1.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96153e77a591c8adc2ee805756c61f59fef4cf4073a9275ee86fe8cba41241f7"}, + {file = "frozenlist-1.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f21f00a91358803399890ab167098c131ec2ddd5f8f5fd5fe9c9f2c6fcd91e40"}, + {file = "frozenlist-1.8.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fb30f9626572a76dfe4293c7194a09fb1fe93ba94c7d4f720dfae3b646b45027"}, + {file = "frozenlist-1.8.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash 
= "sha256:eaa352d7047a31d87dafcacbabe89df0aa506abb5b1b85a2fb91bc3faa02d822"}, + {file = "frozenlist-1.8.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:03ae967b4e297f58f8c774c7eabcce57fe3c2434817d4385c50661845a058121"}, + {file = "frozenlist-1.8.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6292f1de555ffcc675941d65fffffb0a5bcd992905015f85d0592201793e0e5"}, + {file = "frozenlist-1.8.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29548f9b5b5e3460ce7378144c3010363d8035cea44bc0bf02d57f5a685e084e"}, + {file = "frozenlist-1.8.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ec3cc8c5d4084591b4237c0a272cc4f50a5b03396a47d9caaf76f5d7b38a4f11"}, + {file = "frozenlist-1.8.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:517279f58009d0b1f2e7c1b130b377a349405da3f7621ed6bfae50b10adf20c1"}, + {file = "frozenlist-1.8.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:db1e72ede2d0d7ccb213f218df6a078a9c09a7de257c2fe8fcef16d5925230b1"}, + {file = "frozenlist-1.8.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b4dec9482a65c54a5044486847b8a66bf10c9cb4926d42927ec4e8fd5db7fed8"}, + {file = "frozenlist-1.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21900c48ae04d13d416f0e1e0c4d81f7931f73a9dfa0b7a8746fb2fe7dd970ed"}, + {file = "frozenlist-1.8.0-cp313-cp313-win32.whl", hash = "sha256:8b7b94a067d1c504ee0b16def57ad5738701e4ba10cec90529f13fa03c833496"}, + {file = "frozenlist-1.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:878be833caa6a3821caf85eb39c5ba92d28e85df26d57afb06b35b2efd937231"}, + {file = "frozenlist-1.8.0-cp313-cp313-win_arm64.whl", hash = "sha256:44389d135b3ff43ba8cc89ff7f51f5a0bb6b63d829c8300f79a2fe4fe61bcc62"}, + {file = "frozenlist-1.8.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e25ac20a2ef37e91c1b39938b591457666a0fa835c7783c3a8f33ea42870db94"}, + {file = "frozenlist-1.8.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07cdca25a91a4386d2e76ad992916a85038a9b97561bf7a3fd12d5d9ce31870c"}, + {file = "frozenlist-1.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e0c11f2cc6717e0a741f84a527c52616140741cd812a50422f83dc31749fb52"}, + {file = "frozenlist-1.8.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b3210649ee28062ea6099cfda39e147fa1bc039583c8ee4481cb7811e2448c51"}, + {file = "frozenlist-1.8.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:581ef5194c48035a7de2aefc72ac6539823bb71508189e5de01d60c9dcd5fa65"}, + {file = "frozenlist-1.8.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3ef2d026f16a2b1866e1d86fc4e1291e1ed8a387b2c333809419a2f8b3a77b82"}, + {file = "frozenlist-1.8.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5500ef82073f599ac84d888e3a8c1f77ac831183244bfd7f11eaa0289fb30714"}, + {file = "frozenlist-1.8.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:50066c3997d0091c411a66e710f4e11752251e6d2d73d70d8d5d4c76442a199d"}, + {file = "frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5c1c8e78426e59b3f8005e9b19f6ff46e5845895adbde20ece9218319eca6506"}, + {file = "frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:eefdba20de0d938cec6a89bd4d70f346a03108a19b9df4248d3cf0d88f1b0f51"}, + 
{file = "frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cf253e0e1c3ceb4aaff6df637ce033ff6535fb8c70a764a8f46aafd3d6ab798e"}, + {file = "frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:032efa2674356903cd0261c4317a561a6850f3ac864a63fc1583147fb05a79b0"}, + {file = "frozenlist-1.8.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6da155091429aeba16851ecb10a9104a108bcd32f6c1642867eadaee401c1c41"}, + {file = "frozenlist-1.8.0-cp313-cp313t-win32.whl", hash = "sha256:0f96534f8bfebc1a394209427d0f8a63d343c9779cda6fc25e8e121b5fd8555b"}, + {file = "frozenlist-1.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5d63a068f978fc69421fb0e6eb91a9603187527c86b7cd3f534a5b77a592b888"}, + {file = "frozenlist-1.8.0-cp313-cp313t-win_arm64.whl", hash = "sha256:bf0a7e10b077bf5fb9380ad3ae8ce20ef919a6ad93b4552896419ac7e1d8e042"}, + {file = "frozenlist-1.8.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cee686f1f4cadeb2136007ddedd0aaf928ab95216e7691c63e50a8ec066336d0"}, + {file = "frozenlist-1.8.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:119fb2a1bd47307e899c2fac7f28e85b9a543864df47aa7ec9d3c1b4545f096f"}, + {file = "frozenlist-1.8.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4970ece02dbc8c3a92fcc5228e36a3e933a01a999f7094ff7c23fbd2beeaa67c"}, + {file = "frozenlist-1.8.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:cba69cb73723c3f329622e34bdbf5ce1f80c21c290ff04256cff1cd3c2036ed2"}, + {file = "frozenlist-1.8.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:778a11b15673f6f1df23d9586f83c4846c471a8af693a22e066508b77d201ec8"}, + {file = "frozenlist-1.8.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0325024fe97f94c41c08872db482cf8ac4800d80e79222c6b0b7b162d5b13686"}, + {file = "frozenlist-1.8.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:97260ff46b207a82a7567b581ab4190bd4dfa09f4db8a8b49d1a958f6aa4940e"}, + {file = "frozenlist-1.8.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:54b2077180eb7f83dd52c40b2750d0a9f175e06a42e3213ce047219de902717a"}, + {file = "frozenlist-1.8.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2f05983daecab868a31e1da44462873306d3cbfd76d1f0b5b69c473d21dbb128"}, + {file = "frozenlist-1.8.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:33f48f51a446114bc5d251fb2954ab0164d5be02ad3382abcbfe07e2531d650f"}, + {file = "frozenlist-1.8.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:154e55ec0655291b5dd1b8731c637ecdb50975a2ae70c606d100750a540082f7"}, + {file = "frozenlist-1.8.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4314debad13beb564b708b4a496020e5306c7333fa9a3ab90374169a20ffab30"}, + {file = "frozenlist-1.8.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:073f8bf8becba60aa931eb3bc420b217bb7d5b8f4750e6f8b3be7f3da85d38b7"}, + {file = "frozenlist-1.8.0-cp314-cp314-win32.whl", hash = "sha256:bac9c42ba2ac65ddc115d930c78d24ab8d4f465fd3fc473cdedfccadb9429806"}, + {file = "frozenlist-1.8.0-cp314-cp314-win_amd64.whl", hash = "sha256:3e0761f4d1a44f1d1a47996511752cf3dcec5bbdd9cc2b4fe595caf97754b7a0"}, + {file = "frozenlist-1.8.0-cp314-cp314-win_arm64.whl", hash = "sha256:d1eaff1d00c7751b7c6662e9c5ba6eb2c17a2306ba5e2a37f24ddf3cc953402b"}, + {file = "frozenlist-1.8.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = 
"sha256:d3bb933317c52d7ea5004a1c442eef86f426886fba134ef8cf4226ea6ee1821d"}, + {file = "frozenlist-1.8.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8009897cdef112072f93a0efdce29cd819e717fd2f649ee3016efd3cd885a7ed"}, + {file = "frozenlist-1.8.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2c5dcbbc55383e5883246d11fd179782a9d07a986c40f49abe89ddf865913930"}, + {file = "frozenlist-1.8.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:39ecbc32f1390387d2aa4f5a995e465e9e2f79ba3adcac92d68e3e0afae6657c"}, + {file = "frozenlist-1.8.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92db2bf818d5cc8d9c1f1fc56b897662e24ea5adb36ad1f1d82875bd64e03c24"}, + {file = "frozenlist-1.8.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dc43a022e555de94c3b68a4ef0b11c4f747d12c024a520c7101709a2144fb37"}, + {file = "frozenlist-1.8.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb89a7f2de3602cfed448095bab3f178399646ab7c61454315089787df07733a"}, + {file = "frozenlist-1.8.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:33139dc858c580ea50e7e60a1b0ea003efa1fd42e6ec7fdbad78fff65fad2fd2"}, + {file = "frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:168c0969a329b416119507ba30b9ea13688fafffac1b7822802537569a1cb0ef"}, + {file = "frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:28bd570e8e189d7f7b001966435f9dac6718324b5be2990ac496cf1ea9ddb7fe"}, + {file = "frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b2a095d45c5d46e5e79ba1e5b9cb787f541a8dee0433836cea4b96a2c439dcd8"}, + {file = "frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:eab8145831a0d56ec9c4139b6c3e594c7a83c2c8be25d5bcf2d86136a532287a"}, + {file = "frozenlist-1.8.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:974b28cf63cc99dfb2188d8d222bc6843656188164848c4f679e63dae4b0708e"}, + {file = "frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df"}, + {file = "frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd"}, + {file = "frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79"}, + {file = "frozenlist-1.8.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8b7138e5cd0647e4523d6685b0eac5d4be9a184ae9634492f25c6eb38c12a47"}, + {file = "frozenlist-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a6483e309ca809f1efd154b4d37dc6d9f61037d6c6a81c2dc7a15cb22c8c5dca"}, + {file = "frozenlist-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b9290cf81e95e93fdf90548ce9d3c1211cf574b8e3f4b3b7cb0537cf2227068"}, + {file = "frozenlist-1.8.0-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:59a6a5876ca59d1b63af8cd5e7ffffb024c3dc1e9cf9301b21a2e76286505c95"}, + {file = "frozenlist-1.8.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6dc4126390929823e2d2d9dc79ab4046ed74680360fc5f38b585c12c66cdf459"}, + {file = "frozenlist-1.8.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:332db6b2563333c5671fecacd085141b5800cb866be16d5e3eb15a2086476675"}, + {file = 
"frozenlist-1.8.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9ff15928d62a0b80bb875655c39bf517938c7d589554cbd2669be42d97c2cb61"}, + {file = "frozenlist-1.8.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7bf6cdf8e07c8151fba6fe85735441240ec7f619f935a5205953d58009aef8c6"}, + {file = "frozenlist-1.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:48e6d3f4ec5c7273dfe83ff27c91083c6c9065af655dc2684d2c200c94308bb5"}, + {file = "frozenlist-1.8.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:1a7607e17ad33361677adcd1443edf6f5da0ce5e5377b798fba20fae194825f3"}, + {file = "frozenlist-1.8.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3a935c3a4e89c733303a2d5a7c257ea44af3a56c8202df486b7f5de40f37e1"}, + {file = "frozenlist-1.8.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:940d4a017dbfed9daf46a3b086e1d2167e7012ee297fef9e1c545c4d022f5178"}, + {file = "frozenlist-1.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b9be22a69a014bc47e78072d0ecae716f5eb56c15238acca0f43d6eb8e4a5bda"}, + {file = "frozenlist-1.8.0-cp39-cp39-win32.whl", hash = "sha256:1aa77cb5697069af47472e39612976ed05343ff2e84a3dcf15437b232cbfd087"}, + {file = "frozenlist-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:7398c222d1d405e796970320036b1b563892b65809d9e5261487bb2c7f7b5c6a"}, + {file = "frozenlist-1.8.0-cp39-cp39-win_arm64.whl", hash = "sha256:b4f3b365f31c6cd4af24545ca0a244a53688cad8834e32f56831c4923b50a103"}, + {file = "frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d"}, + {file = "frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad"}, +] + +[[package]] +name = "fsspec" +version = "2024.6.1" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, + {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, +] + +[package.dependencies] +aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", 
"cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "fvcore" +version = "0.1.5.post20221221" +description = "Collection of common code shared among different research projects in FAIR computer vision team" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "fvcore-0.1.5.post20221221.tar.gz", hash = "sha256:f2fb0bb90572ae651c11c78e20493ed19b2240550a7e4bbb2d6de87bdd037860"}, +] + +[package.dependencies] +iopath = ">=0.1.7" +numpy = "*" +Pillow = "*" +pyyaml = ">=5.1" +tabulate = "*" +termcolor = ">=1.1" +tqdm = "*" +yacs = ">=0.1.6" + +[package.extras] +all = ["shapely"] + +[[package]] +name = "gast" +version = "0.4.0" +description = "Python AST that abstracts the underlying Python version" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "gast-0.4.0-py3-none-any.whl", hash = "sha256:b7adcdd5adbebf1adf17378da5ba3f543684dbec47b1cda1f3997e573cd542c4"}, + {file = "gast-0.4.0.tar.gz", hash = "sha256:40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1"}, +] + +[[package]] +name = "google-auth" +version = "2.38.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, + {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-oauthlib" +version = "1.0.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.6" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"}, + {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"}, +] + +[package.dependencies] +google-auth = ">=2.15.0" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click (>=6.0.0)"] + +[[package]] +name = "google-pasta" +version = "0.2.0" +description = "pasta is an AST-based Python refactoring library" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = 
"google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, + {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, + {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "grpcio" +version = "1.70.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"}, + {file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:374d014f29f9dfdb40510b041792e0e2828a1389281eb590df066e1cc2b404e5"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2af68a6f5c8f78d56c145161544ad0febbd7479524a59c16b3e25053f39c87f"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7df14b2dcd1102a2ec32f621cc9fab6695effef516efbc6b063ad749867295"}, + {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c78b339869f4dbf89881e0b6fbf376313e4f845a42840a7bdf42ee6caed4b11f"}, + {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58ad9ba575b39edef71f4798fdb5c7b6d02ad36d47949cd381d4392a5c9cbcd3"}, + {file = "grpcio-1.70.0-cp310-cp310-win32.whl", hash = "sha256:2b0d02e4b25a5c1f9b6c7745d4fa06efc9fd6a611af0fb38d3ba956786b95199"}, + {file = "grpcio-1.70.0-cp310-cp310-win_amd64.whl", hash = "sha256:0de706c0a5bb9d841e353f6343a9defc9fc35ec61d6eb6111802f3aa9fef29e1"}, + {file = "grpcio-1.70.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:17325b0be0c068f35770f944124e8839ea3185d6d54862800fc28cc2ffad205a"}, + {file = "grpcio-1.70.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:dbe41ad140df911e796d4463168e33ef80a24f5d21ef4d1e310553fcd2c4a386"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5ea67c72101d687d44d9c56068328da39c9ccba634cabb336075fae2eab0d04b"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb5277db254ab7586769e490b7b22f4ddab3876c490da0a1a9d7c695ccf0bf77"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7831a0fc1beeeb7759f737f5acd9fdcda520e955049512d68fda03d91186eea"}, + {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27cc75e22c5dba1fbaf5a66c778e36ca9b8ce850bf58a9db887754593080d839"}, + {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63764963412e22f0491d0d32833d71087288f4e24cbcddbae82476bfa1d81fd"}, + {file = "grpcio-1.70.0-cp311-cp311-win32.whl", hash = "sha256:bb491125103c800ec209d84c9b51f1c60ea456038e4734688004f377cfacc113"}, + {file = "grpcio-1.70.0-cp311-cp311-win_amd64.whl", hash = "sha256:d24035d49e026353eb042bf7b058fb831db3e06d52bee75c5f2f3ab453e71aca"}, + {file = "grpcio-1.70.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:ef4c14508299b1406c32bdbb9fb7b47612ab979b04cf2b27686ea31882387cff"}, + {file = "grpcio-1.70.0-cp312-cp312-macosx_10_14_universal2.whl", hash = 
"sha256:aa47688a65643afd8b166928a1da6247d3f46a2784d301e48ca1cc394d2ffb40"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:880bfb43b1bb8905701b926274eafce5c70a105bc6b99e25f62e98ad59cb278e"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e654c4b17d07eab259d392e12b149c3a134ec52b11ecdc6a515b39aceeec898"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2394e3381071045a706ee2eeb6e08962dd87e8999b90ac15c55f56fa5a8c9597"}, + {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b3c76701428d2df01964bc6479422f20e62fcbc0a37d82ebd58050b86926ef8c"}, + {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac073fe1c4cd856ebcf49e9ed6240f4f84d7a4e6ee95baa5d66ea05d3dd0df7f"}, + {file = "grpcio-1.70.0-cp312-cp312-win32.whl", hash = "sha256:cd24d2d9d380fbbee7a5ac86afe9787813f285e684b0271599f95a51bce33528"}, + {file = "grpcio-1.70.0-cp312-cp312-win_amd64.whl", hash = "sha256:0495c86a55a04a874c7627fd33e5beaee771917d92c0e6d9d797628ac40e7655"}, + {file = "grpcio-1.70.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa573896aeb7d7ce10b1fa425ba263e8dddd83d71530d1322fd3a16f31257b4a"}, + {file = "grpcio-1.70.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:d405b005018fd516c9ac529f4b4122342f60ec1cee181788249372524e6db429"}, + {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f32090238b720eb585248654db8e3afc87b48d26ac423c8dde8334a232ff53c9"}, + {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa089a734f24ee5f6880c83d043e4f46bf812fcea5181dcb3a572db1e79e01c"}, + {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19375f0300b96c0117aca118d400e76fede6db6e91f3c34b7b035822e06c35f"}, + {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7c73c42102e4a5ec76608d9b60227d917cea46dff4d11d372f64cbeb56d259d0"}, + {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0a5c78d5198a1f0aa60006cd6eb1c912b4a1520b6a3968e677dbcba215fabb40"}, + {file = "grpcio-1.70.0-cp313-cp313-win32.whl", hash = "sha256:fe9dbd916df3b60e865258a8c72ac98f3ac9e2a9542dcb72b7a34d236242a5ce"}, + {file = "grpcio-1.70.0-cp313-cp313-win_amd64.whl", hash = "sha256:4119fed8abb7ff6c32e3d2255301e59c316c22d31ab812b3fbcbaf3d0d87cc68"}, + {file = "grpcio-1.70.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:8058667a755f97407fca257c844018b80004ae8035565ebc2812cc550110718d"}, + {file = "grpcio-1.70.0-cp38-cp38-macosx_10_14_universal2.whl", hash = "sha256:879a61bf52ff8ccacbedf534665bb5478ec8e86ad483e76fe4f729aaef867cab"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ba0a173f4feacf90ee618fbc1a27956bfd21260cd31ced9bc707ef551ff7dc7"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558c386ecb0148f4f99b1a65160f9d4b790ed3163e8610d11db47838d452512d"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:412faabcc787bbc826f51be261ae5fa996b21263de5368a55dc2cf824dc5090e"}, + {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3b0f01f6ed9994d7a0b27eeddea43ceac1b7e6f3f9d86aeec0f0064b8cf50fdb"}, + {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7385b1cb064734005204bc8994eed7dcb801ed6c2eda283f613ad8c6c75cf873"}, + {file = "grpcio-1.70.0-cp38-cp38-win32.whl", hash = 
"sha256:07269ff4940f6fb6710951116a04cd70284da86d0a4368fd5a3b552744511f5a"}, + {file = "grpcio-1.70.0-cp38-cp38-win_amd64.whl", hash = "sha256:aba19419aef9b254e15011b230a180e26e0f6864c90406fdbc255f01d83bc83c"}, + {file = "grpcio-1.70.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4f1937f47c77392ccd555728f564a49128b6a197a05a5cd527b796d36f3387d0"}, + {file = "grpcio-1.70.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:0cd430b9215a15c10b0e7d78f51e8a39d6cf2ea819fd635a7214fae600b1da27"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:e27585831aa6b57b9250abaf147003e126cd3a6c6ca0c531a01996f31709bed1"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1af8e15b0f0fe0eac75195992a63df17579553b0c4af9f8362cc7cc99ccddf4"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbce24409beaee911c574a3d75d12ffb8c3e3dd1b813321b1d7a96bbcac46bf4"}, + {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ff4a8112a79464919bb21c18e956c54add43ec9a4850e3949da54f61c241a4a6"}, + {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5413549fdf0b14046c545e19cfc4eb1e37e9e1ebba0ca390a8d4e9963cab44d2"}, + {file = "grpcio-1.70.0-cp39-cp39-win32.whl", hash = "sha256:b745d2c41b27650095e81dea7091668c040457483c9bdb5d0d9de8f8eb25e59f"}, + {file = "grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c"}, + {file = "grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.70.0)"] + +[[package]] +name = "h5py" +version = "3.11.0" +description = "Read and write HDF5 files from Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "h5py-3.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1625fd24ad6cfc9c1ccd44a66dac2396e7ee74940776792772819fc69f3a3731"}, + {file = "h5py-3.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c072655ad1d5fe9ef462445d3e77a8166cbfa5e599045f8aa3c19b75315f10e5"}, + {file = "h5py-3.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77b19a40788e3e362b54af4dcf9e6fde59ca016db2c61360aa30b47c7b7cef00"}, + {file = "h5py-3.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:ef4e2f338fc763f50a8113890f455e1a70acd42a4d083370ceb80c463d803972"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd732a08187a9e2a6ecf9e8af713f1d68256ee0f7c8b652a32795670fb481ba"}, + {file = "h5py-3.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75bd7b3d93fbeee40860fd70cdc88df4464e06b70a5ad9ce1446f5f32eb84007"}, + {file = "h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c416f8eb0daae39dabe71415cb531f95dce2d81e1f61a74537a50c63b28ab3"}, + {file = "h5py-3.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:083e0329ae534a264940d6513f47f5ada617da536d8dccbafc3026aefc33c90e"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a76cae64080210389a571c7d13c94a1a6cf8cb75153044fd1f822a962c97aeab"}, + {file = "h5py-3.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3736fe21da2b7d8a13fe8fe415f1272d2a1ccdeff4849c1421d2fb30fd533bc"}, + {file = "h5py-3.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:aa6ae84a14103e8dc19266ef4c3e5d7c00b68f21d07f2966f0ca7bdb6c2761fb"}, + {file = "h5py-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:21dbdc5343f53b2e25404673c4f00a3335aef25521bd5fa8c707ec3833934892"}, + {file = "h5py-3.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:754c0c2e373d13d6309f408325343b642eb0f40f1a6ad21779cfa9502209e150"}, + {file = "h5py-3.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:731839240c59ba219d4cb3bc5880d438248533366f102402cfa0621b71796b62"}, + {file = "h5py-3.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ec9df3dd2018904c4cc06331951e274f3f3fd091e6d6cc350aaa90fa9b42a76"}, + {file = "h5py-3.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:55106b04e2c83dfb73dc8732e9abad69d83a436b5b82b773481d95d17b9685e1"}, + {file = "h5py-3.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f4e025e852754ca833401777c25888acb96889ee2c27e7e629a19aee288833f0"}, + {file = "h5py-3.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c4b760082626120031d7902cd983d8c1f424cdba2809f1067511ef283629d4b"}, + {file = "h5py-3.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67462d0669f8f5459529de179f7771bd697389fcb3faab54d63bf788599a48ea"}, + {file = "h5py-3.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:d9c944d364688f827dc889cf83f1fca311caf4fa50b19f009d1f2b525edd33a3"}, + {file = "h5py-3.11.0.tar.gz", hash = "sha256:7b7e8f78072a2edec87c9836f25f34203fd492a4475709a18b417a33cfb21fa9"}, +] + +[package.dependencies] +numpy = ">=1.17.3" + +[[package]] +name = "hf-xet" +version = "1.2.0" +description = "Fast transfer of large files with the Hugging Face Hub." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\"" +files = [ + {file = "hf_xet-1.2.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:ceeefcd1b7aed4956ae8499e2199607765fbd1c60510752003b6cc0b8413b649"}, + {file = "hf_xet-1.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b70218dd548e9840224df5638fdc94bd033552963cfa97f9170829381179c813"}, + {file = "hf_xet-1.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d40b18769bb9a8bc82a9ede575ce1a44c75eb80e7375a01d76259089529b5dc"}, + {file = "hf_xet-1.2.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd3a6027d59cfb60177c12d6424e31f4b5ff13d8e3a1247b3a584bf8977e6df5"}, + {file = "hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6de1fc44f58f6dd937956c8d304d8c2dea264c80680bcfa61ca4a15e7b76780f"}, + {file = "hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f182f264ed2acd566c514e45da9f2119110e48a87a327ca271027904c70c5832"}, + {file = "hf_xet-1.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:293a7a3787e5c95d7be1857358a9130694a9c6021de3f27fa233f37267174382"}, + {file = "hf_xet-1.2.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:10bfab528b968c70e062607f663e21e34e2bba349e8038db546646875495179e"}, + {file = "hf_xet-1.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a212e842647b02eb6a911187dc878e79c4aa0aa397e88dd3b26761676e8c1f8"}, + {file = "hf_xet-1.2.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e06daccb3a7d4c065f34fc26c14c74f4653069bb2b194e7f18f17cbe9939c0"}, + {file = "hf_xet-1.2.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:29c8fc913a529ec0a91867ce3d119ac1aac966e098cf49501800c870328cc090"}, + {file = 
"hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e159cbfcfbb29f920db2c09ed8b660eb894640d284f102ada929b6e3dc410a"}, + {file = "hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9c91d5ae931510107f148874e9e2de8a16052b6f1b3ca3c1b12f15ccb491390f"}, + {file = "hf_xet-1.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:210d577732b519ac6ede149d2f2f34049d44e8622bf14eb3d63bbcd2d4b332dc"}, + {file = "hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848"}, + {file = "hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4"}, + {file = "hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd"}, + {file = "hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c"}, + {file = "hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737"}, + {file = "hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865"}, + {file = "hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69"}, + {file = "hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "huggingface-hub" +version = "0.36.0" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "huggingface_hub-0.36.0-py3-none-any.whl", hash = "sha256:7bcc9ad17d5b3f07b57c78e79d527102d08313caa278a641993acddcb894548d"}, + {file = "huggingface_hub-0.36.0.tar.gz", hash = "sha256:47b3f0e2539c39bf5cde015d63b72ec49baff67b6931c3d97f3f84532e2b8d25"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +hf-xet = {version = ">=1.1.3,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""} +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "libcst (>=1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", 
"pytest-xdist", "ruff (>=0.9.0)", "soundfile", "ty", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +hf-xet = ["hf-xet (>=1.1.2,<2.0.0)"] +inference = ["aiohttp"] +mcp = ["aiohttp", "mcp (>=1.8.0)", "typer"] +oauth = ["authlib (>=1.3.2)", "fastapi", "httpx", "itsdangerous"] +quality = ["libcst (>=1.4.0)", "mypy (==1.15.0) ; python_version >= \"3.9\"", "mypy (>=1.14.1,<1.15.0) ; python_version == \"3.8\"", "ruff (>=0.9.0)", "ty"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "authlib (>=1.3.2)", "fastapi", "gradio (>=4.0.0)", "httpx", "itsdangerous", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures (<16.0)", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["dev"] +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.5.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, +] +markers = {main = "python_version < \"3.10\" and (platform_machine == \"x86_64\" or platform_machine == \"arm64\")", dev = "python_version == \"3.8\""} + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", 
"importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, +] + +[[package]] +name = "iopath" +version = "0.1.10" +description = "A library for providing I/O abstraction." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "iopath-0.1.10.tar.gz", hash = "sha256:3311c16a4d9137223e20f141655759933e1eda24f8bff166af834af3c645ef01"}, +] + +[package.dependencies] +portalocker = "*" +tqdm = "*" +typing_extensions = "*" + +[package.extras] +aws = ["boto3"] + +[[package]] +name = "jax" +version = "0.4.13" +description = "Differentiate, compile, and transform Numpy code." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "jax-0.4.13.tar.gz", hash = "sha256:03bfe6749dfe647f16f15f6616638adae6c4a7ca7167c75c21961ecfd3a3baaa"}, +] + +[package.dependencies] +importlib_metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} +ml_dtypes = ">=0.1.0" +numpy = ">=1.21" +opt_einsum = "*" +scipy = ">=1.7" + +[package.extras] +australis = ["protobuf (>=3.13,<4)"] +ci = ["jaxlib (==0.4.12)"] +cpu = ["jaxlib (==0.4.13)"] +cuda = ["jaxlib (==0.4.13+cuda11.cudnn86)"] +cuda11-cudnn86 = ["jaxlib (==0.4.13+cuda11.cudnn86)"] +cuda11-local = ["jaxlib (==0.4.13+cuda11.cudnn86)"] +cuda11-pip = ["jaxlib (==0.4.13+cuda11.cudnn86)", "nvidia-cublas-cu11 (>=11.11)", "nvidia-cuda-cupti-cu11 (>=11.8)", "nvidia-cuda-nvcc-cu11 (>=11.8)", "nvidia-cuda-runtime-cu11 (>=11.8)", "nvidia-cudnn-cu11 (>=8.8)", "nvidia-cufft-cu11 (>=10.9)", "nvidia-cusolver-cu11 (>=11.4)", "nvidia-cusparse-cu11 (>=11.7)"] +cuda12-local = ["jaxlib (==0.4.13+cuda12.cudnn89)"] +cuda12-pip = ["jaxlib (==0.4.13+cuda12.cudnn89)", "nvidia-cublas-cu12", "nvidia-cuda-cupti-cu12", "nvidia-cuda-nvcc-cu12", "nvidia-cuda-runtime-cu12", "nvidia-cudnn-cu12 (>=8.9)", "nvidia-cufft-cu12", "nvidia-cusolver-cu12", "nvidia-cusparse-cu12"] +minimum-jaxlib = ["jaxlib (==0.4.11)"] +tpu = ["jaxlib (==0.4.13)", "libtpu-nightly (==0.1.dev20230622)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = 
"joblib" +version = "1.4.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "joblib" +version = "1.5.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241"}, + {file = "joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55"}, +] + +[[package]] +name = "keras" +version = "2.12.0" +description = "Deep learning for humans." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "keras-2.12.0-py2.py3-none-any.whl", hash = "sha256:35c39534011e909645fb93515452e98e1a0ce23727b55d4918b9c58b2308c15e"}, +] + +[[package]] +name = "keras-data-format-converter" +version = "0.1.22" +description = "Generates equal keras models with the desired data format" +optional = false +python-versions = "<3.11,>=3.8" +groups = ["main"] +files = [ + {file = "keras_data_format_converter-0.1.22-py3-none-any.whl", hash = "sha256:193477345cec20c70f22f76be49a4cb54be16512c1e524398293628ea92900c4"}, + {file = "keras_data_format_converter-0.1.22.tar.gz", hash = "sha256:c65746aa17a7bd87b331d44a4171d943b8a70933e551cfbe4468d40968e35298"}, +] + +[package.dependencies] +libclang = "14.0.1" +protobuf = ">=3.19.6,<=3.20.3" +tensorflow = {version = ">=2.11.0,<3.0.0", markers = "platform_machine == \"x86_64\""} +tensorflow-addons = {version = ">=0.19.0,<0.20.0", markers = "platform_machine == \"x86_64\""} +tensorflow-io-gcs-filesystem = "0.34.0" +tensorflow-macos = {version = ">=2.11.0,<3.0.0", markers = "platform_machine == \"arm64\""} + +[[package]] +name = "lazy-loader" +version = "0.4" +description = "Makes it easy to load subpackages and functions on demand." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"}, + {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +dev = ["changelist (==0.5)"] +lint = ["pre-commit (==3.7.0)"] +test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] + +[[package]] +name = "libclang" +version = "14.0.1" +description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "libclang-14.0.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:a00c5f433af032979ac0cf03bcba59cf5247cb01fa04ef2380bf9668e84d50a9"}, + {file = "libclang-14.0.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:52634f51635e8fc710febde1d7c59d3756b14531bd9ab60df54397ccc08cc4a8"}, + {file = "libclang-14.0.1-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:02bacd219959601c627872f2c7c7090ce57cf6bd497618388e41813c7ee75a3a"}, + {file = "libclang-14.0.1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d44b8e4b063ea4c7e78c925f083c05ab14440d63ed1bad13d4ca62d2908d277"}, + {file = "libclang-14.0.1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:7c7b8c7c82c0cdc088052c6b7b2be4a45b6b06f5f856e7e7058e598f05c09910"}, + {file = "libclang-14.0.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:58b9679868b2d6b5172ded26026c2f71306c4cabd6d15b93b597446fd677eb98"}, + {file = "libclang-14.0.1-py2.py3-none-win_amd64.whl", hash = "sha256:1a4f0d5959c801c975950926cffb9b45521c890d7c4b730d8a1f688d75b25de9"}, + {file = "libclang-14.0.1-py2.py3-none-win_arm64.whl", hash = "sha256:7c344b16d32e80c06cd7d42bfad0ef3ffeadc96fd77b6674dd66d97bf23889ea"}, + {file = "libclang-14.0.1.tar.gz", hash = "sha256:332e539201b46cd4676bee992bbf4b3e50450fc17f71ff33d4afc9da09cf46cb"}, +] + +[[package]] +name = "librosa" +version = "0.10.2.post1" +description = "Python module for audio and music processing" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "librosa-0.10.2.post1-py3-none-any.whl", hash = "sha256:dc882750e8b577a63039f25661b7e39ec4cfbacc99c1cffba666cd664fb0a7a0"}, + {file = "librosa-0.10.2.post1.tar.gz", hash = "sha256:cd99f16717cbcd1e0983e37308d1db46a6f7dfc2e396e5a9e61e6821e44bd2e7"}, +] + +[package.dependencies] +audioread = ">=2.1.9" +decorator = ">=4.3.0" +joblib = ">=0.14" +lazy-loader = ">=0.1" +msgpack = ">=1.0" +numba = ">=0.51.0" +numpy = ">=1.20.3,<1.22.0 || >1.22.0,<1.22.1 || >1.22.1,<1.22.2 || >1.22.2" +pooch = ">=1.1" +scikit-learn = ">=0.20.0" +scipy = ">=1.2.0" +soundfile = ">=0.12.1" +soxr = ">=0.3.2" +typing-extensions = ">=4.1.1" + +[package.extras] +display = ["matplotlib (>=3.5.0)"] +docs = ["ipython (>=7.0)", "matplotlib (>=3.5.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (>=1.2.0)", "sphinxcontrib-svg2pdfconverter"] +tests = ["matplotlib (>=3.5.0)", "packaging (>=20.0)", "pytest", "pytest-cov", "pytest-mpl", "resampy (>=0.2.2)", "samplerate", "types-decorator"] + +[[package]] +name = "llvmlite" +version = "0.41.1" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "llvmlite-0.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1e1029d47ee66d3a0c4d6088641882f75b93db82bd0e6178f7bd744ebce42b9"}, + {file = "llvmlite-0.41.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:150d0bc275a8ac664a705135e639178883293cf08c1a38de3bbaa2f693a0a867"}, + {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eee5cf17ec2b4198b509272cf300ee6577229d237c98cc6e63861b08463ddc6"}, + {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd0338da625346538f1173a17cabf21d1e315cf387ca21b294ff209d176e244"}, + 
{file = "llvmlite-0.41.1-cp310-cp310-win32.whl", hash = "sha256:fa1469901a2e100c17eb8fe2678e34bd4255a3576d1a543421356e9c14d6e2ae"}, + {file = "llvmlite-0.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b76acee82ea0e9304be6be9d4b3840208d050ea0dcad75b1635fa06e949a0ae"}, + {file = "llvmlite-0.41.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:210e458723436b2469d61b54b453474e09e12a94453c97ea3fbb0742ba5a83d8"}, + {file = "llvmlite-0.41.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:855f280e781d49e0640aef4c4af586831ade8f1a6c4df483fb901cbe1a48d127"}, + {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67340c62c93a11fae482910dc29163a50dff3dfa88bc874872d28ee604a83be"}, + {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2181bb63ef3c607e6403813421b46982c3ac6bfc1f11fa16a13eaafb46f578e6"}, + {file = "llvmlite-0.41.1-cp311-cp311-win_amd64.whl", hash = "sha256:9564c19b31a0434f01d2025b06b44c7ed422f51e719ab5d24ff03b7560066c9a"}, + {file = "llvmlite-0.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5940bc901fb0325970415dbede82c0b7f3e35c2d5fd1d5e0047134c2c46b3281"}, + {file = "llvmlite-0.41.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b0a9a47c28f67a269bb62f6256e63cef28d3c5f13cbae4fab587c3ad506778b"}, + {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8afdfa6da33f0b4226af8e64cfc2b28986e005528fbf944d0a24a72acfc9432"}, + {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8454c1133ef701e8c050a59edd85d238ee18bb9a0eb95faf2fca8b909ee3c89a"}, + {file = "llvmlite-0.41.1-cp38-cp38-win32.whl", hash = "sha256:2d92c51e6e9394d503033ffe3292f5bef1566ab73029ec853861f60ad5c925d0"}, + {file = "llvmlite-0.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:df75594e5a4702b032684d5481db3af990b69c249ccb1d32687b8501f0689432"}, + {file = "llvmlite-0.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:04725975e5b2af416d685ea0769f4ecc33f97be541e301054c9f741003085802"}, + {file = "llvmlite-0.41.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bf14aa0eb22b58c231243dccf7e7f42f7beec48970f2549b3a6acc737d1a4ba4"}, + {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c32356f669e036eb01016e883b22add883c60739bc1ebee3a1cc0249a50828"}, + {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24091a6b31242bcdd56ae2dbea40007f462260bc9bdf947953acc39dffd54f8f"}, + {file = "llvmlite-0.41.1-cp39-cp39-win32.whl", hash = "sha256:880cb57ca49e862e1cd077104375b9d1dfdc0622596dfa22105f470d7bacb309"}, + {file = "llvmlite-0.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:92f093986ab92e71c9ffe334c002f96defc7986efda18397d0f08534f3ebdc4d"}, + {file = "llvmlite-0.41.1.tar.gz", hash = "sha256:f19f767a018e6ec89608e1f6b13348fa2fcde657151137cb64e56d48598a92db"}, +] + +[[package]] +name = "llvmlite" +version = "0.45.1" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "llvmlite-0.45.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:1b1af0c910af0978aa55fa4f60bbb3e9f39b41e97c2a6d94d199897be62ba07a"}, + {file = "llvmlite-0.45.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02a164db2d79088bbd6e0d9633b4fe4021d6379d7e4ac7cc85ed5f44b06a30c5"}, + {file = 
"llvmlite-0.45.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f2d47f34e4029e6df3395de34cc1c66440a8d72712993a6e6168db228686711b"}, + {file = "llvmlite-0.45.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f7319e5f9f90720578a7f56fbc805bdfb4bc071b507c7611f170d631c3c0f1e0"}, + {file = "llvmlite-0.45.1-cp310-cp310-win_amd64.whl", hash = "sha256:4edb62e685867799e336723cb9787ec6598d51d0b1ed9af0f38e692aa757e898"}, + {file = "llvmlite-0.45.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:60f92868d5d3af30b4239b50e1717cb4e4e54f6ac1c361a27903b318d0f07f42"}, + {file = "llvmlite-0.45.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98baab513e19beb210f1ef39066288784839a44cd504e24fff5d17f1b3cf0860"}, + {file = "llvmlite-0.45.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3adc2355694d6a6fbcc024d59bb756677e7de506037c878022d7b877e7613a36"}, + {file = "llvmlite-0.45.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2f3377a6db40f563058c9515dedcc8a3e562d8693a106a28f2ddccf2c8fcf6ca"}, + {file = "llvmlite-0.45.1-cp311-cp311-win_amd64.whl", hash = "sha256:f9c272682d91e0d57f2a76c6d9ebdfccc603a01828cdbe3d15273bdca0c3363a"}, + {file = "llvmlite-0.45.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:28e763aba92fe9c72296911e040231d486447c01d4f90027c8e893d89d49b20e"}, + {file = "llvmlite-0.45.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1a53f4b74ee9fd30cb3d27d904dadece67a7575198bd80e687ee76474620735f"}, + {file = "llvmlite-0.45.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b3796b1b1e1c14dcae34285d2f4ea488402fbd2c400ccf7137603ca3800864f"}, + {file = "llvmlite-0.45.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:779e2f2ceefef0f4368548685f0b4adde34e5f4b457e90391f570a10b348d433"}, + {file = "llvmlite-0.45.1-cp312-cp312-win_amd64.whl", hash = "sha256:9e6c9949baf25d9aa9cd7cf0f6d011b9ca660dd17f5ba2b23bdbdb77cc86b116"}, + {file = "llvmlite-0.45.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:d9ea9e6f17569a4253515cc01dade70aba536476e3d750b2e18d81d7e670eb15"}, + {file = "llvmlite-0.45.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:c9f3cadee1630ce4ac18ea38adebf2a4f57a89bd2740ce83746876797f6e0bfb"}, + {file = "llvmlite-0.45.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:57c48bf2e1083eedbc9406fb83c4e6483017879714916fe8be8a72a9672c995a"}, + {file = "llvmlite-0.45.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3aa3dfceda4219ae39cf18806c60eeb518c1680ff834b8b311bd784160b9ce40"}, + {file = "llvmlite-0.45.1-cp313-cp313-win_amd64.whl", hash = "sha256:080e6f8d0778a8239cd47686d402cb66eb165e421efa9391366a9b7e5810a38b"}, + {file = "llvmlite-0.45.1.tar.gz", hash = "sha256:09430bb9d0bb58fc45a45a57c7eae912850bedc095cd0810a57de109c69e1c32"}, +] + +[[package]] +name = "markdown" +version = "3.7" +description = "Python implementation of John Gruber's Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "Markdown-3.7-py3-none-any.whl", hash = "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803"}, + {file = "markdown-3.7.tar.gz", hash = "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = 
"sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "ml-dtypes" +version = "0.2.0" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = 
"ml_dtypes-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df6a76e1c8adf484feb138ed323f9f40a7b6c21788f120f7c78bec20ac37ee81"}, + {file = "ml_dtypes-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc29a0524ef5e23a7fbb8d881bdecabeb3fc1d19d9db61785d077a86cb94fab2"}, + {file = "ml_dtypes-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08c391c2794f2aad358e6f4c70785a9a7b1df980ef4c232b3ccd4f6fe39f719"}, + {file = "ml_dtypes-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:75015818a7fccf99a5e8ed18720cb430f3e71a8838388840f4cdf225c036c983"}, + {file = "ml_dtypes-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e70047ec2c83eaee01afdfdabee2c5b0c133804d90d0f7db4dd903360fcc537c"}, + {file = "ml_dtypes-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36d28b8861a8931695e5a31176cad5ae85f6504906650dea5598fbec06c94606"}, + {file = "ml_dtypes-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e85ba8e24cf48d456e564688e981cf379d4c8e644db0a2f719b78de281bac2ca"}, + {file = "ml_dtypes-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:832a019a1b6db5c4422032ca9940a990fa104eee420f643713241b3a518977fa"}, + {file = "ml_dtypes-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8faaf0897942c8253dd126662776ba45f0a5861968cf0f06d6d465f8a7bc298a"}, + {file = "ml_dtypes-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b984cddbe8173b545a0e3334fe56ea1a5c3eb67c507f60d0cfde1d3fa8f8c2"}, + {file = "ml_dtypes-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:022d5a4ee6be14569c2a9d1549e16f1ec87ca949681d0dca59995445d5fcdd5b"}, + {file = "ml_dtypes-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:50845af3e9a601810751b55091dee6c2562403fa1cb4e0123675cf3a4fc2c17a"}, + {file = "ml_dtypes-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f00c71c8c63e03aff313bc6a7aeaac9a4f1483a921a6ffefa6d4404efd1af3d0"}, + {file = "ml_dtypes-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80d304c836d73f10605c58ccf7789c171cc229bfb678748adfb7cea2510dfd0e"}, + {file = "ml_dtypes-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32107e7fa9f62db9a5281de923861325211dfff87bd23faefb27b303314635ab"}, + {file = "ml_dtypes-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:1749b60348da71fd3c2ab303fdbc1965958dc50775ead41f5669c932a341cafd"}, + {file = "ml_dtypes-0.2.0.tar.gz", hash = "sha256:6488eb642acaaf08d8020f6de0a38acee7ac324c1e6e92ee0c0fea42422cb797"}, +] + +[package.dependencies] +numpy = [ + {version = ">1.20"}, + {version = ">=1.21.2", markers = "python_version > \"3.9\""}, +] + +[package.extras] +dev = ["absl-py", "pyink", "pylint (>=2.6.0)", "pytest", "pytest-xdist"] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "msgpack" 
+version = "1.1.1" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "msgpack-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:353b6fc0c36fde68b661a12949d7d49f8f51ff5fa019c1e47c87c4ff34b080ed"}, + {file = "msgpack-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:79c408fcf76a958491b4e3b103d1c417044544b68e96d06432a189b43d1215c8"}, + {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78426096939c2c7482bf31ef15ca219a9e24460289c00dd0b94411040bb73ad2"}, + {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b17ba27727a36cb73aabacaa44b13090feb88a01d012c0f4be70c00f75048b4"}, + {file = "msgpack-1.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a17ac1ea6ec3c7687d70201cfda3b1e8061466f28f686c24f627cae4ea8efd0"}, + {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:88d1e966c9235c1d4e2afac21ca83933ba59537e2e2727a999bf3f515ca2af26"}, + {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f6d58656842e1b2ddbe07f43f56b10a60f2ba5826164910968f5933e5178af75"}, + {file = "msgpack-1.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96decdfc4adcbc087f5ea7ebdcfd3dee9a13358cae6e81d54be962efc38f6338"}, + {file = "msgpack-1.1.1-cp310-cp310-win32.whl", hash = "sha256:6640fd979ca9a212e4bcdf6eb74051ade2c690b862b679bfcb60ae46e6dc4bfd"}, + {file = "msgpack-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:8b65b53204fe1bd037c40c4148d00ef918eb2108d24c9aaa20bc31f9810ce0a8"}, + {file = "msgpack-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:71ef05c1726884e44f8b1d1773604ab5d4d17729d8491403a705e649116c9558"}, + {file = "msgpack-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:36043272c6aede309d29d56851f8841ba907a1a3d04435e43e8a19928e243c1d"}, + {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a32747b1b39c3ac27d0670122b57e6e57f28eefb725e0b625618d1b59bf9d1e0"}, + {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a8b10fdb84a43e50d38057b06901ec9da52baac6983d3f709d8507f3889d43f"}, + {file = "msgpack-1.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0c325c3f485dc54ec298d8b024e134acf07c10d494ffa24373bea729acf704"}, + {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:88daaf7d146e48ec71212ce21109b66e06a98e5e44dca47d853cbfe171d6c8d2"}, + {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8b55ea20dc59b181d3f47103f113e6f28a5e1c89fd5b67b9140edb442ab67f2"}, + {file = "msgpack-1.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4a28e8072ae9779f20427af07f53bbb8b4aa81151054e882aee333b158da8752"}, + {file = "msgpack-1.1.1-cp311-cp311-win32.whl", hash = "sha256:7da8831f9a0fdb526621ba09a281fadc58ea12701bc709e7b8cbc362feabc295"}, + {file = "msgpack-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fd1b58e1431008a57247d6e7cc4faa41c3607e8e7d4aaf81f7c29ea013cb458"}, + {file = "msgpack-1.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ae497b11f4c21558d95de9f64fff7053544f4d1a17731c866143ed6bb4591238"}, + {file = "msgpack-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:33be9ab121df9b6b461ff91baac6f2731f83d9b27ed948c5b9d1978ae28bf157"}, + {file = 
"msgpack-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f64ae8fe7ffba251fecb8408540c34ee9df1c26674c50c4544d72dbf792e5ce"}, + {file = "msgpack-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a494554874691720ba5891c9b0b39474ba43ffb1aaf32a5dac874effb1619e1a"}, + {file = "msgpack-1.1.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb643284ab0ed26f6957d969fe0dd8bb17beb567beb8998140b5e38a90974f6c"}, + {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d275a9e3c81b1093c060c3837e580c37f47c51eca031f7b5fb76f7b8470f5f9b"}, + {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fd6b577e4541676e0cc9ddc1709d25014d3ad9a66caa19962c4f5de30fc09ef"}, + {file = "msgpack-1.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb29aaa613c0a1c40d1af111abf025f1732cab333f96f285d6a93b934738a68a"}, + {file = "msgpack-1.1.1-cp312-cp312-win32.whl", hash = "sha256:870b9a626280c86cff9c576ec0d9cbcc54a1e5ebda9cd26dab12baf41fee218c"}, + {file = "msgpack-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:5692095123007180dca3e788bb4c399cc26626da51629a31d40207cb262e67f4"}, + {file = "msgpack-1.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3765afa6bd4832fc11c3749be4ba4b69a0e8d7b728f78e68120a157a4c5d41f0"}, + {file = "msgpack-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8ddb2bcfd1a8b9e431c8d6f4f7db0773084e107730ecf3472f1dfe9ad583f3d9"}, + {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:196a736f0526a03653d829d7d4c5500a97eea3648aebfd4b6743875f28aa2af8"}, + {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d592d06e3cc2f537ceeeb23d38799c6ad83255289bb84c2e5792e5a8dea268a"}, + {file = "msgpack-1.1.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4df2311b0ce24f06ba253fda361f938dfecd7b961576f9be3f3fbd60e87130ac"}, + {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4141c5a32b5e37905b5940aacbc59739f036930367d7acce7a64e4dec1f5e0b"}, + {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b1ce7f41670c5a69e1389420436f41385b1aa2504c3b0c30620764b15dded2e7"}, + {file = "msgpack-1.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4147151acabb9caed4e474c3344181e91ff7a388b888f1e19ea04f7e73dc7ad5"}, + {file = "msgpack-1.1.1-cp313-cp313-win32.whl", hash = "sha256:500e85823a27d6d9bba1d057c871b4210c1dd6fb01fbb764e37e4e8847376323"}, + {file = "msgpack-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:6d489fba546295983abd142812bda76b57e33d0b9f5d5b71c09a583285506f69"}, + {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bba1be28247e68994355e028dcd668316db30c1f758d3241a7b903ac78dcd285"}, + {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8f93dcddb243159c9e4109c9750ba5b335ab8d48d9522c5308cd05d7e3ce600"}, + {file = "msgpack-1.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fbbc0b906a24038c9958a1ba7ae0918ad35b06cb449d398b76a7d08470b0ed9"}, + {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:61e35a55a546a1690d9d09effaa436c25ae6130573b6ee9829c37ef0f18d5e78"}, + {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_i686.whl", hash = 
"sha256:1abfc6e949b352dadf4bce0eb78023212ec5ac42f6abfd469ce91d783c149c2a"}, + {file = "msgpack-1.1.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:996f2609ddf0142daba4cefd767d6db26958aac8439ee41db9cc0db9f4c4c3a6"}, + {file = "msgpack-1.1.1-cp38-cp38-win32.whl", hash = "sha256:4d3237b224b930d58e9d83c81c0dba7aacc20fcc2f89c1e5423aa0529a4cd142"}, + {file = "msgpack-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:da8f41e602574ece93dbbda1fab24650d6bf2a24089f9e9dbb4f5730ec1e58ad"}, + {file = "msgpack-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5be6b6bc52fad84d010cb45433720327ce886009d862f46b26d4d154001994b"}, + {file = "msgpack-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3a89cd8c087ea67e64844287ea52888239cbd2940884eafd2dcd25754fb72232"}, + {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d75f3807a9900a7d575d8d6674a3a47e9f227e8716256f35bc6f03fc597ffbf"}, + {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d182dac0221eb8faef2e6f44701812b467c02674a322c739355c39e94730cdbf"}, + {file = "msgpack-1.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b13fe0fb4aac1aa5320cd693b297fe6fdef0e7bea5518cbc2dd5299f873ae90"}, + {file = "msgpack-1.1.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:435807eeb1bc791ceb3247d13c79868deb22184e1fc4224808750f0d7d1affc1"}, + {file = "msgpack-1.1.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4835d17af722609a45e16037bb1d4d78b7bdf19d6c0128116d178956618c4e88"}, + {file = "msgpack-1.1.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a8ef6e342c137888ebbfb233e02b8fbd689bb5b5fcc59b34711ac47ebd504478"}, + {file = "msgpack-1.1.1-cp39-cp39-win32.whl", hash = "sha256:61abccf9de335d9efd149e2fff97ed5974f2481b3353772e8e2dd3402ba2bd57"}, + {file = "msgpack-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:40eae974c873b2992fd36424a5d9407f93e97656d999f43fca9d29f820899084"}, + {file = "msgpack-1.1.1.tar.gz", hash = "sha256:77b79ce34a2bdab2594f490c8e80dd62a02d650b91a75159a63ec413b8d104cd"}, +] + +[[package]] +name = "msgpack" +version = "1.1.2" +description = "MessagePack serializer" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2"}, + {file = "msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251"}, + {file = "msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f"}, + {file = "msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f"}, + {file = "msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9"}, + {file = "msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c"}, + {file = "msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296"}, + {file = "msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c"}, + {file = "msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e"}, + {file = "msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e"}, + {file = "msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68"}, + {file = "msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa"}, + {file = "msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f"}, + {file = "msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9"}, + {file = "msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620"}, + {file = "msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029"}, + {file = "msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b"}, + {file = "msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf"}, + {file = "msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999"}, + {file = "msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e"}, + {file = 
"msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162"}, + {file = "msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794"}, + {file = "msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c"}, + {file = "msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9"}, + {file = "msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00"}, + {file = "msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e"}, + {file = "msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014"}, + {file = "msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2"}, + {file = "msgpack-1.1.2-cp314-cp314-win32.whl", hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717"}, + {file = "msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b"}, + {file = "msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a"}, + {file = "msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245"}, + {file = "msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20"}, + {file = "msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27"}, + {file = "msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff"}, + {file = "msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:ea5405c46e690122a76531ab97a079e184c0daf491e588592d6a23d3e32af99e"}, + {file = "msgpack-1.1.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fba231af7a933400238cb357ecccf8ab5d51535ea95d94fc35b7806218ff844"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a8f6e7d30253714751aa0b0c84ae28948e852ee7fb0524082e6716769124bc23"}, + {file = "msgpack-1.1.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:94fd7dc7d8cb0a54432f296f2246bc39474e017204ca6f4ff345941d4ed285a7"}, + {file = "msgpack-1.1.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:350ad5353a467d9e3b126d8d1b90fe05ad081e2e1cef5753f8c345217c37e7b8"}, + {file = "msgpack-1.1.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6bde749afe671dc44893f8d08e83bf475a1a14570d67c4bb5cec5573463c8833"}, + {file = "msgpack-1.1.2-cp39-cp39-win32.whl", hash = "sha256:ad09b984828d6b7bb52d1d1d0c9be68ad781fa004ca39216c8a1e63c0f34ba3c"}, + {file = "msgpack-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:67016ae8c8965124fdede9d3769528ad8284f14d635337ffa6a713a580f6c030"}, + {file = "msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e"}, +] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = 
"multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "multidict" +version = "6.7.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "multidict-6.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9f474ad5acda359c8758c8accc22032c6abe6dc87a8be2440d097785e27a9349"}, + {file = "multidict-6.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b7a9db5a870f780220e931d0002bbfd88fb53aceb6293251e2c839415c1b20e"}, + {file = "multidict-6.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03ca744319864e92721195fa28c7a3b2bc7b686246b35e4078c1e4d0eb5466d3"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f0e77e3c0008bc9316e662624535b88d360c3a5d3f81e15cf12c139a75250046"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08325c9e5367aa379a3496aa9a022fe8837ff22e00b94db256d3a1378c76ab32"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e2862408c99f84aa571ab462d25236ef9cb12a602ea959ba9c9009a54902fc73"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4d72a9a2d885f5c208b0cb91ff2ed43636bb7e345ec839ff64708e04f69a13cc"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:478cc36476687bac1514d651cbbaa94b86b0732fb6855c60c673794c7dd2da62"}, + {file = "multidict-6.7.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6843b28b0364dc605f21481c90fadb5f60d9123b442eb8a726bb74feef588a84"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23bfeee5316266e5ee2d625df2d2c602b829435fc3a235c2ba2131495706e4a0"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:680878b9f3d45c31e1f730eef731f9b0bc1da456155688c6745ee84eb818e90e"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:eb866162ef2f45063acc7a53a88ef6fe8bf121d45c30ea3c9cd87ce7e191a8d4"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:df0e3bf7993bdbeca5ac25aa859cf40d39019e015c9c91809ba7093967f7a648"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:661709cdcd919a2ece2234f9bae7174e5220c80b034585d7d8a755632d3e2111"}, + {file = "multidict-6.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:096f52730c3fb8ed419db2d44391932b63891b2c5ed14850a7e215c0ba9ade36"}, + {file = "multidict-6.7.0-cp310-cp310-win32.whl", hash = "sha256:afa8a2978ec65d2336305550535c9c4ff50ee527914328c8677b3973ade52b85"}, + {file = "multidict-6.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:b15b3afff74f707b9275d5ba6a91ae8f6429c3ffb29bbfd216b0b375a56f13d7"}, + {file = "multidict-6.7.0-cp310-cp310-win_arm64.whl", hash = "sha256:4b73189894398d59131a66ff157837b1fafea9974be486d036bb3d32331fdbf0"}, + {file = "multidict-6.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4d409aa42a94c0b3fa617708ef5276dfe81012ba6753a0370fcc9d0195d0a1fc"}, + {file = "multidict-6.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14c9e076eede3b54c636f8ce1c9c252b5f057c62131211f0ceeec273810c9721"}, + {file = "multidict-6.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c09703000a9d0fa3c3404b27041e574cc7f4df4c6563873246d0e11812a94b6"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a265acbb7bb33a3a2d626afbe756371dce0279e7b17f4f4eda406459c2b5ff1c"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51cb455de290ae462593e5b1cb1118c5c22ea7f0d3620d9940bf695cea5a4bd7"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db99677b4457c7a5c5a949353e125ba72d62b35f74e26da141530fbb012218a7"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f470f68adc395e0183b92a2f4689264d1ea4b40504a24d9882c27375e6662bb9"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0db4956f82723cc1c270de9c6e799b4c341d327762ec78ef82bb962f79cc07d8"}, + {file = "multidict-6.7.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e56d780c238f9e1ae66a22d2adf8d16f485381878250db8d496623cd38b22bd"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9d14baca2ee12c1a64740d4531356ba50b82543017f3ad6de0deb943c5979abb"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:295a92a76188917c7f99cda95858c822f9e4aae5824246bba9b6b44004ddd0a6"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39f1719f57adbb767ef592a50ae5ebb794220d1188f9ca93de471336401c34d2"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:0a13fb8e748dfc94749f622de065dd5c1def7e0d2216dba72b1d8069a389c6ff"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e3aa16de190d29a0ea1b48253c57d99a68492c8dd8948638073ab9e74dc9410b"}, + {file = "multidict-6.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a048ce45dcdaaf1defb76b2e684f997fb5abf74437b6cb7b22ddad934a964e34"}, + {file = "multidict-6.7.0-cp311-cp311-win32.whl", hash = "sha256:a90af66facec4cebe4181b9e62a68be65e45ac9b52b67de9eec118701856e7ff"}, + {file = "multidict-6.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:95b5ffa4349df2887518bb839409bcf22caa72d82beec453216802f475b23c81"}, + {file = "multidict-6.7.0-cp311-cp311-win_arm64.whl", hash = "sha256:329aa225b085b6f004a4955271a7ba9f1087e39dcb7e65f6284a988264a63912"}, + {file = "multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184"}, + {file = "multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45"}, + {file = "multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1"}, + {file = "multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a"}, + {file = "multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8"}, + {file = "multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4"}, + {file = "multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b"}, + {file = 
"multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec"}, + {file = "multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6"}, + {file = "multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159"}, + {file = "multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf"}, + {file = "multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd"}, + {file = "multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288"}, + {file = "multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17"}, + {file = "multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390"}, + {file = "multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e"}, + {file = "multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00"}, + {file = "multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb"}, + {file = "multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b"}, + {file = 
"multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c"}, + {file = "multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1"}, + {file = "multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b"}, + {file = "multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5"}, + {file = "multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad"}, + {file = "multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762"}, + {file = "multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6"}, + {file = "multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d"}, + {file = "multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6"}, + {file = "multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792"}, + {file = "multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842"}, + {file = "multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b"}, + {file = "multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38"}, + {file = "multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128"}, + {file = "multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34"}, + {file = "multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99"}, + {file = 
"multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202"}, + {file = "multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1"}, + {file = "multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f"}, + {file = "multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f"}, + {file = "multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885"}, + {file = "multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c"}, + {file = "multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000"}, + {file = "multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63"}, + {file = "multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718"}, + {file = "multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a"}, + {file = "multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9"}, + {file = "multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0"}, + {file = "multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13"}, + {file = "multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd"}, + {file = "multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827"}, + {file = "multidict-6.7.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:363eb68a0a59bd2303216d2346e6c441ba10d36d1f9969fcb6f1ba700de7bb5c"}, + {file = "multidict-6.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d874eb056410ca05fed180b6642e680373688efafc7f077b2a2f61811e873a40"}, + {file = "multidict-6.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b55d5497b51afdfde55925e04a022f1de14d4f4f25cdfd4f5d9b0aa96166851"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f8e5c0031b90ca9ce555e2e8fd5c3b02a25f14989cbc310701823832c99eb687"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9cf41880c991716f3c7cec48e2f19ae4045fc9db5fc9cff27347ada24d710bb5"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8cfc12a8630a29d601f48d47787bd7eb730e475e83edb5d6c5084317463373eb"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3996b50c3237c4aec17459217c1e7bbdead9a22a0fcd3c365564fbd16439dde6"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7f5170993a0dd3ab871c74f45c0a21a4e2c37a2f2b01b5f722a2ad9c6650469e"}, + {file = "multidict-6.7.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ec81878ddf0e98817def1e77d4f50dae5ef5b0e4fe796fae3bd674304172416e"}, + {file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9281bf5b34f59afbc6b1e477a372e9526b66ca446f4bf62592839c195a718b32"}, + {file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:68af405971779d8b37198726f2b6fe3955db846fee42db7a4286fc542203934c"}, + {file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ba3ef510467abb0667421a286dc906e30eb08569365f5cdb131d7aff7c2dd84"}, + {file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b61189b29081a20c7e4e0b49b44d5d44bb0dc92be3c6d06a11cc043f81bf9329"}, + {file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fb287618b9c7aa3bf8d825f02d9201b2f13078a5ed3b293c8f4d953917d84d5e"}, + 
{file = "multidict-6.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:521f33e377ff64b96c4c556b81c55d0cfffb96a11c194fd0c3f1e56f3d8dd5a4"}, + {file = "multidict-6.7.0-cp39-cp39-win32.whl", hash = "sha256:ce8fdc2dca699f8dbf055a61d73eaa10482569ad20ee3c36ef9641f69afa8c91"}, + {file = "multidict-6.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:7e73299c99939f089dd9b2120a04a516b95cdf8c1cd2b18c53ebf0de80b1f18f"}, + {file = "multidict-6.7.0-cp39-cp39-win_arm64.whl", hash = "sha256:6bdce131e14b04fd34a809b6380dbfd826065c3e2fe8a50dbae659fa0c390546"}, + {file = "multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3"}, + {file = "multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "multiprocess" +version = "0.70.16" +description = "better multiprocessing and multithreading in Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-macosx_10_13_x86_64.whl", hash = "sha256:476887be10e2f59ff183c006af746cb6f1fd0eadcfd4ef49e605cbe2659920ee"}, + {file = "multiprocess-0.70.16-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d951bed82c8f73929ac82c61f01a7b5ce8f3e5ef40f5b52553b4f547ce2b08ec"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37b55f71c07e2d741374998c043b9520b626a8dddc8b3129222ca4f1a06ef67a"}, + {file = "multiprocess-0.70.16-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba8c31889abf4511c7308a8c52bb4a30b9d590e7f58523302ba00237702ca054"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-macosx_10_13_x86_64.whl", hash = "sha256:0dfd078c306e08d46d7a8d06fb120313d87aa43af60d66da43ffff40b44d2f41"}, + {file = "multiprocess-0.70.16-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e7b9d0f307cd9bd50851afaac0dba2cb6c44449efff697df7c7645f7d3f2be3a"}, + {file = "multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02"}, + {file = "multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a"}, + {file = "multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e"}, + {file = "multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435"}, + {file = "multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3"}, + {file = "multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1"}, +] + +[package.dependencies] +dill = ">=0.3.8" + +[[package]] +name = "numba" +version = "0.58.1" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "numba-0.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07f2fa7e7144aa6f275f27260e73ce0d808d3c62b30cff8906ad1dec12d87bbe"}, + {file = "numba-0.58.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7bf1ddd4f7b9c2306de0384bf3854cac3edd7b4d8dffae2ec1b925e4c436233f"}, + {file = "numba-0.58.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:bc2d904d0319d7a5857bd65062340bed627f5bfe9ae4a495aef342f072880d50"}, + {file = "numba-0.58.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e79b6cc0d2bf064a955934a2e02bf676bc7995ab2db929dbbc62e4c16551be6"}, + {file = "numba-0.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:81fe5b51532478149b5081311b0fd4206959174e660c372b94ed5364cfb37c82"}, + {file = "numba-0.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bcecd3fb9df36554b342140a4d77d938a549be635d64caf8bd9ef6c47a47f8aa"}, + {file = "numba-0.58.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1eaa744f518bbd60e1f7ccddfb8002b3d06bd865b94a5d7eac25028efe0e0ff"}, + {file = "numba-0.58.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bf68df9c307fb0aa81cacd33faccd6e419496fdc621e83f1efce35cdc5e79cac"}, + {file = "numba-0.58.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:55a01e1881120e86d54efdff1be08381886fe9f04fc3006af309c602a72bc44d"}, + {file = "numba-0.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:811305d5dc40ae43c3ace5b192c670c358a89a4d2ae4f86d1665003798ea7a1a"}, + {file = "numba-0.58.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea5bfcf7d641d351c6a80e8e1826eb4a145d619870016eeaf20bbd71ef5caa22"}, + {file = "numba-0.58.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e63d6aacaae1ba4ef3695f1c2122b30fa3d8ba039c8f517784668075856d79e2"}, + {file = "numba-0.58.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6fe7a9d8e3bd996fbe5eac0683227ccef26cba98dae6e5cee2c1894d4b9f16c1"}, + {file = "numba-0.58.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:898af055b03f09d33a587e9425500e5be84fc90cd2f80b3fb71c6a4a17a7e354"}, + {file = "numba-0.58.1-cp38-cp38-win_amd64.whl", hash = "sha256:d3e2fe81fe9a59fcd99cc572002101119059d64d31eb6324995ee8b0f144a306"}, + {file = "numba-0.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c765aef472a9406a97ea9782116335ad4f9ef5c9f93fc05fd44aab0db486954"}, + {file = "numba-0.58.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9356e943617f5e35a74bf56ff6e7cc83e6b1865d5e13cee535d79bf2cae954"}, + {file = "numba-0.58.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:240e7a1ae80eb6b14061dc91263b99dc8d6af9ea45d310751b780888097c1aaa"}, + {file = "numba-0.58.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:45698b995914003f890ad839cfc909eeb9c74921849c712a05405d1a79c50f68"}, + {file = "numba-0.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd3dda77955be03ff366eebbfdb39919ce7c2620d86c906203bed92124989032"}, + {file = "numba-0.58.1.tar.gz", hash = "sha256:487ded0633efccd9ca3a46364b40006dbdaca0f95e99b8b83e778d1195ebcbaa"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} +llvmlite = "==0.41.*" +numpy = ">=1.22,<1.27" + +[[package]] +name = "numba" +version = "0.62.1" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "numba-0.62.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a323df9d36a0da1ca9c592a6baaddd0176d9f417ef49a65bb81951dce69d941a"}, + {file = "numba-0.62.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1e1f4781d3f9f7c23f16eb04e76ca10b5a3516e959634bd226fc48d5d8e7a0a"}, + {file = "numba-0.62.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:14432af305ea68627a084cd702124fd5d0c1f5b8a413b05f4e14757202d1cf6c"}, + {file = "numba-0.62.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f180922adf159ae36c2fe79fb94ffaa74cf5cb3688cb72dba0a904b91e978507"}, + {file = "numba-0.62.1-cp310-cp310-win_amd64.whl", hash = "sha256:f41834909d411b4b8d1c68f745144136f21416547009c1e860cc2098754b4ca7"}, + {file = "numba-0.62.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:f43e24b057714e480fe44bc6031de499e7cf8150c63eb461192caa6cc8530bc8"}, + {file = "numba-0.62.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:57cbddc53b9ee02830b828a8428757f5c218831ccc96490a314ef569d8342b7b"}, + {file = "numba-0.62.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:604059730c637c7885386521bb1b0ddcbc91fd56131a6dcc54163d6f1804c872"}, + {file = "numba-0.62.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6c540880170bee817011757dc9049dba5a29db0c09b4d2349295991fe3ee55f"}, + {file = "numba-0.62.1-cp311-cp311-win_amd64.whl", hash = "sha256:03de6d691d6b6e2b76660ba0f38f37b81ece8b2cc524a62f2a0cfae2bfb6f9da"}, + {file = "numba-0.62.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:1b743b32f8fa5fff22e19c2e906db2f0a340782caf024477b97801b918cf0494"}, + {file = "numba-0.62.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:90fa21b0142bcf08ad8e32a97d25d0b84b1e921bc9423f8dda07d3652860eef6"}, + {file = "numba-0.62.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ef84d0ac19f1bf80431347b6f4ce3c39b7ec13f48f233a48c01e2ec06ecbc59"}, + {file = "numba-0.62.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9315cc5e441300e0ca07c828a627d92a6802bcbf27c5487f31ae73783c58da53"}, + {file = "numba-0.62.1-cp312-cp312-win_amd64.whl", hash = "sha256:44e3aa6228039992f058f5ebfcfd372c83798e9464297bdad8cc79febcf7891e"}, + {file = "numba-0.62.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:b72489ba8411cc9fdcaa2458d8f7677751e94f0109eeb53e5becfdc818c64afb"}, + {file = "numba-0.62.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:44a1412095534a26fb5da2717bc755b57da5f3053965128fe3dc286652cc6a92"}, + {file = "numba-0.62.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8c9460b9e936c5bd2f0570e20a0a5909ee6e8b694fd958b210e3bde3a6dba2d7"}, + {file = "numba-0.62.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:728f91a874192df22d74e3fd42c12900b7ce7190b1aad3574c6c61b08313e4c5"}, + {file = "numba-0.62.1-cp313-cp313-win_amd64.whl", hash = "sha256:bbf3f88b461514287df66bc8d0307e949b09f2b6f67da92265094e8fa1282dd8"}, + {file = "numba-0.62.1.tar.gz", hash = "sha256:7b774242aa890e34c21200a1fc62e5b5757d5286267e71103257f4e2af0d5161"}, +] + +[package.dependencies] +llvmlite = "==0.45.*" +numpy = ">=1.22,<2.4" + +[[package]] +name = "numpy" +version = "1.23.5" +description = "NumPy is the fundamental package for array computing with Python." 
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"}, + {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"}, + {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"}, + {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"}, + {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"}, + {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"}, + {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"}, + {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"}, + {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"}, + {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, + {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "onnx" +version = "1.13.0" +description = "Open Neural Network Exchange" +optional = false +python-versions = "*" +groups = ["main", "dev"] +files = [ + {file = "onnx-1.13.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4d25aaf5b385937eb3d0846616ff8fdff65b4f5d2f55c82ffe0c6deb021f4714"}, + {file = "onnx-1.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ea9ce14eb7bc4ff3e236d44433ed6b9a5cdb2921d357d28f74268b43d04897c0"}, + {file = "onnx-1.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c77074bb9c12bef5c0054c972bba0280de6df0712dfccfdd224b0ee3f0b56cab"}, + {file = "onnx-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1f4f217942f97aad8bd3b707259338045ed5804b3a0dfa453d9f108f53a9ce2"}, + {file = "onnx-1.13.0-cp310-cp310-win32.whl", hash = "sha256:41953d6a9c1fc1c1023438c8aac8ed2ee29a1739fbfce68f8ece38f34d326d02"}, + {file = "onnx-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:9b2691ad78ddcac2244b4299e5f5132895e12f99dbd1cfb310d10cdb50cd2d90"}, + {file = "onnx-1.13.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:c1eeec79e3cc35b808df4616f9ddef769fb4e5912e0eaacbddfa3a60d93d36c0"}, + {file = "onnx-1.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8008d9dd4479445584a5e08cce5b8319777a145fdf397c1791a59efa347bb732"}, + {file = "onnx-1.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bad429298470efa5da5589f518036a125ca8864a766ad706278531c05c723f48"}, + {file = "onnx-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8faef7ee6e2012ad1ff13ffe248809218ac4ea7040bd62a7e0740978feffe926"}, + {file = "onnx-1.13.0-cp311-cp311-win32.whl", hash = "sha256:1d443d3b895dda42355ea6281eff638cc53a1a0b12244421d9fc09046444ba94"}, + {file = "onnx-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:88199c7eecba6c4f0b822147cf40184e7de94bcf915b3cbd0e2728966f7305c1"}, + {file = "onnx-1.13.0-cp37-cp37m-macosx_10_12_universal2.whl", hash = "sha256:99fb9e674e1244fed09afd2a3106485d803357beaea7c0ae2f5956bde8319c54"}, + {file = "onnx-1.13.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = 
"sha256:6113b4491cf27c408272397405c2f0daf8751515a91ea86f03db05f953433be9"}, + {file = "onnx-1.13.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b9ff692dff87dc180045c055502ab0d91d042f1cd3058d94de4141cd4445286"}, + {file = "onnx-1.13.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:603587b438bbf685860ead13a83b771cd7a6a0565f745d825260c1ad6904674a"}, + {file = "onnx-1.13.0-cp37-cp37m-win32.whl", hash = "sha256:2b8429926423e83aba724ba8586ef1611d7bf7f4f9f2cc4312856da86fd9c5ba"}, + {file = "onnx-1.13.0-cp37-cp37m-win_amd64.whl", hash = "sha256:098176b93b19323639831561618da91aa472745cd518527a540152d7f9b8f7d3"}, + {file = "onnx-1.13.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:4bac7453fa5fa969352778f759896c43e3b8337ae532cd6dda36758b9dc656d7"}, + {file = "onnx-1.13.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:2e8ece71cb00c1e8f5fec93f306a071b8d2929fded6b1e6a71cab710de2e798d"}, + {file = "onnx-1.13.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd305274b199ad04ea6c185437f736981265f20d7ac2cbf16411d2d35e5e8e68"}, + {file = "onnx-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad8208252f6d61dcd487c2c62f6fbc1eee24bf11addb56c500cf0feb8966f36"}, + {file = "onnx-1.13.0-cp38-cp38-win32.whl", hash = "sha256:cef04e12123fef9f1e44078311fe7e9d9da0713e273ea13147b77d57cfd5eea0"}, + {file = "onnx-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:934a6497b2feb7a5ec31063afea5da1b06131fb2e9979f9bdeeec5c8cf0e03d2"}, + {file = "onnx-1.13.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:43e1ff72dcff4eabf3a75b119156c96603c7c3cbcf93ac7dba0c12687511aa9d"}, + {file = "onnx-1.13.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e27021a056dfecfdc6307d8b50e69a4e32e2137b54bfe215c82580e8dceb7d84"}, + {file = "onnx-1.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5d5e7efbf7d6b81edf358bbfb008d1110ab020de0d52d8c84ffddbf07cedfb2"}, + {file = "onnx-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257db1d357671d81f789f30d4e9358fd6427ae3ebf5fd90e4b0b418e6c8bb295"}, + {file = "onnx-1.13.0-cp39-cp39-win32.whl", hash = "sha256:eb32853d94a61728ba4dd0809740e782896d10178d5593f472d24f51c13c2e5d"}, + {file = "onnx-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:925e9ecc4cca0da65126e5320577b81c0c2ac3c9e0418e77ff21da63d0f3bcdb"}, + {file = "onnx-1.13.0.tar.gz", hash = "sha256:410b39950367857f97b65093681fe2495a2e23d63777a8aceaf96c56a16d166e"}, +] + +[package.dependencies] +numpy = ">=1.16.6" +protobuf = ">=3.20.2,<4" +typing-extensions = ">=3.6.2.1" + +[package.extras] +lint = ["black (>=22.3)", "clang-format (==13.0.0)", "flake8 (>=5.0.2)", "isort[colors] (>=5.10)", "mypy (>=0.971)", "types-protobuf (==3.18.4)"] + +[[package]] +name = "onnxconverter-common" +version = "1.16.0" +description = "ONNX Converter and Optimization Tools" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "onnxconverter_common-1.16.0-py2.py3-none-any.whl", hash = "sha256:df39ee96f17fff119dff10dd245467651b60b9e8a96020eb93402239794852f7"}, +] + +[package.dependencies] +numpy = "*" +onnx = "*" +packaging = "*" +protobuf = ">=3.20.2" + +[package.extras] +dev = ["onnxconverter-common[lint,test]"] +lint = ["pyright", "ruff"] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "onnxruntime" +version = "1.17.3" +description = "ONNX Runtime is a runtime accelerator for Machine Learning models" +optional = false +python-versions 
= "*" +groups = ["dev"] +files = [ + {file = "onnxruntime-1.17.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d86dde9c0bb435d709e51bd25991c9fe5b9a5b168df45ce119769edc4d198b15"}, + {file = "onnxruntime-1.17.3-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d87b68bf931ac527b2d3c094ead66bb4381bac4298b65f46c54fe4d1e255865"}, + {file = "onnxruntime-1.17.3-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26e950cf0333cf114a155f9142e71da344d2b08dfe202763a403ae81cc02ebd1"}, + {file = "onnxruntime-1.17.3-cp310-cp310-win32.whl", hash = "sha256:0962a4d0f5acebf62e1f0bf69b6e0adf16649115d8de854c1460e79972324d68"}, + {file = "onnxruntime-1.17.3-cp310-cp310-win_amd64.whl", hash = "sha256:468ccb8a0faa25c681a41787b1594bf4448b0252d3efc8b62fd8b2411754340f"}, + {file = "onnxruntime-1.17.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e8cd90c1c17d13d47b89ab076471e07fb85467c01dcd87a8b8b5cdfbcb40aa51"}, + {file = "onnxruntime-1.17.3-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a058b39801baefe454eeb8acf3ada298c55a06a4896fafc224c02d79e9037f60"}, + {file = "onnxruntime-1.17.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2f823d5eb4807007f3da7b27ca972263df6a1836e6f327384eb266274c53d05d"}, + {file = "onnxruntime-1.17.3-cp311-cp311-win32.whl", hash = "sha256:b66b23f9109e78ff2791628627a26f65cd335dcc5fbd67ff60162733a2f7aded"}, + {file = "onnxruntime-1.17.3-cp311-cp311-win_amd64.whl", hash = "sha256:570760ca53a74cdd751ee49f13de70d1384dcf73d9888b8deac0917023ccda6d"}, + {file = "onnxruntime-1.17.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:77c318178d9c16e9beadd9a4070d8aaa9f57382c3f509b01709f0f010e583b99"}, + {file = "onnxruntime-1.17.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23da8469049b9759082e22c41a444f44a520a9c874b084711b6343672879f50b"}, + {file = "onnxruntime-1.17.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2949730215af3f9289008b2e31e9bbef952012a77035b911c4977edea06f3f9e"}, + {file = "onnxruntime-1.17.3-cp312-cp312-win32.whl", hash = "sha256:6c7555a49008f403fb3b19204671efb94187c5085976ae526cb625f6ede317bc"}, + {file = "onnxruntime-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:58672cf20293a1b8a277a5c6c55383359fcdf6119b2f14df6ce3b140f5001c39"}, + {file = "onnxruntime-1.17.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:4395ba86e3c1e93c794a00619ef1aec597ab78f5a5039f3c6d2e9d0695c0a734"}, + {file = "onnxruntime-1.17.3-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdf354c04344ec38564fc22394e1fe08aa6d70d790df00159205a0055c4a4d3f"}, + {file = "onnxruntime-1.17.3-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a94b600b7af50e922d44b95a57981e3e35103c6e3693241a03d3ca204740bbda"}, + {file = "onnxruntime-1.17.3-cp38-cp38-win32.whl", hash = "sha256:5a335c76f9c002a8586c7f38bc20fe4b3725ced21f8ead835c3e4e507e42b2ab"}, + {file = "onnxruntime-1.17.3-cp38-cp38-win_amd64.whl", hash = "sha256:8f56a86fbd0ddc8f22696ddeda0677b041381f4168a2ca06f712ef6ec6050d6d"}, + {file = "onnxruntime-1.17.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:e0ae39f5452278cd349520c296e7de3e90d62dc5b0157c6868e2748d7f28b871"}, + {file = "onnxruntime-1.17.3-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ff2dc012bd930578aff5232afd2905bf16620815f36783a941aafabf94b3702"}, + {file = 
"onnxruntime-1.17.3-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf6c37483782e4785019b56e26224a25e9b9a35b849d0169ce69189867a22bb1"}, + {file = "onnxruntime-1.17.3-cp39-cp39-win32.whl", hash = "sha256:351bf5a1140dcc43bfb8d3d1a230928ee61fcd54b0ea664c8e9a889a8e3aa515"}, + {file = "onnxruntime-1.17.3-cp39-cp39-win_amd64.whl", hash = "sha256:57a3de15778da8d6cc43fbf6cf038e1e746146300b5f0b1fbf01f6f795dc6440"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.21.6" +packaging = "*" +protobuf = "*" +sympy = "*" + +[[package]] +name = "onnxruntime-tools" +version = "1.7.0" +description = "Transformers Model Optimization Tool of ONNXRuntime" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "onnxruntime_tools-1.7.0-py3-none-any.whl", hash = "sha256:1dff888b5c482ac5bc627f12e108445fefcb3d600c43f63633975316fe617ad8"}, + {file = "onnxruntime_tools-1.7.0.tar.gz", hash = "sha256:6dbdcee49424e066bcd10357c37d51bc422ae26494e3c2f0c1970d534f967f6d"}, +] + +[package.dependencies] +coloredlogs = "*" +numpy = "*" +onnx = "*" +packaging = "*" +psutil = "*" +py-cpuinfo = "*" +py3nvml = "*" + +[[package]] +name = "opt-einsum" +version = "3.4.0" +description = "Path optimization of einsum functions." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "opt_einsum-3.4.0-py3-none-any.whl", hash = "sha256:69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd"}, + {file = "opt_einsum-3.4.0.tar.gz", hash = "sha256:96ca72f1b886d148241348783498194c577fa30a8faac108586b14f1ba4473ac"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = 
"pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd 
(>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "pandas" +version = "2.3.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "pandas-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:376c6446ae31770764215a6c937f72d917f214b43560603cd60da6408f183b6c"}, + {file = "pandas-2.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e19d192383eab2f4ceb30b412b22ea30690c9e618f78870357ae1d682912015a"}, + {file = "pandas-2.3.3-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5caf26f64126b6c7aec964f74266f435afef1c1b13da3b0636c7518a1fa3e2b1"}, + {file = "pandas-2.3.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd7478f1463441ae4ca7308a70e90b33470fa593429f9d4c578dd00d1fa78838"}, + {file = "pandas-2.3.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4793891684806ae50d1288c9bae9330293ab4e083ccd1c5e383c34549c6e4250"}, + {file = "pandas-2.3.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:28083c648d9a99a5dd035ec125d42439c6c1c525098c58af0fc38dd1a7a1b3d4"}, + {file = "pandas-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:503cf027cf9940d2ceaa1a93cfb5f8c8c7e6e90720a2850378f0b3f3b1e06826"}, + {file = "pandas-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:602b8615ebcc4a0c1751e71840428ddebeb142ec02c786e8ad6b1ce3c8dec523"}, + {file = "pandas-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fe25fc7b623b0ef6b5009149627e34d2a4657e880948ec3c840e9402e5c1b45"}, + {file = "pandas-2.3.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b468d3dad6ff947df92dcb32ede5b7bd41a9b3cceef0a30ed925f6d01fb8fa66"}, + {file = "pandas-2.3.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b98560e98cb334799c0b07ca7967ac361a47326e9b4e5a7dfb5ab2b1c9d35a1b"}, + {file = "pandas-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37b5848ba49824e5c30bedb9c830ab9b7751fd049bc7914533e01c65f79791"}, + {file = "pandas-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db4301b2d1f926ae677a751eb2bd0e8c5f5319c9cb3f88b0becbbb0b07b34151"}, + {file = "pandas-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f086f6fe114e19d92014a1966f43a3e62285109afe874f067f5abbdcbb10e59c"}, + {file = 
"pandas-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d21f6d74eb1725c2efaa71a2bfc661a0689579b58e9c0ca58a739ff0b002b53"}, + {file = "pandas-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3fd2f887589c7aa868e02632612ba39acb0b8948faf5cc58f0850e165bd46f35"}, + {file = "pandas-2.3.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecaf1e12bdc03c86ad4a7ea848d66c685cb6851d807a26aa245ca3d2017a1908"}, + {file = "pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89"}, + {file = "pandas-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a68e15f780eddf2b07d242e17a04aa187a7ee12b40b930bfdd78070556550e98"}, + {file = "pandas-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:371a4ab48e950033bcf52b6527eccb564f52dc826c02afd9a1bc0ab731bba084"}, + {file = "pandas-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b"}, + {file = "pandas-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56851a737e3470de7fa88e6131f41281ed440d29a9268dcbf0002da5ac366713"}, + {file = "pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8"}, + {file = "pandas-2.3.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e32e7cc9af0f1cc15548288a51a3b681cc2a219faa838e995f7dc53dbab1062d"}, + {file = "pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac"}, + {file = "pandas-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e0a175408804d566144e170d0476b15d78458795bb18f1304fb94160cabf40c"}, + {file = "pandas-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2d9ab0fc11822b5eece72ec9587e172f63cff87c00b062f6e37448ced4493"}, + {file = "pandas-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee"}, + {file = "pandas-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:75ea25f9529fdec2d2e93a42c523962261e567d250b0013b16210e1d40d7c2e5"}, + {file = "pandas-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74ecdf1d301e812db96a465a525952f4dde225fdb6d8e5a521d47e1f42041e21"}, + {file = "pandas-2.3.3-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6435cb949cb34ec11cc9860246ccb2fdc9ecd742c12d3304989017d53f039a78"}, + {file = "pandas-2.3.3-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:900f47d8f20860de523a1ac881c4c36d65efcb2eb850e6948140fa781736e110"}, + {file = "pandas-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a45c765238e2ed7d7c608fc5bc4a6f88b642f2f01e70c0c23d2224dd21829d86"}, + {file = "pandas-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4fc4c21971a1a9f4bdb4c73978c7f7256caa3e62b323f70d6cb80db583350bc"}, + {file = "pandas-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ee15f284898e7b246df8087fc82b87b01686f98ee67d85a17b7ab44143a3a9a0"}, + {file = "pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593"}, + {file = "pandas-2.3.3-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d2cefc361461662ac48810cb14365a365ce864afe85ef1f447ff5a1e99ea81c"}, + {file = 
"pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b"}, + {file = "pandas-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c46467899aaa4da076d5abc11084634e2d197e9460643dd455ac3db5856b24d6"}, + {file = "pandas-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6253c72c6a1d990a410bc7de641d34053364ef8bcd3126f7e7450125887dffe3"}, + {file = "pandas-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5"}, + {file = "pandas-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2462b1a365b6109d275250baaae7b760fd25c726aaca0054649286bcfbb3e8ec"}, + {file = "pandas-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0242fe9a49aa8b4d78a4fa03acb397a58833ef6199e9aa40a95f027bb3a1b6e7"}, + {file = "pandas-2.3.3-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a21d830e78df0a515db2b3d2f5570610f5e6bd2e27749770e8bb7b524b89b450"}, + {file = "pandas-2.3.3-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e3ebdb170b5ef78f19bfb71b0dc5dc58775032361fa188e814959b74d726dd5"}, + {file = "pandas-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d051c0e065b94b7a3cea50eb1ec32e912cd96dba41647eb24104b6c6c14c5788"}, + {file = "pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87"}, + {file = "pandas-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c503ba5216814e295f40711470446bc3fd00f0faea8a086cbc688808e26f92a2"}, + {file = "pandas-2.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a637c5cdfa04b6d6e2ecedcb81fc52ffb0fd78ce2ebccc9ea964df9f658de8c8"}, + {file = "pandas-2.3.3-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:854d00d556406bffe66a4c0802f334c9ad5a96b4f1f868adf036a21b11ef13ff"}, + {file = "pandas-2.3.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bf1f8a81d04ca90e32a0aceb819d34dbd378a98bf923b6398b9a3ec0bf44de29"}, + {file = "pandas-2.3.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:23ebd657a4d38268c7dfbdf089fbc31ea709d82e4923c5ffd4fbd5747133ce73"}, + {file = "pandas-2.3.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5554c929ccc317d41a5e3d1234f3be588248e61f08a74dd17c9eabb535777dc9"}, + {file = "pandas-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:d3e28b3e83862ccf4d85ff19cf8c20b2ae7e503881711ff2d534dc8f761131aa"}, + {file = "pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b"}, +] + +[package.dependencies] +numpy = {version = ">=1.22.4", markers = "python_version < \"3.11\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy 
(>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = 
"pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions ; python_version < \"3.10\""] +xmp = ["defusedxml"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "platformdirs" +version = "4.5.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "platformdirs-4.5.0-py3-none-any.whl", hash = "sha256:e578a81bb873cbb89a41fcc904c7ef523cc18284b7e3b3ccf06aca1403b7ebd3"}, + {file = "platformdirs-4.5.0.tar.gz", hash = "sha256:70ddccdd7c99fc5942e9fc25636a8b34d04c24b335100223152c2803e4063312"}, +] + +[package.extras] +docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx-autodoc-typehints (>=3.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] +type = ["mypy (>=1.18.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pooch" +version = "1.8.2" +description = "A friend to fetch your data files" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pooch-1.8.2-py3-none-any.whl", hash = "sha256:3529a57096f7198778a5ceefd5ac3ef0e4d06a6ddaf9fc2d609b806f25302c47"}, + {file = "pooch-1.8.2.tar.gz", hash = "sha256:76561f0de68a01da4df6af38e9955c4c9d1a5c90da73f7e40276a5728ec83d10"}, +] + +[package.dependencies] +packaging = ">=20.0" +platformdirs = ">=2.5.0" +requests = ">=2.19.0" + +[package.extras] +progress = ["tqdm (>=4.41.0,<5.0.0)"] +sftp = ["paramiko (>=2.7.0)"] +xxhash = ["xxhash (>=1.4.3)"] + +[[package]] +name = "portalocker" +version = "3.0.0" +description = "Wraps the portalocker recipe for easy usage" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "portalocker-3.0.0-py3-none-any.whl", hash = 
"sha256:211916b539a0dc3c128a3d9e86893ecfefec5379c4ff684e798f0a00f99db406"}, + {file = "portalocker-3.0.0.tar.gz", hash = "sha256:21f535de2e7a82c94c130c054adb5c7421d480d5619d61073996e2f89bcb879b"}, +] + +[package.dependencies] +pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} + +[package.extras] +docs = ["sphinx (>=1.7.1)"] +redis = ["redis"] +tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] + +[[package]] +name = "propcache" +version = "0.2.0" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, + {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, + {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"}, + {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"}, + {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"}, + {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"}, + {file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"}, + {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"}, + {file = 
"propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"}, + {file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"}, + {file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"}, + {file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"}, + {file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"}, + {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"}, + {file = "propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"}, + {file = 
"propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"}, + {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"}, + {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"}, + {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"}, + {file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"}, + {file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"}, + {file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"}, + {file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"}, + {file = 
"propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"}, + {file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"}, + {file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"}, + {file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"}, + {file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"}, + {file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"}, + {file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"}, + {file = 
"propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"}, + {file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"}, + {file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"}, + {file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"}, + {file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"}, + {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"}, + {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"}, +] + +[[package]] +name = "propcache" +version = "0.4.1" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c2d1fa3201efaf55d730400d945b5b3ab6e672e100ba0f9a409d950ab25d7db"}, + {file = "propcache-0.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1eb2994229cc8ce7fe9b3db88f5465f5fd8651672840b2e426b88cdb1a30aac8"}, + {file = "propcache-0.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66c1f011f45a3b33d7bcb22daed4b29c0c9e2224758b6be00686731e1b46f925"}, + {file = "propcache-0.4.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a52009f2adffe195d0b605c25ec929d26b36ef986ba85244891dee3b294df21"}, + {file = "propcache-0.4.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5d4e2366a9c7b837555cf02fb9be2e3167d333aff716332ef1b7c3a142ec40c5"}, + {file = "propcache-0.4.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9d2b6caef873b4f09e26ea7e33d65f42b944837563a47a94719cc3544319a0db"}, + {file = "propcache-0.4.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b16ec437a8c8a965ecf95739448dd938b5c7f56e67ea009f4300d8df05f32b7"}, + {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:296f4c8ed03ca7476813fe666c9ea97869a8d7aec972618671b33a38a5182ef4"}, + {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:1f0978529a418ebd1f49dad413a2b68af33f85d5c5ca5c6ca2a3bed375a7ac60"}, + {file = 
"propcache-0.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fd138803047fb4c062b1c1dd95462f5209456bfab55c734458f15d11da288f8f"}, + {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8c9b3cbe4584636d72ff556d9036e0c9317fa27b3ac1f0f558e7e84d1c9c5900"}, + {file = "propcache-0.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f93243fdc5657247533273ac4f86ae106cc6445a0efacb9a1bfe982fcfefd90c"}, + {file = "propcache-0.4.1-cp310-cp310-win32.whl", hash = "sha256:a0ee98db9c5f80785b266eb805016e36058ac72c51a064040f2bc43b61101cdb"}, + {file = "propcache-0.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cdb7988c4e5ac7f6d175a28a9aa0c94cb6f2ebe52756a3c0cda98d2809a9e37"}, + {file = "propcache-0.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:d82ad62b19645419fe79dd63b3f9253e15b30e955c0170e5cebc350c1844e581"}, + {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:60a8fda9644b7dfd5dece8c61d8a85e271cb958075bfc4e01083c148b61a7caf"}, + {file = "propcache-0.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c30b53e7e6bda1d547cabb47c825f3843a0a1a42b0496087bb58d8fedf9f41b5"}, + {file = "propcache-0.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6918ecbd897443087a3b7cd978d56546a812517dcaaca51b49526720571fa93e"}, + {file = "propcache-0.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d902a36df4e5989763425a8ab9e98cd8ad5c52c823b34ee7ef307fd50582566"}, + {file = "propcache-0.4.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a9695397f85973bb40427dedddf70d8dc4a44b22f1650dd4af9eedf443d45165"}, + {file = "propcache-0.4.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2bb07ffd7eaad486576430c89f9b215f9e4be68c4866a96e97db9e97fead85dc"}, + {file = "propcache-0.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd6f30fdcf9ae2a70abd34da54f18da086160e4d7d9251f81f3da0ff84fc5a48"}, + {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fc38cba02d1acba4e2869eef1a57a43dfbd3d49a59bf90dda7444ec2be6a5570"}, + {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:67fad6162281e80e882fb3ec355398cf72864a54069d060321f6cd0ade95fe85"}, + {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f10207adf04d08bec185bae14d9606a1444715bc99180f9331c9c02093e1959e"}, + {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9b0d8d0845bbc4cfcdcbcdbf5086886bc8157aa963c31c777ceff7846c77757"}, + {file = "propcache-0.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:981333cb2f4c1896a12f4ab92a9cc8f09ea664e9b7dbdc4eff74627af3a11c0f"}, + {file = "propcache-0.4.1-cp311-cp311-win32.whl", hash = "sha256:f1d2f90aeec838a52f1c1a32fe9a619fefd5e411721a9117fbf82aea638fe8a1"}, + {file = "propcache-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:364426a62660f3f699949ac8c621aad6977be7126c5807ce48c0aeb8e7333ea6"}, + {file = "propcache-0.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:e53f3a38d3510c11953f3e6a33f205c6d1b001129f972805ca9b42fc308bc239"}, + {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2"}, + {file = "propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403"}, + {file = 
"propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207"}, + {file = "propcache-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec17c65562a827bba85e3872ead335f95405ea1674860d96483a02f5c698fa72"}, + {file = "propcache-0.4.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:405aac25c6394ef275dee4c709be43745d36674b223ba4eb7144bf4d691b7367"}, + {file = "propcache-0.4.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0013cb6f8dde4b2a2f66903b8ba740bdfe378c943c4377a200551ceb27f379e4"}, + {file = "propcache-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15932ab57837c3368b024473a525e25d316d8353016e7cc0e5ba9eb343fbb1cf"}, + {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:031dce78b9dc099f4c29785d9cf5577a3faf9ebf74ecbd3c856a7b92768c3df3"}, + {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ab08df6c9a035bee56e31af99be621526bd237bea9f32def431c656b29e41778"}, + {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4d7af63f9f93fe593afbf104c21b3b15868efb2c21d07d8732c0c4287e66b6a6"}, + {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cfc27c945f422e8b5071b6e93169679e4eb5bf73bbcbf1ba3ae3a83d2f78ebd9"}, + {file = "propcache-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:35c3277624a080cc6ec6f847cbbbb5b49affa3598c4535a0a4682a697aaa5c75"}, + {file = "propcache-0.4.1-cp312-cp312-win32.whl", hash = "sha256:671538c2262dadb5ba6395e26c1731e1d52534bfe9ae56d0b5573ce539266aa8"}, + {file = "propcache-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:cb2d222e72399fcf5890d1d5cc1060857b9b236adff2792ff48ca2dfd46c81db"}, + {file = "propcache-0.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:204483131fb222bdaaeeea9f9e6c6ed0cac32731f75dfc1d4a567fc1926477c1"}, + {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:43eedf29202c08550aac1d14e0ee619b0430aaef78f85864c1a892294fbc28cf"}, + {file = "propcache-0.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d62cdfcfd89ccb8de04e0eda998535c406bf5e060ffd56be6c586cbcc05b3311"}, + {file = "propcache-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cae65ad55793da34db5f54e4029b89d3b9b9490d8abe1b4c7ab5d4b8ec7ebf74"}, + {file = "propcache-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:333ddb9031d2704a301ee3e506dc46b1fe5f294ec198ed6435ad5b6a085facfe"}, + {file = "propcache-0.4.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd0858c20f078a32cf55f7e81473d96dcf3b93fd2ccdb3d40fdf54b8573df3af"}, + {file = "propcache-0.4.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:678ae89ebc632c5c204c794f8dab2837c5f159aeb59e6ed0539500400577298c"}, + {file = "propcache-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d472aeb4fbf9865e0c6d622d7f4d54a4e101a89715d8904282bb5f9a2f476c3f"}, + {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4d3df5fa7e36b3225954fba85589da77a0fe6a53e3976de39caf04a0db4c36f1"}, + {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:ee17f18d2498f2673e432faaa71698032b0127ebf23ae5974eeaf806c279df24"}, + {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:580e97762b950f993ae618e167e7be9256b8353c2dcd8b99ec100eb50f5286aa"}, + {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:501d20b891688eb8e7aa903021f0b72d5a55db40ffaab27edefd1027caaafa61"}, + {file = "propcache-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a0bd56e5b100aef69bd8562b74b46254e7c8812918d3baa700c8a8009b0af66"}, + {file = "propcache-0.4.1-cp313-cp313-win32.whl", hash = "sha256:bcc9aaa5d80322bc2fb24bb7accb4a30f81e90ab8d6ba187aec0744bc302ad81"}, + {file = "propcache-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:381914df18634f5494334d201e98245c0596067504b9372d8cf93f4bb23e025e"}, + {file = "propcache-0.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:8873eb4460fd55333ea49b7d189749ecf6e55bf85080f11b1c4530ed3034cba1"}, + {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:92d1935ee1f8d7442da9c0c4fa7ac20d07e94064184811b685f5c4fada64553b"}, + {file = "propcache-0.4.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:473c61b39e1460d386479b9b2f337da492042447c9b685f28be4f74d3529e566"}, + {file = "propcache-0.4.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c0ef0aaafc66fbd87842a3fe3902fd889825646bc21149eafe47be6072725835"}, + {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95393b4d66bfae908c3ca8d169d5f79cd65636ae15b5e7a4f6e67af675adb0e"}, + {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c07fda85708bc48578467e85099645167a955ba093be0a2dcba962195676e859"}, + {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:af223b406d6d000830c6f65f1e6431783fc3f713ba3e6cc8c024d5ee96170a4b"}, + {file = "propcache-0.4.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a78372c932c90ee474559c5ddfffd718238e8673c340dc21fe45c5b8b54559a0"}, + {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:564d9f0d4d9509e1a870c920a89b2fec951b44bf5ba7d537a9e7c1ccec2c18af"}, + {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:17612831fda0138059cc5546f4d12a2aacfb9e47068c06af35c400ba58ba7393"}, + {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:41a89040cb10bd345b3c1a873b2bf36413d48da1def52f268a055f7398514874"}, + {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e35b88984e7fa64aacecea39236cee32dd9bd8c55f57ba8a75cf2399553f9bd7"}, + {file = "propcache-0.4.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f8b465489f927b0df505cbe26ffbeed4d6d8a2bbc61ce90eb074ff129ef0ab1"}, + {file = "propcache-0.4.1-cp313-cp313t-win32.whl", hash = "sha256:2ad890caa1d928c7c2965b48f3a3815c853180831d0e5503d35cf00c472f4717"}, + {file = "propcache-0.4.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f7ee0e597f495cf415bcbd3da3caa3bd7e816b74d0d52b8145954c5e6fd3ff37"}, + {file = "propcache-0.4.1-cp313-cp313t-win_arm64.whl", hash = "sha256:929d7cbe1f01bb7baffb33dc14eb5691c95831450a26354cd210a8155170c93a"}, + {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3f7124c9d820ba5548d431afb4632301acf965db49e666aa21c305cbe8c6de12"}, + {file = "propcache-0.4.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:c0d4b719b7da33599dfe3b22d3db1ef789210a0597bc650b7cee9c77c2be8c5c"}, + {file = "propcache-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9f302f4783709a78240ebc311b793f123328716a60911d667e0c036bc5dcbded"}, + {file = "propcache-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c80ee5802e3fb9ea37938e7eecc307fb984837091d5fd262bb37238b1ae97641"}, + {file = "propcache-0.4.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ed5a841e8bb29a55fb8159ed526b26adc5bdd7e8bd7bf793ce647cb08656cdf4"}, + {file = "propcache-0.4.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:55c72fd6ea2da4c318e74ffdf93c4fe4e926051133657459131a95c846d16d44"}, + {file = "propcache-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8326e144341460402713f91df60ade3c999d601e7eb5ff8f6f7862d54de0610d"}, + {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:060b16ae65bc098da7f6d25bf359f1f31f688384858204fe5d652979e0015e5b"}, + {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:89eb3fa9524f7bec9de6e83cf3faed9d79bffa560672c118a96a171a6f55831e"}, + {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:dee69d7015dc235f526fe80a9c90d65eb0039103fe565776250881731f06349f"}, + {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5558992a00dfd54ccbc64a32726a3357ec93825a418a401f5cc67df0ac5d9e49"}, + {file = "propcache-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c9b822a577f560fbd9554812526831712c1436d2c046cedee4c3796d3543b144"}, + {file = "propcache-0.4.1-cp314-cp314-win32.whl", hash = "sha256:ab4c29b49d560fe48b696cdcb127dd36e0bc2472548f3bf56cc5cb3da2b2984f"}, + {file = "propcache-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:5a103c3eb905fcea0ab98be99c3a9a5ab2de60228aa5aceedc614c0281cf6153"}, + {file = "propcache-0.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:74c1fb26515153e482e00177a1ad654721bf9207da8a494a0c05e797ad27b992"}, + {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:824e908bce90fb2743bd6b59db36eb4f45cd350a39637c9f73b1c1ea66f5b75f"}, + {file = "propcache-0.4.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2b5e7db5328427c57c8e8831abda175421b709672f6cfc3d630c3b7e2146393"}, + {file = "propcache-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6f6ff873ed40292cd4969ef5310179afd5db59fdf055897e282485043fc80ad0"}, + {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49a2dc67c154db2c1463013594c458881a069fcf98940e61a0569016a583020a"}, + {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:005f08e6a0529984491e37d8dbc3dd86f84bd78a8ceb5fa9a021f4c48d4984be"}, + {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c3310452e0d31390da9035c348633b43d7e7feb2e37be252be6da45abd1abcc"}, + {file = "propcache-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c3c70630930447f9ef1caac7728c8ad1c56bc5015338b20fed0d08ea2480b3a"}, + {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e57061305815dfc910a3634dcf584f08168a8836e6999983569f51a8544cd89"}, + {file = 
"propcache-0.4.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:521a463429ef54143092c11a77e04056dd00636f72e8c45b70aaa3140d639726"}, + {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:120c964da3fdc75e3731aa392527136d4ad35868cc556fd09bb6d09172d9a367"}, + {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d8f353eb14ee3441ee844ade4277d560cdd68288838673273b978e3d6d2c8f36"}, + {file = "propcache-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ab2943be7c652f09638800905ee1bab2c544e537edb57d527997a24c13dc1455"}, + {file = "propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85"}, + {file = "propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1"}, + {file = "propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9"}, + {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d233076ccf9e450c8b3bc6720af226b898ef5d051a2d145f7d765e6e9f9bcff"}, + {file = "propcache-0.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:357f5bb5c377a82e105e44bd3d52ba22b616f7b9773714bff93573988ef0a5fb"}, + {file = "propcache-0.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbc3b6dfc728105b2a57c06791eb07a94229202ea75c59db644d7d496b698cac"}, + {file = "propcache-0.4.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:182b51b421f0501952d938dc0b0eb45246a5b5153c50d42b495ad5fb7517c888"}, + {file = "propcache-0.4.1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b536b39c5199b96fc6245eb5fb796c497381d3942f169e44e8e392b29c9ebcc"}, + {file = "propcache-0.4.1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:db65d2af507bbfbdcedb254a11149f894169d90488dd3e7190f7cdcb2d6cd57a"}, + {file = "propcache-0.4.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd2dbc472da1f772a4dae4fa24be938a6c544671a912e30529984dd80400cd88"}, + {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:daede9cd44e0f8bdd9e6cc9a607fc81feb80fae7a5fc6cecaff0e0bb32e42d00"}, + {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:71b749281b816793678ae7f3d0d84bd36e694953822eaad408d682efc5ca18e0"}, + {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:0002004213ee1f36cfb3f9a42b5066100c44276b9b72b4e1504cddd3d692e86e"}, + {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fe49d0a85038f36ba9e3ffafa1103e61170b28e95b16622e11be0a0ea07c6781"}, + {file = "propcache-0.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99d43339c83aaf4d32bda60928231848eee470c6bda8d02599cc4cebe872d183"}, + {file = "propcache-0.4.1-cp39-cp39-win32.whl", hash = "sha256:a129e76735bc792794d5177069691c3217898b9f5cee2b2661471e52ffe13f19"}, + {file = "propcache-0.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:948dab269721ae9a87fd16c514a0a2c2a1bdb23a9a61b969b0f9d9ee2968546f"}, + {file = "propcache-0.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:5fd37c406dd6dc85aa743e214cef35dc54bbdd1419baac4f6ae5e5b1a2976938"}, + {file = "propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237"}, + {file = "propcache-0.4.1.tar.gz", hash = 
"sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d"}, +] + +[[package]] +name = "protobuf" +version = "3.20.3" +description = "Protocol Buffers" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, + {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, + {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, + {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, + {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, + {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, + {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, + {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, + {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, + {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, + {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, + {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, + {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, + {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, + {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, +] + +[[package]] +name = "psutil" +version = "7.1.3" +description = "Cross-platform lib for process and system 
monitoring." +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc"}, + {file = "psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7"}, + {file = "psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251"}, + {file = "psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa"}, + {file = "psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353"}, + {file = "psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9"}, + {file = "psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f"}, + {file = "psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7"}, + {file = "psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab"}, + {file = "psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3"}, + {file = "psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b"}, + {file = "psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd"}, + {file = "psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1"}, + {file = "psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74"}, +] + +[package.extras] +dev = ["abi3audit", "black", "check-manifest", "colorama ; os_name == \"nt\"", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pyreadline ; os_name == \"nt\"", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32 ; os_name == \"nt\" and platform_python_implementation != \"PyPy\"", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "validate-pyproject[all]", "virtualenv", "vulture", "wheel", "wheel ; os_name == 
\"nt\" and platform_python_implementation != \"PyPy\"", "wmi ; os_name == \"nt\" and platform_python_implementation != \"PyPy\""] +test = ["pytest", "pytest-instafail", "pytest-subtests", "pytest-xdist", "pywin32 ; os_name == \"nt\" and platform_python_implementation != \"PyPy\"", "setuptools", "wheel ; os_name == \"nt\" and platform_python_implementation != \"PyPy\"", "wmi ; os_name == \"nt\" and platform_python_implementation != \"PyPy\""] + +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +description = "Get CPU info with pure Python" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + +[[package]] +name = "py3nvml" +version = "0.2.7" +description = "Python 3 Bindings for the NVIDIA Management Library" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "py3nvml-0.2.7-py3-none-any.whl", hash = "sha256:30101170d1f51419c8d21fd8ca6cdc333a552b4f8a945c2fc7d107d77e4220dd"}, + {file = "py3nvml-0.2.7.tar.gz", hash = "sha256:09ee1d04598a6e664e24465f804ce3bfe119a6fdb5362df1c168f8aa929fbd73"}, +] + +[package.dependencies] +xmltodict = "*" + +[[package]] +name = "pyarrow" +version = "17.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = 
"sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, +] + +[package.dependencies] +numpy = 
">=1.16.6" + +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + +[[package]] +name = "pyarrow" +version = "22.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "pyarrow-22.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:77718810bd3066158db1e95a63c160ad7ce08c6b0710bc656055033e39cdad88"}, + {file = "pyarrow-22.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:44d2d26cda26d18f7af7db71453b7b783788322d756e81730acb98f24eb90ace"}, + {file = "pyarrow-22.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:b9d71701ce97c95480fecb0039ec5bb889e75f110da72005743451339262f4ce"}, + {file = "pyarrow-22.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:710624ab925dc2b05a6229d47f6f0dac1c1155e6ed559be7109f684eba048a48"}, + {file = "pyarrow-22.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f963ba8c3b0199f9d6b794c90ec77545e05eadc83973897a4523c9e8d84e9340"}, + {file = "pyarrow-22.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd0d42297ace400d8febe55f13fdf46e86754842b860c978dfec16f081e5c653"}, + {file = "pyarrow-22.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:00626d9dc0f5ef3a75fe63fd68b9c7c8302d2b5bbc7f74ecaedba83447a24f84"}, + {file = "pyarrow-22.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:3e294c5eadfb93d78b0763e859a0c16d4051fc1c5231ae8956d61cb0b5666f5a"}, + {file = "pyarrow-22.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:69763ab2445f632d90b504a815a2a033f74332997052b721002298ed6de40f2e"}, + {file = "pyarrow-22.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:b41f37cabfe2463232684de44bad753d6be08a7a072f6a83447eeaf0e4d2a215"}, + {file = "pyarrow-22.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:35ad0f0378c9359b3f297299c3309778bb03b8612f987399a0333a560b43862d"}, + {file = "pyarrow-22.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8382ad21458075c2e66a82a29d650f963ce51c7708c7c0ff313a8c206c4fd5e8"}, + {file = "pyarrow-22.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1a812a5b727bc09c3d7ea072c4eebf657c2f7066155506ba31ebf4792f88f016"}, + {file = "pyarrow-22.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:ec5d40dd494882704fb876c16fa7261a69791e784ae34e6b5992e977bd2e238c"}, + {file = "pyarrow-22.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bea79263d55c24a32b0d79c00a1c58bb2ee5f0757ed95656b01c0fb310c5af3d"}, + {file = "pyarrow-22.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:12fe549c9b10ac98c91cf791d2945e878875d95508e1a5d14091a7aaa66d9cf8"}, + {file = "pyarrow-22.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:334f900ff08ce0423407af97e6c26ad5d4e3b0763645559ece6fbf3747d6a8f5"}, + {file = "pyarrow-22.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c6c791b09c57ed76a18b03f2631753a4960eefbbca80f846da8baefc6491fcfe"}, + {file = "pyarrow-22.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c3200cb41cdbc65156e5f8c908d739b0dfed57e890329413da2748d1a2cd1a4e"}, + {file = "pyarrow-22.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ac93252226cf288753d8b46280f4edf3433bf9508b6977f8dd8526b521a1bbb9"}, + {file = "pyarrow-22.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:44729980b6c50a5f2bfcc2668d36c569ce17f8b17bccaf470c4313dcbbf13c9d"}, + {file = "pyarrow-22.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e6e95176209257803a8b3d0394f21604e796dadb643d2f7ca21b66c9c0b30c9a"}, + {file = 
"pyarrow-22.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:001ea83a58024818826a9e3f89bf9310a114f7e26dfe404a4c32686f97bd7901"}, + {file = "pyarrow-22.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:ce20fe000754f477c8a9125543f1936ea5b8867c5406757c224d745ed033e691"}, + {file = "pyarrow-22.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e0a15757fccb38c410947df156f9749ae4a3c89b2393741a50521f39a8cf202a"}, + {file = "pyarrow-22.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cedb9dd9358e4ea1d9bce3665ce0797f6adf97ff142c8e25b46ba9cdd508e9b6"}, + {file = "pyarrow-22.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:252be4a05f9d9185bb8c18e83764ebcfea7185076c07a7a662253af3a8c07941"}, + {file = "pyarrow-22.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:a4893d31e5ef780b6edcaf63122df0f8d321088bb0dee4c8c06eccb1ca28d145"}, + {file = "pyarrow-22.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f7fe3dbe871294ba70d789be16b6e7e52b418311e166e0e3cba9522f0f437fb1"}, + {file = "pyarrow-22.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ba95112d15fd4f1105fb2402c4eab9068f0554435e9b7085924bcfaac2cc306f"}, + {file = "pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c064e28361c05d72eed8e744c9605cbd6d2bb7481a511c74071fd9b24bc65d7d"}, + {file = "pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:6f9762274496c244d951c819348afbcf212714902742225f649cf02823a6a10f"}, + {file = "pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a9d9ffdc2ab696f6b15b4d1f7cec6658e1d788124418cb30030afbae31c64746"}, + {file = "pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ec1a15968a9d80da01e1d30349b2b0d7cc91e96588ee324ce1b5228175043e95"}, + {file = "pyarrow-22.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bba208d9c7decf9961998edf5c65e3ea4355d5818dd6cd0f6809bec1afb951cc"}, + {file = "pyarrow-22.0.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:9bddc2cade6561f6820d4cd73f99a0243532ad506bc510a75a5a65a522b2d74d"}, + {file = "pyarrow-22.0.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:e70ff90c64419709d38c8932ea9fe1cc98415c4f87ea8da81719e43f02534bc9"}, + {file = "pyarrow-22.0.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:92843c305330aa94a36e706c16209cd4df274693e777ca47112617db7d0ef3d7"}, + {file = "pyarrow-22.0.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:6dda1ddac033d27421c20d7a7943eec60be44e0db4e079f33cc5af3b8280ccde"}, + {file = "pyarrow-22.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:84378110dd9a6c06323b41b56e129c504d157d1a983ce8f5443761eb5256bafc"}, + {file = "pyarrow-22.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:854794239111d2b88b40b6ef92aa478024d1e5074f364033e73e21e3f76b25e0"}, + {file = "pyarrow-22.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:b883fe6fd85adad7932b3271c38ac289c65b7337c2c132e9569f9d3940620730"}, + {file = "pyarrow-22.0.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7a820d8ae11facf32585507c11f04e3f38343c1e784c9b5a8b1da5c930547fe2"}, + {file = "pyarrow-22.0.0-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:c6ec3675d98915bf1ec8b3c7986422682f7232ea76cad276f4c8abd5b7319b70"}, + {file = "pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:3e739edd001b04f654b166204fc7a9de896cf6007eaff33409ee9e50ceaff754"}, + {file = "pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7388ac685cab5b279a41dfe0a6ccd99e4dbf322edfb63e02fc0443bf24134e91"}, + {file = 
"pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f633074f36dbc33d5c05b5dc75371e5660f1dbf9c8b1d95669def05e5425989c"}, + {file = "pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4c19236ae2402a8663a2c8f21f1870a03cc57f0bef7e4b6eb3238cc82944de80"}, + {file = "pyarrow-22.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0c34fe18094686194f204a3b1787a27456897d8a2d62caf84b61e8dfbc0252ae"}, + {file = "pyarrow-22.0.0.tar.gz", hash = "sha256:3d600dc583260d845c7d8a6db540339dd883081925da2bd1c5cb808f720b3cd9"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycparser" +version = "2.23" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "implementation_name != \"PyPy\" or python_version < \"3.10\"" +files = [ + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, +] + +[[package]] +name = "pyreadline3" +version = "3.5.4" +description = "A python implementation of GNU readline." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, + {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, +] + +[package.extras] +dev = ["build", "flake8", "mypy", "pytest", "twine"] + +[[package]] +name = "pytest" +version = "7.4.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2025.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, +] + +[[package]] +name = "pywin32" +version = "308" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Windows\"" +files = [ + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = 
"sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + 
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "regex" +version = "2024.11.6" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = 
"regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, +] + +[[package]] +name = "regex" +version = "2025.11.3" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5"}, + {file = "regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec"}, + {file = "regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd"}, + {file = "regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031"}, + {file 
= "regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e"}, + {file = "regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf"}, + {file = "regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a"}, + {file = "regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0"}, + {file = "regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204"}, + {file = "regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9"}, + {file = "regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7"}, + {file = "regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c"}, + {file = "regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5"}, + {file = "regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7"}, + {file = 
"regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2"}, + {file = "regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a"}, + {file = "regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c"}, + {file = "regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed"}, + {file = "regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4"}, + {file = "regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad"}, + {file = "regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379"}, + {file = "regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38"}, + {file = "regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de"}, + {file = "regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81519e25707fc076978c6143b81ea3dc853f176895af05bf7ec51effe818aeec"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3bf28b1873a8af8bbb58c26cc56ea6e534d80053b41fb511a35795b6de507e6a"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:856a25c73b697f2ce2a24e7968285579e62577a048526161a2c0f53090bea9f9"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a3d571bd95fade53c86c0517f859477ff3a93c3fde10c9e669086f038e0f207"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:732aea6de26051af97b94bc98ed86448821f839d058e5d259c72bf6d73ad0fc0"}, 
+ {file = "regex-2025.11.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:51c1c1847128238f54930edb8805b660305dca164645a9fd29243f5610beea34"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22dd622a402aad4558277305350699b2be14bc59f64d64ae1d928ce7d072dced"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f3b5a391c7597ffa96b41bd5cbd2ed0305f515fcbb367dfa72735679d5502364"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cc4076a5b4f36d849fd709284b4a3b112326652f3b0466f04002a6c15a0c96c1"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a295ca2bba5c1c885826ce3125fa0b9f702a1be547d821c01d65f199e10c01e2"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b4774ff32f18e0504bfc4e59a3e71e18d83bc1e171a3c8ed75013958a03b2f14"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e7d1cdfa88ef33a2ae6aa0d707f9255eb286ffbd90045f1088246833223aee"}, + {file = "regex-2025.11.3-cp39-cp39-win32.whl", hash = "sha256:74d04244852ff73b32eeede4f76f51c5bcf44bc3c207bc3e6cf1c5c45b890708"}, + {file = "regex-2025.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:7a50cd39f73faa34ec18d6720ee25ef10c4c1839514186fcda658a06c06057a2"}, + {file = "regex-2025.11.3-cp39-cp39-win_arm64.whl", hash = "sha256:43b4fb020e779ca81c1b5255015fe2b82816c76ec982354534ad9ec09ad7c9e3"}, + {file = "regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "s3transfer" +version = "0.11.2" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "s3transfer-0.11.2-py3-none-any.whl", hash = "sha256:be6ecb39fadd986ef1701097771f87e4d2f821f27f6071c872143884d2950fbc"}, + {file = "s3transfer-0.11.2.tar.gz", hash = "sha256:3b39185cb72f5acc77db1a58b6e25b977f28d20496b6e58d6813d75f464d632f"}, +] + +[package.dependencies] +botocore = ">=1.36.0,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.36.0,<2.0a.0)"] + +[[package]] +name = "safetensors" +version = "0.5.3" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "safetensors-0.5.3-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd20eb133db8ed15b40110b7c00c6df51655a2998132193de2f75f72d99c7073"}, + {file = "safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:21d01c14ff6c415c485616b8b0bf961c46b3b343ca59110d38d744e577f9cce7"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11bce6164887cd491ca75c2326a113ba934be596e22b28b1742ce27b1d076467"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4a243be3590bc3301c821da7a18d87224ef35cbd3e5f5727e4e0728b8172411e"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bd84b12b1670a6f8e50f01e28156422a2bc07fb16fc4e98bded13039d688a0d"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:391ac8cab7c829452175f871fcaf414aa1e292b5448bd02620f675a7f3e7abb9"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cead1fa41fc54b1e61089fa57452e8834f798cb1dc7a09ba3524f1eb08e0317a"}, + {file = "safetensors-0.5.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1077f3e94182d72618357b04b5ced540ceb71c8a813d3319f1aba448e68a770d"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:799021e78287bac619c7b3f3606730a22da4cda27759ddf55d37c8db7511c74b"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:df26da01aaac504334644e1b7642fa000bfec820e7cef83aeac4e355e03195ff"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_i686.whl", hash = 
"sha256:32c3ef2d7af8b9f52ff685ed0bc43913cdcde135089ae322ee576de93eae5135"}, + {file = "safetensors-0.5.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:37f1521be045e56fc2b54c606d4455573e717b2d887c579ee1dbba5f868ece04"}, + {file = "safetensors-0.5.3-cp38-abi3-win32.whl", hash = "sha256:cfc0ec0846dcf6763b0ed3d1846ff36008c6e7290683b61616c4b040f6a54ace"}, + {file = "safetensors-0.5.3-cp38-abi3-win_amd64.whl", hash = "sha256:836cbbc320b47e80acd40e44c8682db0e8ad7123209f69b093def21ec7cafd11"}, + {file = "safetensors-0.5.3.tar.gz", hash = "sha256:b6b0d6ecacec39a4fdd99cc19f4576f5219ce858e6fd8dbe7609df0b8dc56965"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + +[[package]] +name = "safetensors" +version = "0.6.2" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "safetensors-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:9c85ede8ec58f120bad982ec47746981e210492a6db876882aa021446af8ffba"}, + {file = "safetensors-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d6675cf4b39c98dbd7d940598028f3742e0375a6b4d4277e76beb0c35f4b843b"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d2d2b3ce1e2509c68932ca03ab8f20570920cd9754b05063d4368ee52833ecd"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:93de35a18f46b0f5a6a1f9e26d91b442094f2df02e9fd7acf224cfec4238821a"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89a89b505f335640f9120fac65ddeb83e40f1fd081cb8ed88b505bdccec8d0a1"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc4d0d0b937e04bdf2ae6f70cd3ad51328635fe0e6214aa1fc811f3b576b3bda"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8045db2c872db8f4cbe3faa0495932d89c38c899c603f21e9b6486951a5ecb8f"}, + {file = "safetensors-0.6.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:81e67e8bab9878bb568cffbc5f5e655adb38d2418351dc0859ccac158f753e19"}, + {file = "safetensors-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0e4d029ab0a0e0e4fdf142b194514695b1d7d3735503ba700cf36d0fc7136ce"}, + {file = "safetensors-0.6.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:fa48268185c52bfe8771e46325a1e21d317207bcabcb72e65c6e28e9ffeb29c7"}, + {file = "safetensors-0.6.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:d83c20c12c2d2f465997c51b7ecb00e407e5f94d7dec3ea0cc11d86f60d3fde5"}, + {file = "safetensors-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:d944cea65fad0ead848b6ec2c37cc0b197194bec228f8020054742190e9312ac"}, + {file = "safetensors-0.6.2-cp38-abi3-win32.whl", hash = "sha256:cab75ca7c064d3911411461151cb69380c9225798a20e712b102edda2542ddb1"}, + {file = "safetensors-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:c7b214870df923cbc1593c3faee16bec59ea462758699bd3fee399d00aac072c"}, + {file = "safetensors-0.6.2.tar.gz", hash = "sha256:43ff2aa0e6fa2dc3ea5524ac7ad93a9839256b8703761e76e2d0b2a3fa4f15d9"}, +] + +[package.extras] +all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"] +dev = ["safetensors[all]"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"] +mlx = ["mlx (>=0.0.9)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"] +pinned-tf = ["safetensors[numpy]", "tensorflow (==2.18.0)"] +quality = ["ruff"] +tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +testingfree = ["huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"] +torch = ["safetensors[numpy]", "torch (>=1.10)"] + +[[package]] +name = "scikit-learn" +version = "1.3.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, + {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, + {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, + 
{file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, + {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, + {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, + {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, +] + +[package.dependencies] +joblib = ">=1.1.1" +numpy = ">=1.17.3,<2.0" +scipy = ">=1.5.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] + +[[package]] +name = "scikit-learn" +version = "1.7.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6b33579c10a3081d076ab403df4a4190da4f4432d443521674637677dc91e61f"}, + {file = "scikit_learn-1.7.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:36749fb62b3d961b1ce4fedf08fa57a1986cd409eff2d783bca5d4b9b5fce51c"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7a58814265dfc52b3295b1900cfb5701589d30a8bb026c7540f1e9d3499d5ec8"}, + {file = "scikit_learn-1.7.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a847fea807e278f821a0406ca01e387f97653e284ecbd9750e3ee7c90347f18"}, + {file = "scikit_learn-1.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:ca250e6836d10e6f402436d6463d6c0e4d8e0234cfb6a9a47835bd392b852ce5"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7509693451651cd7361d30ce4e86a1347493554f172b1c72a39300fa2aea79e"}, + {file = "scikit_learn-1.7.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:0486c8f827c2e7b64837c731c8feff72c0bd2b998067a8a9cbc10643c31f0fe1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89877e19a80c7b11a2891a27c21c4894fb18e2c2e077815bcade10d34287b20d"}, + {file = "scikit_learn-1.7.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8da8bf89d4d79aaec192d2bda62f9b56ae4e5b4ef93b6a56b5de4977e375c1f1"}, + {file = "scikit_learn-1.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:9b7ed8d58725030568523e937c43e56bc01cadb478fc43c042a9aca1dacb3ba1"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8d91a97fa2b706943822398ab943cde71858a50245e31bc71dba62aab1d60a96"}, + {file = "scikit_learn-1.7.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:acbc0f5fd2edd3432a22c69bed78e837c70cf896cd7993d71d51ba6708507476"}, + {file = "scikit_learn-1.7.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e5bf3d930aee75a65478df91ac1225ff89cd28e9ac7bd1196853a9229b6adb0b"}, + {file = "scikit_learn-1.7.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4d6e9deed1a47aca9fe2f267ab8e8fe82ee20b4526b2c0cd9e135cea10feb44"}, + {file = "scikit_learn-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:6088aa475f0785e01bcf8529f55280a3d7d298679f50c0bb70a2364a82d0b290"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b7dacaa05e5d76759fb071558a8b5130f4845166d88654a0f9bdf3eb57851b7"}, + {file = "scikit_learn-1.7.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:abebbd61ad9e1deed54cca45caea8ad5f79e1b93173dece40bb8e0c658dbe6fe"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:502c18e39849c0ea1a5d681af1dbcf15f6cce601aebb657aabbfe84133c1907f"}, + {file = "scikit_learn-1.7.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a4c328a71785382fe3fe676a9ecf2c86189249beff90bf85e22bdb7efaf9ae0"}, + {file = "scikit_learn-1.7.2-cp313-cp313-win_amd64.whl", hash = "sha256:63a9afd6f7b229aad94618c01c252ce9e6fa97918c5ca19c9a17a087d819440c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9acb6c5e867447b4e1390930e3944a005e2cb115922e693c08a323421a6966e8"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:2a41e2a0ef45063e654152ec9d8bcfc39f7afce35b08902bfe290c2498a67a6a"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:98335fb98509b73385b3ab2bd0639b1f610541d3988ee675c670371d6a87aa7c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:191e5550980d45449126e23ed1d5e9e24b2c68329ee1f691a3987476e115e09c"}, + {file = "scikit_learn-1.7.2-cp313-cp313t-win_amd64.whl", hash = "sha256:57dc4deb1d3762c75d685507fbd0bc17160144b2f2ba4ccea5dc285ab0d0e973"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fa8f63940e29c82d1e67a45d5297bdebbcb585f5a5a50c4914cc2e852ab77f33"}, + {file = "scikit_learn-1.7.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f95dc55b7902b91331fa4e5845dd5bde0580c9cd9612b1b2791b7e80c3d32615"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9656e4a53e54578ad10a434dc1f993330568cfee176dff07112b8785fb413106"}, + {file = "scikit_learn-1.7.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96dc05a854add0e50d3f47a1ef21a10a595016da5b007c7d9cd9d0bffd1fcc61"}, + {file = "scikit_learn-1.7.2-cp314-cp314-win_amd64.whl", hash = "sha256:bb24510ed3f9f61476181e4db51ce801e2ba37541def12dc9333b946fc7a9cf8"}, + {file = "scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.22.0" +scipy = ">=1.8.0" +threadpoolctl = ">=3.1.0" + +[package.extras] +benchmark = ["matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "pandas (>=1.4.0)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.17.1)", "numpy (>=1.22.0)", "scipy (>=1.8.0)"] +docs = ["Pillow (>=8.4.0)", "matplotlib (>=3.5.0)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] +examples = ["matplotlib (>=3.5.0)", "pandas (>=1.4.0)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.19.0)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.22.0)", "scipy (>=1.8.0)", "threadpoolctl (>=3.1.0)"] +maintenance = ["conda-lock (==3.0.1)"] +tests = ["matplotlib (>=3.5.0)", "mypy (>=1.15)", "numpydoc (>=1.2.0)", "pandas (>=1.4.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.2.1)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.11.7)", "scikit-image (>=0.19.0)"] + +[[package]] +name = "scipy" +version = "1.10.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = "<3.12,>=3.8" +groups = ["main", "dev"] +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, + {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +] +markers = {main = "platform_machine == \"x86_64\" or platform_machine == \"arm64\""} + +[package.dependencies] +numpy = ">=1.19.5,<1.27.0" + +[package.extras] +dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "sentencepiece" +version = "0.1.99" +description = "SentencePiece python wrapper" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, + {file = "sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, + {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, + {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, +] + +[[package]] +name = "setuptools" +version = "75.3.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = 
"platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"}, + {file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.5.2) ; sys_platform != \"cygwin\""] +core = ["importlib-metadata (>=6) ; python_version < \"3.10\"", "importlib-resources (>=5.10.2) ; python_version < \"3.9\"", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.12.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "soundfile" +version = "0.13.1" +description = "An audio library based on libsndfile, CFFI and NumPy" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "soundfile-0.13.1-py2.py3-none-any.whl", hash = "sha256:a23c717560da2cf4c7b5ae1142514e0fd82d6bbd9dfc93a50423447142f2c445"}, + {file = "soundfile-0.13.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:82dc664d19831933fe59adad199bf3945ad06d84bc111a5b4c0d3089a5b9ec33"}, + {file = "soundfile-0.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:743f12c12c4054921e15736c6be09ac26b3b3d603aef6fd69f9dde68748f2593"}, + {file = "soundfile-0.13.1-py2.py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9c9e855f5a4d06ce4213f31918653ab7de0c5a8d8107cd2427e44b42df547deb"}, + {file = "soundfile-0.13.1-py2.py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:03267c4e493315294834a0870f31dbb3b28a95561b80b134f0bd3cf2d5f0e618"}, + {file = "soundfile-0.13.1-py2.py3-none-win32.whl", hash = "sha256:c734564fab7c5ddf8e9be5bf70bab68042cd17e9c214c06e365e20d64f9a69d5"}, + {file = "soundfile-0.13.1-py2.py3-none-win_amd64.whl", hash = 
"sha256:1e70a05a0626524a69e9f0f4dd2ec174b4e9567f4d8b6c11d38b5c289be36ee9"}, + {file = "soundfile-0.13.1.tar.gz", hash = "sha256:b2c68dab1e30297317080a5b43df57e302584c49e2942defdde0acccc53f0e5b"}, +] + +[package.dependencies] +cffi = ">=1.0" +numpy = "*" + +[[package]] +name = "soxr" +version = "0.3.7" +description = "High quality, one-dimensional sample-rate conversion library" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "soxr-0.3.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac81c4af6a993d5b7c0b466bbac4835bad2b14ec32f342b2c1f83e4cf825e301"}, + {file = "soxr-0.3.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d8a2b3e7f8d0255e2484fb82cb66c86da6fb25b342ef793cceca9ce9a61aa16"}, + {file = "soxr-0.3.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd6eb6f6bbda2e8de36672cf2f0529ced6e638773150744ef075be0cc4f52c"}, + {file = "soxr-0.3.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e47d86af35b942c92606fc2d5dfccf3f01309329475571ae2312bbf9edc3a790"}, + {file = "soxr-0.3.7-cp310-cp310-win_amd64.whl", hash = "sha256:0e291adfaf9f2a7c4dd180a1b8c280f9beb1c84cb381853e4f4b3434d002ed7f"}, + {file = "soxr-0.3.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e811450f0e91972932bd37ac58e32e44002c2c99db2aa926a9e7ba164545034"}, + {file = "soxr-0.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9cea63014ce91035074e1228c9340e2b8609faf964e268705fcac5135d05060c"}, + {file = "soxr-0.3.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bfab27830f6217a15b83445988225c3aeea3bbccfa9399ced291e53e1b05925d"}, + {file = "soxr-0.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:286858e3078d76c11b6d490b66fed3c9bb2a4229759f6be03ceef5c02189bf2c"}, + {file = "soxr-0.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:54985ff33292192d2937be80df3e5f3a44d6d53e6835f727d6b99b7cdd3f1611"}, + {file = "soxr-0.3.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:83c74ef6d61d7dcd81be26f91bee0a420f792f5c1982266f2a80e655f0650a98"}, + {file = "soxr-0.3.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb1e14663a43fe88b8fbc287822a159028366a820abe1a0a9670fb53618cb47b"}, + {file = "soxr-0.3.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48acdfbcf870ab54f645b1cfd641bce92c1e3a67346c3bf0f6c0ad2873c1dd35"}, + {file = "soxr-0.3.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea663b76f2b0ec1576b8a43aef317aec080abc0a67a4015fcd9f3407039f260a"}, + {file = "soxr-0.3.7-cp312-cp312-win_amd64.whl", hash = "sha256:42da0d9eb79c70e5a41917f1b48a032e241a48eb4a1bcea7c80577302ff26974"}, + {file = "soxr-0.3.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:511c6b2279c8ddd83459d129d69f628f7aae4616ae0a1912963985bd89e35df7"}, + {file = "soxr-0.3.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a37c518c0b5d70162956d808d6c2e249bae0672e414e0dcfc101e200d8c31f3c"}, + {file = "soxr-0.3.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f2890528d2b2e358938ab660a6b8346802863f5b6b646204d7ff8ab0ca2c66"}, + {file = "soxr-0.3.7-cp37-cp37m-win_amd64.whl", hash = "sha256:52467c8c012495544a6dcfcce6b5bcbbc653d24fe9bb33c0b6191acecdb5e297"}, + {file = "soxr-0.3.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ce12b93747958f2769d6b297e6e27c73d9ad635fe8104ef052bece9c8a322824"}, + {file = "soxr-0.3.7-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:1cd65dc7b96ea3cb6c8c48e6020e859680556cc42dd3d4de44779530cce21037"}, + {file = "soxr-0.3.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d994f1a7690b1b13ab639ea33e0c1d78415b64d88d6df4af705a9443f97b9687"}, + {file = "soxr-0.3.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e87b58bc9e8c2caa16f07726f666bd043f0a49ca937baa803ce7708003b27833"}, + {file = "soxr-0.3.7-cp38-cp38-win_amd64.whl", hash = "sha256:07f4c0c6125ea1482fa187ad5f007216712ee0a93586a9b2f80e79c0bf944cf7"}, + {file = "soxr-0.3.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e5267c3ba34d4b873d9bbe3a9e58418b01ae4fd04349a4f944d9943b9ddac0f7"}, + {file = "soxr-0.3.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e39668c250e221db888cf3b290a16fbe10a702d9a4eb604a127f720040de583"}, + {file = "soxr-0.3.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ceeb74e5a55d903cc286d3bd12c2d8f8c85d02894071e9ec92ab405430907c"}, + {file = "soxr-0.3.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed6bf58192dd1bb93becd2444de4d712689713d727b32fd55623ae9aae7df7"}, + {file = "soxr-0.3.7-cp39-cp39-win_amd64.whl", hash = "sha256:7221302b4547d02a3f38dd3cd15317ab2b78873c75921db5f4a070848f0c71be"}, + {file = "soxr-0.3.7.tar.gz", hash = "sha256:436ddff00c6eb2c75b79c19cfdca7527b1e31b5fad738652f044045ba6258593"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +docs = ["linkify-it-py", "myst-parser", "sphinx", "sphinx-book-theme"] +test = ["pytest"] + +[[package]] +name = "soxr" +version = "1.0.0" +description = "High quality, one-dimensional sample-rate conversion library" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "soxr-1.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:b876a3156f67c76aef0cff1084eaf4088d9ca584bb569cb993f89a52ec5f399f"}, + {file = "soxr-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4d3b957a7b0cc19ae6aa45d40b2181474e53a8dd00efd7bce6bcf4e60e020892"}, + {file = "soxr-1.0.0-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89685faedebc45af71f08f9957b61cc6143bc94ba43fe38e97067f81e272969"}, + {file = "soxr-1.0.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d255741b2f0084fd02d4a2ddd77cd495be9e7e7b6f9dba1c9494f86afefac65b"}, + {file = "soxr-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:158a4a9055958c4b95ef91dbbe280cabb00946b5423b25a9b0ce31bd9e0a271e"}, + {file = "soxr-1.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:28e19d74a5ef45c0d7000f3c70ec1719e89077379df2a1215058914d9603d2d8"}, + {file = "soxr-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8dc69fc18884e53b72f6141fdf9d80997edbb4fec9dc2942edcb63abbe0d023"}, + {file = "soxr-1.0.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f15450e6f65f22f02fcd4c5a9219c873b1e583a73e232805ff160c759a6b586"}, + {file = "soxr-1.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f73f57452f9df37b4de7a4052789fcbd474a5b28f38bba43278ae4b489d4384"}, + {file = "soxr-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f417c3d69236051cf5a1a7bad7c4bff04eb3d8fcaa24ac1cb06e26c8d48d8dc"}, + {file = "soxr-1.0.0-cp312-abi3-macosx_10_14_x86_64.whl", hash = "sha256:abecf4e39017f3fadb5e051637c272ae5778d838e5c3926a35db36a53e3a607f"}, + {file = "soxr-1.0.0-cp312-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:e973d487ee46aa8023ca00a139db6e09af053a37a032fe22f9ff0cc2e19c94b4"}, + {file = "soxr-1.0.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e8ce273cca101aff3d8c387db5a5a41001ba76ef1837883438d3c652507a9ccc"}, + {file = "soxr-1.0.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8f2a69686f2856d37823bbb7b78c3d44904f311fe70ba49b893af11d6b6047b"}, + {file = "soxr-1.0.0-cp312-abi3-win_amd64.whl", hash = "sha256:2a3b77b115ae7c478eecdbd060ed4f61beda542dfb70639177ac263aceda42a2"}, + {file = "soxr-1.0.0-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:392a5c70c04eb939c9c176bd6f654dec9a0eaa9ba33d8f1024ed63cf68cdba0a"}, + {file = "soxr-1.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fdc41a1027ba46777186f26a8fba7893be913383414135577522da2fcc684490"}, + {file = "soxr-1.0.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:449acd1dfaf10f0ce6dfd75c7e2ef984890df94008765a6742dafb42061c1a24"}, + {file = "soxr-1.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38b35c99e408b8f440c9376a5e1dd48014857cd977c117bdaa4304865ae0edd0"}, + {file = "soxr-1.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:a39b519acca2364aa726b24a6fd55acf29e4c8909102e0b858c23013c38328e5"}, + {file = "soxr-1.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c120775b7d0ef9e974a5797a4695861e88653f7ecd0a2a532f089bc4452ba130"}, + {file = "soxr-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e59e5f648bd6144e79a6e0596aa486218876293f5ddce3ca84b9d8f8aa34d6d"}, + {file = "soxr-1.0.0-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bb86c342862697dbd4a44043f275e5196f2d2c49dca374c78f19b7893988675d"}, + {file = "soxr-1.0.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3d2a4fadd88207c2991fb08c29fc189e7b2e298b598a94ea1747e42c8acb7a01"}, + {file = "soxr-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c7f5ace8f04f924b21caedeeb69f2a7b3d83d2d436639498c08b2cebe181af14"}, + {file = "soxr-1.0.0.tar.gz", hash = "sha256:e07ee6c1d659bc6957034f4800c60cb8b98de798823e34d2a2bba1caa85a4509"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +docs = ["linkify-it-py", "myst-parser", "sphinx", "sphinx-book-theme"] +test = ["pytest"] + +[[package]] +name = "sympy" +version = "1.13.3" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "sympy" +version = "1.14.0" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"}, + {file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"}, +] + +[package.dependencies] +mpmath = ">=1.1.0,<1.4" + +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] + +[[package]] +name = "tabulate" +version = "0.9.0" 
+description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "tensorboard" +version = "2.12.3" +description = "TensorBoard lets you watch Tensors Flow" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "tensorboard-2.12.3-py3-none-any.whl", hash = "sha256:b4a69366784bc347e02fbe7d847e01896a649ca52f8948a11005e205dcf724fb"}, +] + +[package.dependencies] +absl-py = ">=0.4" +google-auth = ">=1.6.3,<3" +google-auth-oauthlib = ">=0.5,<1.1" +grpcio = ">=1.48.2" +markdown = ">=2.6.8" +numpy = ">=1.12.0" +protobuf = ">=3.19.6" +requests = ">=2.21.0,<3" +setuptools = ">=41.0.0" +tensorboard-data-server = ">=0.7.0,<0.8.0" +werkzeug = ">=1.0.1" +wheel = ">=0.26" + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +description = "Fast data loading for TensorBoard" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb"}, + {file = "tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60"}, + {file = "tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530"}, +] + +[[package]] +name = "tensorflow" +version = "2.12.0" +description = "TensorFlow is an open source machine learning framework for everyone." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\"" +files = [ + {file = "tensorflow-2.12.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:be4ac0dfcc7a16f6df2bc19bd322e312235ab3f7b0c7297f96c92c44bb14d2a1"}, + {file = "tensorflow-2.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5193ddb3bb5120cb445279beb08ed9e74a85a4eeb2485550d6fb707a89d9a88"}, + {file = "tensorflow-2.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357d9d2851188a8d27ee195345b4d175cad970150d1344ba9d9fcc4bf2b68336"}, + {file = "tensorflow-2.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c8001210df7202ef6267150865b0b79f834c3ca69ee3132277de8eeb994dffde"}, + {file = "tensorflow-2.12.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:91dccda42c03569d8c787190482a11ecae3b9b173aaa9166f0ab20cecc9c31f4"}, + {file = "tensorflow-2.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f81eb8adaeb558963f5d8b47dbfcc398d898f0857bf3de6b6484350236b7b5"}, + {file = "tensorflow-2.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ec4a2934ea19e92f27a9668ece43025ed5efe14b5d19be53b07692bc8a4189d"}, + {file = "tensorflow-2.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e7641e2a6e32f31ff233495478a9cc86b7c038140eab714a61eeddbbbb327c3"}, + {file = "tensorflow-2.12.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:a7194e744c5a7f3e759ecb949527b4a07718a6d1110e6e82fd4ce0c5586a7d4a"}, + {file = "tensorflow-2.12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4afc2dd57435f29ebe249eb5f595d89b0e73be94922eeb7110aa6280a332837c"}, + {file = "tensorflow-2.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23850332f1f9f778d697c9dba63ca52be72cb73363e75ad358f07ddafef63c01"}, + {file = "tensorflow-2.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:e29fcf6cfd069aefb4b44f357cccbb4415a5a3d7b5b516eaf4450062fe40021e"}, + {file = "tensorflow-2.12.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:42fc2635e9420faee781a16bd393126f29cd39aa2b9d02901f24d8497bd6f958"}, + {file = "tensorflow-2.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76414355e420edb9154b4e72113eef5813ccb71701fda959afbbc1eebe3099bd"}, + {file = "tensorflow-2.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:020d6a54cb26020bdc71a7bae8ee35be05096f63e773dc517f6e87c49de62c50"}, + {file = "tensorflow-2.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:9f70a8f9ab46e5ed436850aa60d1cd40645f5c669e14bcad48915dc1f597dda2"}, +] + +[package.dependencies] +absl-py = ">=1.0.0" +astunparse = ">=1.6.0" +flatbuffers = ">=2.0" +gast = ">=0.2.1,<=0.4.0" +google-pasta = ">=0.1.1" +grpcio = ">=1.24.3,<2.0" +h5py = ">=2.9.0" +jax = ">=0.3.15" +keras = ">=2.12.0,<2.13" +libclang = ">=13.0.0" +numpy = ">=1.22,<1.24" +opt-einsum = ">=2.3.2" +packaging = "*" +protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +setuptools = "*" +six = ">=1.12.0" +tensorboard = ">=2.12,<2.13" +tensorflow-estimator = ">=2.12.0,<2.13" +tensorflow-io-gcs-filesystem = {version = ">=0.23.1", markers = "platform_machine != \"arm64\" or platform_system != \"Darwin\""} +termcolor = ">=1.1.0" +typing-extensions = ">=3.6.6" +wrapt = ">=1.11.0,<1.15" + +[[package]] +name = "tensorflow-addons" +version = "0.19.0" +description = "TensorFlow Addons." 
+optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_machine == \"x86_64\"" +files = [ + {file = "tensorflow_addons-0.19.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:ca3764beba54c4ee4bb01a4294f8c2fef5c3814fd0f521dbe8beb4522545cb2d"}, + {file = "tensorflow_addons-0.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f74646fe83fd6f0d84ae5e0186c85cae3dd7e6c2329c8a5db4574c144706f39"}, + {file = "tensorflow_addons-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a46016fe9a1705043e39b7dacee3b089303ecdedbf1b12eb607aa35b7d2471e3"}, + {file = "tensorflow_addons-0.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:eefbdb4e0450b93fba6b393870784dad4c91189e5551e01b268aeb5fe5b04da6"}, + {file = "tensorflow_addons-0.19.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:a297db1af6e682277f593411d4d28b939646c2b67b8351ef0d31a30b9531fb93"}, + {file = "tensorflow_addons-0.19.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b673fe22c4113edabdc0dc1ef919ba0f1fb024ca39a5718ec146285c400e8f"}, + {file = "tensorflow_addons-0.19.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eefb6bf6d7a31d60649d6f6e99aee172ed4f5e693a079acfb264297997de21d0"}, + {file = "tensorflow_addons-0.19.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:d447a3f7852810b7985c890852dbcb6454f3899100d439d5eba370a78d8bd281"}, + {file = "tensorflow_addons-0.19.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:51fefd5f496ada5dafb13c446853fa1ddeb5482a0b9074af14efe0b99903816e"}, + {file = "tensorflow_addons-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c3142149f16e81362cc1d0959686543cb69df79f38a3ea3c5205fbf57b28e"}, + {file = "tensorflow_addons-0.19.0-cp38-cp38-win_amd64.whl", hash = "sha256:c93602cf3b8a7bbe1fbf973b7b9f986892be34ba8b943923f09ae6cd79f0a241"}, + {file = "tensorflow_addons-0.19.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:fc058876dce711009227c47559b05295a5fb480748d6ec5c49386b1dc2c00167"}, + {file = "tensorflow_addons-0.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9334910bb6b599dd627e632a59f35ae9256bda2312b06929066a437076bf4789"}, + {file = "tensorflow_addons-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25b029a917b635162b1f14df0263b2f79deadcd71daecd3161f69ccb1fbcea4"}, + {file = "tensorflow_addons-0.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8f4c3a88b381bd28bba3189a0216749f9e799ae3dc4959651728e01ae20d738"}, +] + +[package.dependencies] +packaging = "*" +typeguard = ">=2.7" + +[package.extras] +tensorflow = ["tensorflow (>=2.9.0,<2.12.0)"] +tensorflow-cpu = ["tensorflow-cpu (>=2.9.0,<2.12.0)"] +tensorflow-gpu = ["tensorflow-gpu (>=2.9.0,<2.12.0)"] + +[[package]] +name = "tensorflow-estimator" +version = "2.12.0" +description = "TensorFlow Estimator." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "tensorflow_estimator-2.12.0-py2.py3-none-any.whl", hash = "sha256:59b191bead4883822de3d63ac02ace11a83bfe6c10d64d0c4dfde75a50e60ca1"}, +] + +[[package]] +name = "tensorflow-io-gcs-filesystem" +version = "0.34.0" +description = "TensorFlow IO" +optional = false +python-versions = ">=3.7, <3.12" +groups = ["main"] +files = [ + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d831702fbb270996b27cda7fde06e0825b2ea81fd8dd3ead35242f4f8b3889b8"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:b9a93fcb01db269bc845a1ced431f3c61201755ce5f9ec4885760f30122276ef"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5813c336b4f7cb0a01ff4cc6cbd3edf11ef67305baf0e3cf634911b702f493f8"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b035f4c92639657b6d376929d550ac3dee9e6c0523eb434eefe0a27bae3d05b"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:a17a616d2c7fae83de4424404815843507d40d4eb0d507c636a5493a20c3d958"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:ec4604c99cbb5b708f4516dee27aa655abae222b876c98b740f4c2f89dd5c001"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cbe26c4a3332589c7b724f147df453b5c226993aa8d346a15536358d77b364c4"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6353123a5b51397950138a118876af833a7db66b531123bb86f82e80ab0e72"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f211d2b3db8f9931765992b607b71cbfb98c8cd6169079d004a67a94ab10ecb4"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:d3feba2dd76f7c188137c34642d68d378f0eed81636cb95090ecb1496722707c"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:44ad387a812a78e7424bb8bee3820521ae1c044bddf72b1e163e8df95c124a74"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:7f60183473f0ca966451bb1d1bb5dc29b3cf9c74d1d0e7f2ed46760ed56bd4af"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:3f346b287ed2400e09b13cfd8524222fd70a66aadb9164c645286c2087007e9f"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:027a07553367187f918a99661f63ae0506b91b77a70bee9c7ccaf3920bf7cfe7"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d8664bddbe4e7b56ce94db8b93ea9077a158fb5e15364e11e29f93015ceea24"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:182b0fbde7e9a537fda0b354c28b0b6c035736728de8fe2db7ef49cf90352014"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0dafed144673e1173528768fe208a7c5a6e8edae40208381cac420ee7c918ec9"}, + {file = 
"tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:396bfff61b49f80b86ddebe0c76ae0f2731689cee49ad7d782625180b50b13af"}, + {file = "tensorflow_io_gcs_filesystem-0.34.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b20622f8572fcb6c93e8f7d626327472f263e47ebd63d2153ef09162ef5ef7b5"}, +] + +[package.extras] +tensorflow = ["tensorflow (>=2.13.0,<2.14.0)"] +tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.13.0,<2.14.0)"] +tensorflow-cpu = ["tensorflow-cpu (>=2.13.0,<2.14.0)"] +tensorflow-gpu = ["tensorflow-gpu (>=2.13.0,<2.14.0)"] +tensorflow-rocm = ["tensorflow-rocm (>=2.13.0,<2.14.0)"] + +[[package]] +name = "tensorflow-macos" +version = "2.12.0" +description = "TensorFlow is an open source machine learning framework for everyone." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"arm64\"" +files = [ + {file = "tensorflow_macos-2.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:db464c88e10e927725997f9b872a21c9d057789d3b7e9a26e4ef1af41d0bcc8c"}, + {file = "tensorflow_macos-2.12.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:172277c33cb1ae0da19f98c5bcd4946149cfa73c8ea05c6ba18365d58dd3c6f2"}, + {file = "tensorflow_macos-2.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:9c9b14fbb73ec4cb0f209722a1489020fd8614c92ae22589f2309c48cefdf21f"}, + {file = "tensorflow_macos-2.12.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:6a54539bd076746f69ae8bef7282f981674fe4dbf59c3a84c4af86ae6bae9d5c"}, + {file = "tensorflow_macos-2.12.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:e3fa53e63672fd71998bbd71cc5478c74dbe5a2d9291d1801c575358c28403c2"}, + {file = "tensorflow_macos-2.12.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:5499312c21ed3ed47cc6b4cf861896e9564c2c32d8d3c2ef1437c5ca31adfc73"}, + {file = "tensorflow_macos-2.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:84cb873c90be63efabfecca53fdc48b734a037d0750532b55cb7ce7c343b5cac"}, + {file = "tensorflow_macos-2.12.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:85d9451a691324490e1d644b1051972e14edc249004eef5831b3510df9e36515"}, +] + +[package.dependencies] +absl-py = ">=1.0.0" +astunparse = ">=1.6.0" +flatbuffers = ">=2.0" +gast = ">=0.2.1,<=0.4.0" +google-pasta = ">=0.1.1" +grpcio = ">=1.24.3,<2.0" +h5py = ">=2.9.0" +jax = ">=0.3.15" +keras = ">=2.12.0,<2.13" +libclang = ">=13.0.0" +numpy = ">=1.22,<1.24" +opt-einsum = ">=2.3.2" +packaging = "*" +protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +setuptools = "*" +six = ">=1.12.0" +tensorboard = ">=2.12,<2.13" +tensorflow-estimator = ">=2.12.0,<2.13" +tensorflow-io-gcs-filesystem = {version = ">=0.23.1", markers = "platform_machine != \"arm64\" or platform_system != \"Darwin\""} +termcolor = ">=1.1.0" +typing-extensions = ">=3.6.6" +wrapt = ">=1.11.0,<1.15" + +[[package]] +name = "termcolor" +version = "2.4.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"}, + {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tf2onnx" +version = "1.16.1" +description = "Tensorflow to ONNX converter" 
+optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "tf2onnx-1.16.1-py3-none-any.whl", hash = "sha256:90fb5f62575896d47884d27dc313cfebff36b8783e1094335ad00824ce923a8a"}, +] + +[package.dependencies] +flatbuffers = ">=1.12" +numpy = ">=1.14.1" +onnx = ">=1.4.1" +protobuf = ">=3.20,<4.0" +requests = "*" +six = "*" + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "threadpoolctl" +version = "3.6.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb"}, + {file = "threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e"}, +] + +[[package]] +name = "tokenizers" +version = "0.20.3" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"}, + {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d"}, + {file = "tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f"}, + {file = "tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"}, + {file = 
"tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"}, + {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"}, + {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"}, + {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"}, + {file = 
"tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"}, + {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"}, + {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:9adda1ff5fb9dcdf899ceca672a4e2ce9e797adb512a6467305ca3d8bfcfbdd0"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:6dde2cae6004ba7a3badff4a11911cae03ebf23e97eebfc0e71fef2530e5074f"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4a7fd678b35614fca708579eb95b7587a5e8a6d328171bd2488fd9f27d82be4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b80e3c7283a01a356bd2210f53d1a4a5d32b269c2024389ed0173137708d50e"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8cc0e8176b762973758a77f0d9c4467d310e33165fb74173418ca3734944da4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5634b2e2f5f3d2b4439d2d74066e22eb4b1f04f3fea05cb2a3c12d89b5a3bcd"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4ba635165bc1ea46f2da8e5d80b5f70f6ec42161e38d96dbef33bb39df73964"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e4c7c64172e7789bd8b07aa3087ea87c4c4de7e90937a2aa036b5d92332536"}, + {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1f74909ef7675c26d4095a817ec3393d67f3158ca4836c233212e5613ef640c4"}, + {file = 
"tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9b81321a1e05b16487d312b4264984513f8b4a7556229cafac6e88c2036b09"}, + {file = "tokenizers-0.20.3-cp37-none-win32.whl", hash = "sha256:ab48184cd58b4a03022a2ec75b54c9f600ffea9a733612c02325ed636f353729"}, + {file = "tokenizers-0.20.3-cp37-none-win_amd64.whl", hash = "sha256:60ac483cebee1c12c71878523e768df02fa17e4c54412966cb3ac862c91b36c1"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3229ef103c89583d10b9378afa5d601b91e6337530a0988e17ca8d635329a996"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac52cc24bad3de865c7e65b1c4e7b70d00938a8ae09a92a453b8f676e714ad5"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04627b7b502fa6a2a005e1bd446fa4247d89abcb1afaa1b81eb90e21aba9a60f"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c27ceb887f0e81a3c377eb4605dca7a95a81262761c0fba308d627b2abb98f2b"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65ab780194da4e1fcf5670523a2f377c4838ebf5249efe41fa1eddd2a84fb49d"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d343134f47159e81f7f242264b0eb222e6b802f37173c8d7d7b64d5c9d1388"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2475bb004ab2009d29aff13b5047bfdb3d4b474f0aa9d4faa13a7f34dbbbb43"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b6583a65c01db1197c1eb36857ceba8ec329d53afadd268b42a6b04f4965724"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d00ba208358c037eeab7bfc00a905adc67b2d31b68ab40ed09d75881e114ea"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0fc7a39e5bedc817bda395a798dfe2d9c5f7c71153c90d381b5135a0328d9520"}, + {file = "tokenizers-0.20.3-cp38-none-win32.whl", hash = "sha256:84d40ee0f8550d64d3ea92dd7d24a8557a9172165bdb986c9fb2503b4fe4e3b6"}, + {file = "tokenizers-0.20.3-cp38-none-win_amd64.whl", hash = "sha256:205a45246ed7f1718cf3785cff88450ba603352412aaf220ace026384aa3f1c0"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921"}, + {file = "tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064"}, + {file = "tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f3558a7ae6a6d38a77dfce12172a1e2e1bf3e8871e744a1861cd7591ea9ebe24"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d53029fe44bc70c3ff14ef512460a0cf583495a0f8e2f4b70e26eb9438e38a9"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a2a56397b2bec5a629b516b23f0f8a3e4f978c7488d4a299980f8375954b85"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e5bfaae740ef9ece000f8a07e78ac0e2b085c5ce9648f8593ddf0243c9f76d"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fbaf3ea28fedfb2283da60e710aff25492e795a7397cad8a50f1e079b65a5a70"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c47c037116310dc976eb96b008e41b9cfaba002ed8005848d4d632ee0b7ba9ae"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c31751f0721f58f5e19bb27c1acc259aeff860d8629c4e1a900b26a1979ada8e"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c697cbd3be7a79ea250ea5f380d6f12e534c543cfb137d5c734966b3ee4f34cc"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48971b88ef9130bf35b41b35fd857c3c4dae4a9cd7990ebc7fc03e59cc92438"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e615de179bbe060ab33773f0d98a8a8572b5883dd7dac66c1de8c056c7e748c"}, + {file = 
"tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da1ec842035ed9999c62e45fbe0ff14b7e8a7e02bb97688cc6313cf65e5cd755"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6ee4954c1dd23aadc27958dad759006e71659d497dcb0ef0c7c87ea992c16ebd"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3eda46ca402751ec82553a321bf35a617b76bbed7586e768c02ccacbdda94d6d"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb"}, + {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + +[[package]] +name = "tokenizers" +version = "0.22.1" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73"}, + {file = "tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f"}, + {file = "tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4"}, + {file = 
"tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879"}, + {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446"}, + {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a"}, + {file = "tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390"}, + {file = "tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82"}, + {file = "tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138"}, + {file = "tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<2.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"] + +[[package]] +name = "tomli" +version = "2.3.0" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45"}, + {file = "tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba"}, + {file = "tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf"}, + {file = "tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845"}, + {file = "tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c"}, + {file = "tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456"}, + {file = "tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac"}, + {file = "tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f"}, + {file = "tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52"}, + {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8"}, + {file = "tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6"}, + {file = "tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876"}, + {file = "tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878"}, + {file = "tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b"}, + {file = "tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b"}, + {file = "tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f"}, + {file = "tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05"}, + {file = "tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606"}, + {file = "tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e"}, + {file = "tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc"}, + {file = "tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879"}, + {file = "tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005"}, + {file = "tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463"}, + {file = "tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77"}, + {file = "tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf"}, + {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530"}, + {file = "tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67"}, + {file = "tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f"}, + {file = "tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0"}, + {file = "tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba"}, + {file = "tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b"}, + {file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"}, +] + +[[package]] +name = "torch" +version = "1.12.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.7.0" +groups = ["dev"] +files = [ + {file = "torch-1.12.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:9c038662db894a23e49e385df13d47b2a777ffd56d9bcd5b832593fab0a7e286"}, + {file = "torch-1.12.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:4e1b9c14cf13fd2ab8d769529050629a0e68a6fc5cb8e84b4a3cc1dd8c4fe541"}, + {file = "torch-1.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:e9c8f4a311ac29fc7e8e955cfb7733deb5dbe1bdaabf5d4af2765695824b7e0d"}, + {file = "torch-1.12.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:976c3f997cea38ee91a0dd3c3a42322785414748d1761ef926b789dfa97c6134"}, + {file = "torch-1.12.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:68104e4715a55c4bb29a85c6a8d57d820e0757da363be1ba680fa8cc5be17b52"}, + {file = "torch-1.12.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:743784ccea0dc8f2a3fe6a536bec8c4763bd82c1352f314937cb4008d4805de1"}, + {file = "torch-1.12.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b5dbcca369800ce99ba7ae6dee3466607a66958afca3b740690d88168752abcf"}, + {file = "torch-1.12.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f3b52a634e62821e747e872084ab32fbcb01b7fa7dbb7471b6218279f02a178a"}, + {file = "torch-1.12.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:8a34a2fbbaa07c921e1b203f59d3d6e00ed379f2b384445773bd14e328a5b6c8"}, + {file = "torch-1.12.1-cp37-none-macosx_11_0_arm64.whl", hash = "sha256:42f639501928caabb9d1d55ddd17f07cd694de146686c24489ab8c615c2871f2"}, + {file = "torch-1.12.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0b44601ec56f7dd44ad8afc00846051162ef9c26a8579dda0a02194327f2d55e"}, + {file = "torch-1.12.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:cd26d8c5640c3a28c526d41ccdca14cf1cbca0d0f2e14e8263a7ac17194ab1d2"}, + {file = "torch-1.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:42e115dab26f60c29e298559dbec88444175528b729ae994ec4c65d56fe267dd"}, + {file = "torch-1.12.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:a8320ba9ad87e80ca5a6a016e46ada4d1ba0c54626e135d99b2129a4541c509d"}, + {file = "torch-1.12.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:03e31c37711db2cd201e02de5826de875529e45a55631d317aadce2f1ed45aa8"}, + {file = "torch-1.12.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:9b356aea223772cd754edb4d9ecf2a025909b8615a7668ac7d5130f86e7ec421"}, + {file = "torch-1.12.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:6cf6f54b43c0c30335428195589bd00e764a6d27f3b9ba637aaa8c11aaf93073"}, + {file = "torch-1.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:f00c721f489089dc6364a01fd84906348fe02243d0af737f944fddb36003400d"}, + {file = "torch-1.12.1-cp39-none-macosx_10_9_x86_64.whl", hash = 
"sha256:bfec2843daa654f04fda23ba823af03e7b6f7650a873cdb726752d0e3718dada"}, + {file = "torch-1.12.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:69fe2cae7c39ccadd65a123793d30e0db881f1c1927945519c5c17323131437e"}, +] + +[package.dependencies] +typing-extensions = "*" + +[[package]] +name = "torchvision" +version = "0.12.0" +description = "image and video datasets and models for torch deep learning" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "torchvision-0.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:693656e6790b6ab21e4a6e87e81c2982bad9e455b5eb24e14bb672382ec6130f"}, + {file = "torchvision-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0be4501ca0ba1b195644c9243f49a1c49a26e52a7f37924c4239d0bf5ecbd8d"}, + {file = "torchvision-0.12.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:ebfb47adf65bf3926b990b2c4767e291f135e259e03232e0e1a30ecdb05eb087"}, + {file = "torchvision-0.12.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9771231639afb5973cdaea1d449b451e2982e1ef5410ca67bbdc2b465565573a"}, + {file = "torchvision-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:894dacdc64b6e35e3f330722db51c76f4de016c7bf7bd79cf02ed2f4c106e625"}, + {file = "torchvision-0.12.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:36dfdf6451fe3072ab15118982853b848896c0fd3b26cb8135e1e7981dbb0916"}, + {file = "torchvision-0.12.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:aac76d52c5ce4229cb0eaebb762f3391fa736565eb35a4184fa0f7be30b705cd"}, + {file = "torchvision-0.12.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:926666f0b893dce6619759c19b0dd3884af7a9d7022b10395653659d28e43c48"}, + {file = "torchvision-0.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c225f55c1bfce027a03f4ca46ddb9559c83f8087c2880bed3261a76c49bb7996"}, + {file = "torchvision-0.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d1ccb53836ba886320dcda12d00ee8b5f8f38b6c36d7906f141d25778cf74104"}, + {file = "torchvision-0.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9f42420f7f0b29cd3d61776df3157827257a0cf16b2c02776dc16c96abb1256d"}, + {file = "torchvision-0.12.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9017248c7e526c8cdcaaab8cf41d904a520a409d707398189a06d0757901d235"}, + {file = "torchvision-0.12.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0744902f2265d4c3e83c44a06b567df312e4a9faf8c92620016c7bed7056b5a7"}, + {file = "torchvision-0.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:a91db01496932350bf9c0ee8607ac8ef31c3ebfdaedefe5c5cda0515317f8b8e"}, + {file = "torchvision-0.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24d03fcaa28004c64a24124ac4a894c50f5948c8eb290e398d6c76fff2bc678f"}, + {file = "torchvision-0.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69d82f47b67bad6ddcbb87833ba5950a6c271ba97baae4c0955610071bf034f5"}, + {file = "torchvision-0.12.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:49ed7886b93b80c9733462edd06a07f8d4c6ea4d5bd2894e7268f7a3774f4f7d"}, + {file = "torchvision-0.12.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b93a767f44e3933cb3b01a6fe9727db54590f57b7dac09d5aaf15966c6c151dd"}, + {file = "torchvision-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:edab05f7ba9f648c00435b384ffdbd7bde79a3b8ea893813fb50f6ccf28b1e76"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" +requests = "*" +torch = "*" +typing-extensions = "*" + +[package.extras] +scipy = ["scipy"] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" 
+optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "transformers" +version = "4.46.3" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef"}, + {file = "transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.23.2,<1.0" +numpy = ">=1.17" +onnxconverter-common = {version = "*", optional = true, markers = "extra == \"onnx\""} +onnxruntime = {version = ">=1.4.0", optional = true, markers = "extra == \"onnx\""} +onnxruntime-tools = {version = ">=1.4.2", optional = true, markers = "extra == \"onnx\""} +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.1" +tf2onnx = {version = "*", optional = true, markers = "extra == \"onnx\""} +tokenizers = ">=0.20,<0.21" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.26.0)"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +benchmark = ["optimum-benchmark (>=0.3.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", 
"faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "librosa", "nltk (<=3.8.1)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.20,<0.21)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "libcst", "librosa", "nltk (<=3.8.1)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.20,<0.21)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +onnx = ["onnxconverter-common", "onnxruntime 
(>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "libcst", "rich", "ruff (==0.5.1)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +ruff = ["ruff (==0.5.1)"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk (<=3.8.1)", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +tiktoken = ["blobfile", "tiktoken"] +timm = ["timm (<=0.9.16)"] +tokenizers = ["tokenizers (>=0.20,<0.21)"] +torch = ["accelerate (>=0.26.0)", "torch"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.20,<0.21)", "torch", "tqdm (>=4.27)"] +video = ["av (==9.2.0)"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "transformers" +version = "4.57.1" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = false +python-versions = ">=3.9.0" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "transformers-4.57.1-py3-none-any.whl", hash = "sha256:b10d05da8fa67dc41644dbbf9bc45a44cb86ae33da6f9295f5fbf5b7890bd267"}, + {file = "transformers-4.57.1.tar.gz", hash = "sha256:f06c837959196c75039809636cd964b959f6604b75b8eeec6fdfc0440b89cc55"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.34.0,<1.0" +numpy = ">=1.17" +onnxconverter-common = {version = "*", optional = true, markers = "extra == \"onnx\""} +onnxruntime = {version = ">=1.4.0", optional = true, markers = "extra == \"onnx\""} +onnxruntime-tools = {version = ">=1.4.2", optional = true, markers = "extra == \"onnx\""} +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.4.3" +tf2onnx = {version = "*", optional = true, markers = "extra == \"onnx\""} +tokenizers = ">=0.22.0,<=0.23.0" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.26.0)"] +all = ["Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "accelerate (>=0.26.0)", "av", "codecarbon (>=2.8.1)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib 
(>=0.4.1,<=0.4.13)", "jinja2 (>=3.1.0)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "kernels (>=0.6.1,<=0.9)", "librosa", "mistral-common[opencv] (>=1.6.3)", "num2words", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (!=1.0.18,<=1.0.19)", "tokenizers (>=0.22.0,<=0.23.0)", "torch (>=2.2)", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +benchmark = ["optimum-benchmark (>=0.3.0)"] +chat-template = ["jinja2 (>=3.1.0)"] +codecarbon = ["codecarbon (>=2.8.1)"] +deepspeed = ["accelerate (>=0.26.0)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fastapi", "libcst", "mistral-common[opencv] (>=1.6.3)", "nltk (<=3.8.1)", "openai (>=1.98.0)", "optuna", "parameterized (>=0.9)", "protobuf", "psutil", "pydantic (>=2)", "pydantic (>=2)", "pytest (>=7.2.0)", "pytest-asyncio", "pytest-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.13.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "tensorboard", "timeout-decorator", "torch (>=2.2)", "uvicorn"] +dev = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "accelerate (>=0.26.0)", "accelerate (>=0.26.0)", "av", "beautifulsoup4", "codecarbon (>=2.8.1)", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fastapi", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "jinja2 (>=3.1.0)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "kernels (>=0.6.1,<=0.9)", "libcst", "libcst", "librosa", "mistral-common[opencv] (>=1.6.3)", "mistral-common[opencv] (>=1.6.3)", "nltk (<=3.8.1)", "num2words", "onnxconverter-common", "openai (>=1.98.0)", "optax (>=0.0.8,<=0.1.4)", "optuna", "pandas (<2.3.0)", "parameterized (>=0.9)", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (>=2)", "pydantic (>=2)", "pytest (>=7.2.0)", "pytest-asyncio", "pytest-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.13.1)", "ruff (==0.13.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (!=1.0.18,<=1.0.19)", "tokenizers (>=0.22.0,<=0.23.0)", "torch (>=2.2)", "torch (>=2.2)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)", "urllib3 (<2.0.0)", "uvicorn"] +dev-tensorflow = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate 
(>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fastapi", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "libcst", "libcst", "librosa", "mistral-common[opencv] (>=1.6.3)", "nltk (<=3.8.1)", "onnxconverter-common", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "openai (>=1.98.0)", "pandas (<2.3.0)", "parameterized (>=0.9)", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (>=2)", "pydantic (>=2)", "pytest (>=7.2.0)", "pytest-asyncio", "pytest-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.13.1)", "ruff (==0.13.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "tf2onnx", "timeout-decorator", "tokenizers (>=0.22.0,<=0.23.0)", "torch (>=2.2)", "urllib3 (<2.0.0)", "uvicorn"] +dev-torch = ["GitPython (<3.1.19)", "GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.26.0)", "accelerate (>=0.26.0)", "beautifulsoup4", "codecarbon (>=2.8.1)", "cookiecutter (==1.7.3)", "cookiecutter (==1.7.3)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fastapi", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "kenlm", "kernels (>=0.6.1,<=0.9)", "libcst", "libcst", "librosa", "mistral-common[opencv] (>=1.6.3)", "nltk (<=3.8.1)", "num2words", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "openai (>=1.98.0)", "optuna", "pandas (<2.3.0)", "parameterized (>=0.9)", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (>=2)", "pydantic (>=2)", "pytest (>=7.2.0)", "pytest-asyncio", "pytest-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rich", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.13.1)", "ruff (==0.13.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (!=1.0.18,<=1.0.19)", "tokenizers (>=0.22.0,<=0.23.0)", "torch (>=2.2)", "torch (>=2.2)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)", "urllib3 (<2.0.0)", "uvicorn"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +hf-xet = ["hf_xet"] +hub-kernels = ["kernels (>=0.6.1,<=0.9)"] +integrations = ["kernels (>=0.6.1,<=0.9)", "optuna", "ray[tune] (>=2.7.0)"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict_core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic_lite (>=1.0.7)"] +mistral-common = ["mistral-common[opencv] (>=1.6.3)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6,<0.15.0)"] +num2words = ["num2words"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = 
["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +open-telemetry = ["opentelemetry-api", "opentelemetry-exporter-otlp", "opentelemetry-sdk"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "datasets (>=2.15.0)", "libcst", "pandas (<2.3.0)", "rich", "ruff (==0.13.1)", "urllib3 (<2.0.0)"] +ray = ["ray[tune] (>=2.7.0)"] +retrieval = ["datasets (>=2.15.0)", "faiss-cpu"] +ruff = ["ruff (==0.13.1)"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["accelerate (>=0.26.0)", "fastapi", "openai (>=1.98.0)", "pydantic (>=2)", "starlette", "torch (>=2.2)", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "accelerate (>=0.26.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (>=2.15.0)", "datasets (>=2.15.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fastapi", "libcst", "mistral-common[opencv] (>=1.6.3)", "nltk (<=3.8.1)", "openai (>=1.98.0)", "parameterized (>=0.9)", "psutil", "pydantic (>=2)", "pydantic (>=2)", "pytest (>=7.2.0)", "pytest-asyncio", "pytest-order", "pytest-rerunfailures (<16.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.13.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "starlette", "tensorboard", "timeout-decorator", "torch (>=2.2)", "uvicorn"] +tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +tiktoken = ["blobfile", "tiktoken"] +timm = ["timm (!=1.0.18,<=1.0.19)"] +tokenizers = ["tokenizers (>=0.22.0,<=0.23.0)"] +torch = ["accelerate (>=0.26.0)", "torch (>=2.2)"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.34.0,<1.0)", "importlib_metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.22.0,<=0.23.0)", "torch (>=2.2)", "tqdm (>=4.27)"] +video = ["av"] +vision = ["Pillow (>=10.0.1,<=15.0)"] + +[[package]] +name = "typeguard" +version = "4.4.0" +description = "Run-time type checker for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\"" +files = [ + {file = "typeguard-4.4.0-py3-none-any.whl", hash = "sha256:8ca34c14043f53b2caae7040549ba431770869bcd6287cfa8239db7ecb882b4a"}, + {file = "typeguard-4.4.0.tar.gz", hash = "sha256:463bd8697a65a4aa576a63767c369b1ecfba8a5ba735edfe3223127b6ecfa28c"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} +typing-extensions = ">=4.10.0" + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.3.0)"] +test = ["coverage[toml] (>=7)", "mypy (>=1.2.0) ; platform_python_implementation != \"PyPy\"", "pytest (>=7)"] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" 
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "tzdata" +version = "2025.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +groups = ["dev"] +files = [ + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, +] + +[[package]] +name = "urllib3" +version = "1.26.20" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, + {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, +] + +[package.extras] +brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "werkzeug" +version = "3.0.6" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, + {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wheel" +version = "0.45.1" +description = "A built-package format for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248"}, + {file = "wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729"}, +] + +[package.extras] +test = ["pytest (>=6.0.0)", "setuptools (>=65)"] + +[[package]] +name = "wrapt" +version = "1.14.1" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +groups = ["main"] +markers = "platform_machine == \"x86_64\" or platform_machine == \"arm64\"" +files = [ + {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, + {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, + {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, + {file = 
"wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, + {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, + {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, + {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, + {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, + {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, + {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, + {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, + {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, + {file = 
"wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, + {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, + {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, + {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, + {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, + {file = 
"wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, + {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, + {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, + {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, +] + +[[package]] +name = "xmltodict" +version = "0.15.0" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "xmltodict-0.15.0-py2.py3-none-any.whl", hash = "sha256:8887783bf1faba1754fc45fdf3fe03fbb3629c811ae57f91c018aace4c58d4ed"}, + {file = "xmltodict-0.15.0.tar.gz", hash = "sha256:c6d46b4e3413d1e4fc3e5016f0f1c7a5c10f8ce39efaa0cb099af986ecfc9a53"}, +] + +[[package]] +name = "xmltodict" +version = "1.0.2" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d"}, + {file = "xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649"}, +] + +[package.extras] +test = ["pytest", "pytest-cov"] + +[[package]] +name = "xxhash" +version = "3.6.0" +description = "Python binding for xxHash" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71"}, + {file = "xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b"}, + {file = "xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b"}, + {file = "xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb"}, + {file = "xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3"}, + {file = "xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd"}, + {file = "xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef"}, + {file = "xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae"}, + {file = "xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb"}, + {file = "xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c"}, + {file = "xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d"}, + {file = 
"xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd"}, + {file = "xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799"}, + {file = "xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392"}, + {file = "xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033"}, + {file = "xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746"}, + {file = 
"xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e"}, + {file = "xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5"}, + {file = "xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f"}, + {file = "xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad"}, + {file = "xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518"}, + {file = "xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dac94fad14a3d1c92affb661021e1d5cbcf3876be5f5b4d90730775ccb7ac41"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6965e0e90f1f0e6cb78da568c13d4a348eeb7f40acfd6d43690a666a459458b8"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2ab89a6b80f22214b43d98693c30da66af910c04f9858dd39c8e570749593d7e"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4903530e866b7a9c1eadfd3fa2fbe1b97d3aed4739a80abf506eb9318561c850"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4da8168ae52c01ac64c511d6f4a709479da8b7a4a1d7621ed51652f93747dffa"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:97460eec202017f719e839a0d3551fbc0b2fcc9c6c6ffaa5af85bbd5de432788"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45aae0c9df92e7fa46fbb738737324a563c727990755ec1965a6a339ea10a1df"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d50101e57aad86f4344ca9b32d091a2135a9d0a4396f19133426c88025b09f1"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9085e798c163ce310d91f8aa6b325dda3c2944c93c6ce1edb314030d4167cc65"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a87f271a33fad0e5bf3be282be55d78df3a45ae457950deb5241998790326f87"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:9e040d3e762f84500961791fa3709ffa4784d4dcd7690afc655c095e02fff05f"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b0359391c3dad6de872fefb0cf5b69d55b0655c55ee78b1bb7a568979b2ce96b"}, + {file = "xxhash-3.6.0-cp38-cp38-win32.whl", hash = "sha256:e4ff728a2894e7f436b9e94c667b0f426b9c74b71f900cf37d5468c6b5da0536"}, + {file = "xxhash-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:01be0c5b500c5362871fc9cfdf58c69b3e5c4f531a82229ddb9eb1eb14138004"}, + {file = "xxhash-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cc604dc06027dbeb8281aeac5899c35fcfe7c77b25212833709f0bff4ce74d2a"}, + 
{file = "xxhash-3.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:277175a73900ad43a8caeb8b99b9604f21fe8d7c842f2f9061a364a7e220ddb7"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cfbc5b91397c8c2972fdac13fb3e4ed2f7f8ccac85cd2c644887557780a9b6e2"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2762bfff264c4e73c0e507274b40634ff465e025f0eaf050897e88ec8367575d"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2f171a900d59d51511209f7476933c34a0c2c711078d3c80e74e0fe4f38680ec"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:780b90c313348f030b811efc37b0fa1431163cb8db8064cf88a7936b6ce5f222"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b242455eccdfcd1fa4134c431a30737d2b4f045770f8fe84356b3469d4b919"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a75ffc1bd5def584129774c158e108e5d768e10b75813f2b32650bb041066ed6"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1fc1ed882d1e8df932a66e2999429ba6cc4d5172914c904ab193381fba825360"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:44e342e8cc11b4e79dae5c57f2fb6360c3c20cc57d32049af8f567f5b4bcb5f4"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c2f9ccd5c4be370939a2e17602fbc49995299203da72a3429db013d44d590e86"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02ea4cb627c76f48cd9fb37cf7ab22bd51e57e1b519807234b473faebe526796"}, + {file = "xxhash-3.6.0-cp39-cp39-win32.whl", hash = "sha256:6551880383f0e6971dc23e512c9ccc986147ce7bfa1cd2e4b520b876c53e9f3d"}, + {file = "xxhash-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7c35c4cdc65f2a29f34425c446f2f5cdcd0e3c34158931e1cc927ece925ab802"}, + {file = "xxhash-3.6.0-cp39-cp39-win_arm64.whl", hash = "sha256:ffc578717a347baf25be8397cb10d2528802d24f94cfc005c0e44fef44b5cdd6"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d"}, + {file = "xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6"}, +] + +[[package]] +name = "yacs" +version = "0.1.8" +description = "Yet Another Configuration System" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "yacs-0.1.8-py2-none-any.whl", hash = "sha256:d43d1854c1ffc4634c5b349d1c1120f86f05c3a294c9d141134f282961ab5d94"}, + {file = "yacs-0.1.8-py3-none-any.whl", hash = 
"sha256:99f893e30497a4b66842821bac316386f7bd5c4f47ad35c9073ef089aa33af32"}, + {file = "yacs-0.1.8.tar.gz", hash = "sha256:efc4c732942b3103bea904ee89af98bcd27d01f0ac12d8d4d369f1e7a2914384"}, +] + +[package.dependencies] +PyYAML = "*" + +[[package]] +name = "yarl" +version = "1.15.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.10\"" +files = [ + {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"}, + {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"}, + {file = "yarl-1.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43ebdcc120e2ca679dba01a779333a8ea76b50547b55e812b8b92818d604662c"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3433da95b51a75692dcf6cc8117a31410447c75a9a8187888f02ad45c0a86c50"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38d0124fa992dbacd0c48b1b755d3ee0a9f924f427f95b0ef376556a24debf01"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ded1b1803151dd0f20a8945508786d57c2f97a50289b16f2629f85433e546d47"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace4cad790f3bf872c082366c9edd7f8f8f77afe3992b134cfc810332206884f"}, + {file = "yarl-1.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c77494a2f2282d9bbbbcab7c227a4d1b4bb829875c96251f66fb5f3bae4fb053"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b7f227ca6db5a9fda0a2b935a2ea34a7267589ffc63c8045f0e4edb8d8dcf956"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:31561a5b4d8dbef1559b3600b045607cf804bae040f64b5f5bca77da38084a8a"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3e52474256a7db9dcf3c5f4ca0b300fdea6c21cca0148c8891d03a025649d935"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0e1af74a9529a1137c67c887ed9cde62cff53aa4d84a3adbec329f9ec47a3936"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:15c87339490100c63472a76d87fe7097a0835c705eb5ae79fd96e343473629ed"}, + {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:74abb8709ea54cc483c4fb57fb17bb66f8e0f04438cff6ded322074dbd17c7ec"}, + {file = "yarl-1.15.2-cp310-cp310-win32.whl", hash = "sha256:ffd591e22b22f9cb48e472529db6a47203c41c2c5911ff0a52e85723196c0d75"}, + {file = "yarl-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:1695497bb2a02a6de60064c9f077a4ae9c25c73624e0d43e3aa9d16d983073c2"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9fcda20b2de7042cc35cf911702fa3d8311bd40055a14446c1e62403684afdc5"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0545de8c688fbbf3088f9e8b801157923be4bf8e7b03e97c2ecd4dfa39e48e0e"}, + {file = "yarl-1.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fbda058a9a68bec347962595f50546a8a4a34fd7b0654a7b9697917dc2bf810d"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ac2bc069f4a458634c26b101c2341b18da85cb96afe0015990507efec2e417"}, + {file = 
"yarl-1.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd126498171f752dd85737ab1544329a4520c53eed3997f9b08aefbafb1cc53b"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3db817b4e95eb05c362e3b45dafe7144b18603e1211f4a5b36eb9522ecc62bcf"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:076b1ed2ac819933895b1a000904f62d615fe4533a5cf3e052ff9a1da560575c"}, + {file = "yarl-1.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8cfd847e6b9ecf9f2f2531c8427035f291ec286c0a4944b0a9fce58c6446046"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32b66be100ac5739065496c74c4b7f3015cef792c3174982809274d7e51b3e04"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:34a2d76a1984cac04ff8b1bfc939ec9dc0914821264d4a9c8fd0ed6aa8d4cfd2"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0afad2cd484908f472c8fe2e8ef499facee54a0a6978be0e0cff67b1254fd747"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c68e820879ff39992c7f148113b46efcd6ec765a4865581f2902b3c43a5f4bbb"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:98f68df80ec6ca3015186b2677c208c096d646ef37bbf8b49764ab4a38183931"}, + {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c56ec1eacd0a5d35b8a29f468659c47f4fe61b2cab948ca756c39b7617f0aa5"}, + {file = "yarl-1.15.2-cp311-cp311-win32.whl", hash = "sha256:eedc3f247ee7b3808ea07205f3e7d7879bc19ad3e6222195cd5fbf9988853e4d"}, + {file = "yarl-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:0ccaa1bc98751fbfcf53dc8dfdb90d96e98838010fc254180dd6707a6e8bb179"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82d5161e8cb8f36ec778fd7ac4d740415d84030f5b9ef8fe4da54784a1f46c94"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fa2bea05ff0a8fb4d8124498e00e02398f06d23cdadd0fe027d84a3f7afde31e"}, + {file = "yarl-1.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99e12d2bf587b44deb74e0d6170fec37adb489964dbca656ec41a7cd8f2ff178"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:243fbbbf003754fe41b5bdf10ce1e7f80bcc70732b5b54222c124d6b4c2ab31c"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:856b7f1a7b98a8c31823285786bd566cf06226ac4f38b3ef462f593c608a9bd6"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:553dad9af802a9ad1a6525e7528152a015b85fb8dbf764ebfc755c695f488367"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30c3ff305f6e06650a761c4393666f77384f1cc6c5c0251965d6bfa5fbc88f7f"}, + {file = "yarl-1.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:353665775be69bbfc6d54c8d134bfc533e332149faeddd631b0bc79df0897f46"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f4fe99ce44128c71233d0d72152db31ca119711dfc5f2c82385ad611d8d7f897"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9c1e3ff4b89cdd2e1a24c214f141e848b9e0451f08d7d4963cb4108d4d798f1f"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:711bdfae4e699a6d4f371137cbe9e740dc958530cb920eb6f43ff9551e17cfbc"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4388c72174868884f76affcdd3656544c426407e0043c89b684d22fb265e04a5"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f0e1844ad47c7bd5d6fa784f1d4accc5f4168b48999303a868fe0f8597bde715"}, + {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5cafb02cf097a82d74403f7e0b6b9df3ffbfe8edf9415ea816314711764a27b"}, + {file = "yarl-1.15.2-cp312-cp312-win32.whl", hash = "sha256:156ececdf636143f508770bf8a3a0498de64da5abd890c7dbb42ca9e3b6c05b8"}, + {file = "yarl-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:435aca062444a7f0c884861d2e3ea79883bd1cd19d0a381928b69ae1b85bc51d"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:416f2e3beaeae81e2f7a45dc711258be5bdc79c940a9a270b266c0bec038fb84"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:173563f3696124372831007e3d4b9821746964a95968628f7075d9231ac6bb33"}, + {file = "yarl-1.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ce2e0f6123a60bd1a7f5ae3b2c49b240c12c132847f17aa990b841a417598a2"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaea112aed589131f73d50d570a6864728bd7c0c66ef6c9154ed7b59f24da611"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ca3b9f370f218cc2a0309542cab8d0acdfd66667e7c37d04d617012485f904"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23ec1d3c31882b2a8a69c801ef58ebf7bae2553211ebbddf04235be275a38548"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75119badf45f7183e10e348edff5a76a94dc19ba9287d94001ff05e81475967b"}, + {file = "yarl-1.15.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e6fdc976ec966b99e4daa3812fac0274cc28cd2b24b0d92462e2e5ef90d368"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8657d3f37f781d987037f9cc20bbc8b40425fa14380c87da0cb8dfce7c92d0fb"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:93bed8a8084544c6efe8856c362af08a23e959340c87a95687fdbe9c9f280c8b"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:69d5856d526802cbda768d3e6246cd0d77450fa2a4bc2ea0ea14f0d972c2894b"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ccad2800dfdff34392448c4bf834be124f10a5bc102f254521d931c1c53c455a"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a880372e2e5dbb9258a4e8ff43f13888039abb9dd6d515f28611c54361bc5644"}, + {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c998d0558805860503bc3a595994895ca0f7835e00668dadc673bbf7f5fbfcbe"}, + {file = "yarl-1.15.2-cp313-cp313-win32.whl", hash = "sha256:533a28754e7f7439f217550a497bb026c54072dbe16402b183fdbca2431935a9"}, + {file = "yarl-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:5838f2b79dc8f96fdc44077c9e4e2e33d7089b10788464609df788eb97d03aad"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fbbb63bed5fcd70cd3dd23a087cd78e4675fb5a2963b8af53f945cbbca79ae16"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2e93b88ecc8f74074012e18d679fb2e9c746f2a56f79cd5e2b1afcf2a8a786b"}, + {file = "yarl-1.15.2-cp38-cp38-macosx_11_0_arm64.whl", 
hash = "sha256:af8ff8d7dc07ce873f643de6dfbcd45dc3db2c87462e5c387267197f59e6d776"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66f629632220a4e7858b58e4857927dd01a850a4cef2fb4044c8662787165cf7"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:833547179c31f9bec39b49601d282d6f0ea1633620701288934c5f66d88c3e50"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa738e0282be54eede1e3f36b81f1e46aee7ec7602aa563e81e0e8d7b67963f"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a13a07532e8e1c4a5a3afff0ca4553da23409fad65def1b71186fb867eeae8d"}, + {file = "yarl-1.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c45817e3e6972109d1a2c65091504a537e257bc3c885b4e78a95baa96df6a3f8"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:670eb11325ed3a6209339974b276811867defe52f4188fe18dc49855774fa9cf"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:d417a4f6943112fae3924bae2af7112562285848d9bcee737fc4ff7cbd450e6c"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bc8936d06cd53fddd4892677d65e98af514c8d78c79864f418bbf78a4a2edde4"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:954dde77c404084c2544e572f342aef384240b3e434e06cecc71597e95fd1ce7"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5bc0df728e4def5e15a754521e8882ba5a5121bd6b5a3a0ff7efda5d6558ab3d"}, + {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b71862a652f50babab4a43a487f157d26b464b1dedbcc0afda02fd64f3809d04"}, + {file = "yarl-1.15.2-cp38-cp38-win32.whl", hash = "sha256:63eab904f8630aed5a68f2d0aeab565dcfc595dc1bf0b91b71d9ddd43dea3aea"}, + {file = "yarl-1.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:2cf441c4b6e538ba0d2591574f95d3fdd33f1efafa864faa077d9636ecc0c4e9"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a32d58f4b521bb98b2c0aa9da407f8bd57ca81f34362bcb090e4a79e9924fefc"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:766dcc00b943c089349d4060b935c76281f6be225e39994c2ccec3a2a36ad627"}, + {file = "yarl-1.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bed1b5dbf90bad3bfc19439258c97873eab453c71d8b6869c136346acfe497e7"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed20a4bdc635f36cb19e630bfc644181dd075839b6fc84cac51c0f381ac472e2"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d538df442c0d9665664ab6dd5fccd0110fa3b364914f9c85b3ef9b7b2e157980"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c6cf1d92edf936ceedc7afa61b07e9d78a27b15244aa46bbcd534c7458ee1b"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce44217ad99ffad8027d2fde0269ae368c86db66ea0571c62a000798d69401fb"}, + {file = "yarl-1.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47a6000a7e833ebfe5886b56a31cb2ff12120b1efd4578a6fcc38df16cc77bd"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e52f77a0cd246086afde8815039f3e16f8d2be51786c0a39b57104c563c5cbb0"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = 
"sha256:f9ca0e6ce7774dc7830dc0cc4bb6b3eec769db667f230e7c770a628c1aa5681b"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:136f9db0f53c0206db38b8cd0c985c78ded5fd596c9a86ce5c0b92afb91c3a19"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:173866d9f7409c0fb514cf6e78952e65816600cb888c68b37b41147349fe0057"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6e840553c9c494a35e449a987ca2c4f8372668ee954a03a9a9685075228e5036"}, + {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:458c0c65802d816a6b955cf3603186de79e8fdb46d4f19abaec4ef0a906f50a7"}, + {file = "yarl-1.15.2-cp39-cp39-win32.whl", hash = "sha256:5b48388ded01f6f2429a8c55012bdbd1c2a0c3735b3e73e221649e524c34a58d"}, + {file = "yarl-1.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:81dadafb3aa124f86dc267a2168f71bbd2bfb163663661ab0038f6e4b8edb810"}, + {file = "yarl-1.15.2-py3-none-any.whl", hash = "sha256:0d3105efab7c5c091609abacad33afff33bdff0035bece164c98bcf5a85ef90a"}, + {file = "yarl-1.15.2.tar.gz", hash = "sha256:a39c36f4218a5bb668b4f06874d676d35a035ee668e6e7e3538835c703634b84"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[[package]] +name = "yarl" +version = "1.22.0" +description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "yarl-1.22.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c7bd6683587567e5a49ee6e336e0612bec8329be1b7d4c8af5687dcdeb67ee1e"}, + {file = "yarl-1.22.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5cdac20da754f3a723cceea5b3448e1a2074866406adeb4ef35b469d089adb8f"}, + {file = "yarl-1.22.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07a524d84df0c10f41e3ee918846e1974aba4ec017f990dc735aad487a0bdfdf"}, + {file = "yarl-1.22.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1b329cb8146d7b736677a2440e422eadd775d1806a81db2d4cded80a48efc1a"}, + {file = "yarl-1.22.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:75976c6945d85dbb9ee6308cd7ff7b1fb9409380c82d6119bd778d8fcfe2931c"}, + {file = "yarl-1.22.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:80ddf7a5f8c86cb3eb4bc9028b07bbbf1f08a96c5c0bc1244be5e8fefcb94147"}, + {file = "yarl-1.22.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d332fc2e3c94dad927f2112395772a4e4fedbcf8f80efc21ed7cdfae4d574fdb"}, + {file = "yarl-1.22.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cf71bf877efeac18b38d3930594c0948c82b64547c1cf420ba48722fe5509f6"}, + {file = "yarl-1.22.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:663e1cadaddae26be034a6ab6072449a8426ddb03d500f43daf952b74553bba0"}, + {file = "yarl-1.22.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6dcbb0829c671f305be48a7227918cfcd11276c2d637a8033a99a02b67bf9eda"}, + {file = "yarl-1.22.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f0d97c18dfd9a9af4490631905a3f131a8e4c9e80a39353919e2cfed8f00aedc"}, + {file = "yarl-1.22.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:437840083abe022c978470b942ff832c3940b2ad3734d424b7eaffcd07f76737"}, + {file = "yarl-1.22.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a899cbd98dce6f5d8de1aad31cb712ec0a530abc0a86bd6edaa47c1090138467"}, + {file 
= "yarl-1.22.0-cp310-cp310-win32.whl", hash = "sha256:595697f68bd1f0c1c159fcb97b661fc9c3f5db46498043555d04805430e79bea"}, + {file = "yarl-1.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:cb95a9b1adaa48e41815a55ae740cfda005758104049a640a398120bf02515ca"}, + {file = "yarl-1.22.0-cp310-cp310-win_arm64.whl", hash = "sha256:b85b982afde6df99ecc996990d4ad7ccbdbb70e2a4ba4de0aecde5922ba98a0b"}, + {file = "yarl-1.22.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ab72135b1f2db3fed3997d7e7dc1b80573c67138023852b6efb336a5eae6511"}, + {file = "yarl-1.22.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:669930400e375570189492dc8d8341301578e8493aec04aebc20d4717f899dd6"}, + {file = "yarl-1.22.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:792a2af6d58177ef7c19cbf0097aba92ca1b9cb3ffdd9c7470e156c8f9b5e028"}, + {file = "yarl-1.22.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ea66b1c11c9150f1372f69afb6b8116f2dd7286f38e14ea71a44eee9ec51b9d"}, + {file = "yarl-1.22.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3e2daa88dc91870215961e96a039ec73e4937da13cf77ce17f9cad0c18df3503"}, + {file = "yarl-1.22.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba440ae430c00eee41509353628600212112cd5018d5def7e9b05ea7ac34eb65"}, + {file = "yarl-1.22.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e6438cc8f23a9c1478633d216b16104a586b9761db62bfacb6425bac0a36679e"}, + {file = "yarl-1.22.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c52a6e78aef5cf47a98ef8e934755abf53953379b7d53e68b15ff4420e6683d"}, + {file = "yarl-1.22.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3b06bcadaac49c70f4c88af4ffcfbe3dc155aab3163e75777818092478bcbbe7"}, + {file = "yarl-1.22.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6944b2dc72c4d7f7052683487e3677456050ff77fcf5e6204e98caf785ad1967"}, + {file = "yarl-1.22.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d5372ca1df0f91a86b047d1277c2aaf1edb32d78bbcefffc81b40ffd18f027ed"}, + {file = "yarl-1.22.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:51af598701f5299012b8416486b40fceef8c26fc87dc6d7d1f6fc30609ea0aa6"}, + {file = "yarl-1.22.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b266bd01fedeffeeac01a79ae181719ff848a5a13ce10075adbefc8f1daee70e"}, + {file = "yarl-1.22.0-cp311-cp311-win32.whl", hash = "sha256:a9b1ba5610a4e20f655258d5a1fdc7ebe3d837bb0e45b581398b99eb98b1f5ca"}, + {file = "yarl-1.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:078278b9b0b11568937d9509b589ee83ef98ed6d561dfe2020e24a9fd08eaa2b"}, + {file = "yarl-1.22.0-cp311-cp311-win_arm64.whl", hash = "sha256:b6a6f620cfe13ccec221fa312139135166e47ae169f8253f72a0abc0dae94376"}, + {file = "yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f"}, + {file = "yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2"}, + {file = "yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74"}, + {file = "yarl-1.22.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1651bf8e0398574646744c1885a41198eba53dc8a9312b954073f845c90a8df"}, + {file = 
"yarl-1.22.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b8a0588521a26bf92a57a1705b77b8b59044cdceccac7151bd8d229e66b8dedb"}, + {file = "yarl-1.22.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42188e6a615c1a75bcaa6e150c3fe8f3e8680471a6b10150c5f7e83f47cc34d2"}, + {file = "yarl-1.22.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f6d2cb59377d99718913ad9a151030d6f83ef420a2b8f521d94609ecc106ee82"}, + {file = "yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50678a3b71c751d58d7908edc96d332af328839eea883bb554a43f539101277a"}, + {file = "yarl-1.22.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e8fbaa7cec507aa24ea27a01456e8dd4b6fab829059b69844bd348f2d467124"}, + {file = "yarl-1.22.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:433885ab5431bc3d3d4f2f9bd15bfa1614c522b0f1405d62c4f926ccd69d04fa"}, + {file = "yarl-1.22.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b790b39c7e9a4192dc2e201a282109ed2985a1ddbd5ac08dc56d0e121400a8f7"}, + {file = "yarl-1.22.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31f0b53913220599446872d757257be5898019c85e7971599065bc55065dc99d"}, + {file = "yarl-1.22.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a49370e8f711daec68d09b821a34e1167792ee2d24d405cbc2387be4f158b520"}, + {file = "yarl-1.22.0-cp312-cp312-win32.whl", hash = "sha256:70dfd4f241c04bd9239d53b17f11e6ab672b9f1420364af63e8531198e3f5fe8"}, + {file = "yarl-1.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:8884d8b332a5e9b88e23f60bb166890009429391864c685e17bd73a9eda9105c"}, + {file = "yarl-1.22.0-cp312-cp312-win_arm64.whl", hash = "sha256:ea70f61a47f3cc93bdf8b2f368ed359ef02a01ca6393916bc8ff877427181e74"}, + {file = "yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53"}, + {file = "yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a"}, + {file = "yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c"}, + {file = "yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601"}, + {file = "yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a"}, + {file = "yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df"}, + {file = "yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2"}, + {file = "yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b"}, + {file = "yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273"}, + {file = "yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a"}, + {file = 
"yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d"}, + {file = "yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02"}, + {file = "yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67"}, + {file = "yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95"}, + {file = "yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d"}, + {file = "yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b"}, + {file = "yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10"}, + {file = "yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3"}, + {file = "yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9"}, + {file = "yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f"}, + {file = "yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0"}, + {file = "yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e"}, + {file = "yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708"}, + {file = "yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f"}, + {file = "yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d"}, + {file = "yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8"}, + {file = "yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5"}, + {file = "yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f"}, + {file = "yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62"}, + {file = "yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03"}, + {file = "yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249"}, + {file = "yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b"}, + {file = "yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4"}, + {file = "yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", 
hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683"}, + {file = "yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b"}, + {file = "yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e"}, + {file = "yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590"}, + {file = "yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2"}, + {file = "yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da"}, + {file = "yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784"}, + {file = "yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b"}, + {file = "yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694"}, + {file = "yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d"}, + {file = "yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd"}, + {file = "yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da"}, + {file = "yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2"}, + {file = "yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79"}, + {file = "yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33"}, + {file = "yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1"}, + {file = "yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca"}, + {file = "yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53"}, + {file = "yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c"}, + {file = "yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf"}, + {file = "yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face"}, + {file = "yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b"}, + {file = 
"yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486"}, + {file = "yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138"}, + {file = "yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a"}, + {file = "yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529"}, + {file = "yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093"}, + {file = "yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c"}, + {file = "yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e"}, + {file = "yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27"}, + {file = "yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1"}, + {file = "yarl-1.22.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3aa27acb6de7a23785d81557577491f6c38a5209a254d1191519d07d8fe51748"}, + {file = "yarl-1.22.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:af74f05666a5e531289cb1cc9c883d1de2088b8e5b4de48004e5ca8a830ac859"}, + {file = "yarl-1.22.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:62441e55958977b8167b2709c164c91a6363e25da322d87ae6dd9c6019ceecf9"}, + {file = "yarl-1.22.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b580e71cac3f8113d3135888770903eaf2f507e9421e5697d6ee6d8cd1c7f054"}, + {file = "yarl-1.22.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e81fda2fb4a07eda1a2252b216aa0df23ebcd4d584894e9612e80999a78fd95b"}, + {file = "yarl-1.22.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:99b6fc1d55782461b78221e95fc357b47ad98b041e8e20f47c1411d0aacddc60"}, + {file = "yarl-1.22.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:088e4e08f033db4be2ccd1f34cf29fe994772fb54cfe004bbf54db320af56890"}, + {file = "yarl-1.22.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e4e1f6f0b4da23e61188676e3ed027ef0baa833a2e633c29ff8530800edccba"}, + {file = "yarl-1.22.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:84fc3ec96fce86ce5aa305eb4aa9358279d1aa644b71fab7b8ed33fe3ba1a7ca"}, + {file = "yarl-1.22.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5dbeefd6ca588b33576a01b0ad58aa934bc1b41ef89dee505bf2932b22ddffba"}, + {file = "yarl-1.22.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:14291620375b1060613f4aab9ebf21850058b6b1b438f386cc814813d901c60b"}, + {file = "yarl-1.22.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a4fcfc8eb2c34148c118dfa02e6427ca278bfd0f3df7c5f99e33d2c0e81eae3e"}, + {file = "yarl-1.22.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:029866bde8d7b0878b9c160e72305bbf0a7342bcd20b9999381704ae03308dc8"}, + {file = "yarl-1.22.0-cp39-cp39-win32.whl", hash = "sha256:4dcc74149ccc8bba31ce1944acee24813e93cfdee2acda3c172df844948ddf7b"}, + {file = 
"yarl-1.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:10619d9fdee46d20edc49d3479e2f8269d0779f1b031e6f7c2aa1c76be04b7ed"}, + {file = "yarl-1.22.0-cp39-cp39-win_arm64.whl", hash = "sha256:dd7afd3f8b0bfb4e0d9fc3c31bfe8a4ec7debe124cfd90619305def3c8ca8cd2"}, + {file = "yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff"}, + {file = "yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.1" + +[[package]] +name = "zipp" +version = "3.20.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, +] +markers = {main = "python_version < \"3.10\" and (platform_machine == \"x86_64\" or platform_machine == \"arm64\")", dev = "python_version == \"3.8\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.8, <3.11" +content-hash = "732146f5dfc8becf5f8ab348e666483456792ff1d127555f703768260a01d63b" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..fa764259 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,35 @@ +[tool.poetry] +name = "onnx2kerastl" +version = "0.0.178" +description = "" +authors = ["dorhar "] +license = "MIT" + +[tool.poetry.dependencies] +python = ">=3.8, <3.11" +tensorflow = {version = "2.12.0", markers = "platform_machine == 'x86_64'"} +tensorflow-macos = {version = "2.12.0", markers = "platform_machine == 'arm64'"} +onnx = "1.13.0" +protobuf = "^3.19.6" +tensorflow-addons = {version = "^0.19.0", markers = "platform_machine == 'x86_64'"} +numpy = "1.23.5" +fvcore = "^0.1.5.post20221221" +boto3 = "^1.24.22" +tensorflow-io-gcs-filesystem = "0.34.0" +keras-data-format-converter = "0.1.22" + +[tool.poetry.dev-dependencies] +pytest = "^7.1.2" +torch = {version = "1.12.1"} +torchvision = {version = "^0.12.0"} +transformers = {extras = ["onnx"], version = "^4.25.1"} +pandas = "^2.0.3" +datasets = "^2.14.1" +librosa = "^0.10.0.post2" +onnxruntime = {version = "<=1.17.3"} +sentencepiece ={version = "^0.1.96"} + + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index fdd09dae..00000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -tensorflow -tensorflow-addons -numpy -onnx \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index ec43c587..00000000 --- a/setup.py +++ /dev/null @@ -1,38 +0,0 @@ -from setuptools import setup, find_packages - - -def parse_requirements(filename): - """ load requirements from a pip requirements file """ - lineiter = (line.strip() for line in 
open(filename)) - return [line for line in lineiter if line and not line.startswith("#")] - - -reqs = parse_requirements('requirements.txt') - - -with open('README.md') as f: - long_description = f.read() - - -setup(name='onnx2keras', - version='0.0.24', - description='The deep learning models converter', - long_description=long_description, - long_description_content_type='text/markdown', - url='https://github.com/gmalivenko/onnx2keras', - author='Grigory Malivenko', - author_email='', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: MIT License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering :: Image Recognition', - ], - keywords='machine-learning deep-learning pytorch keras neuralnetwork vgg resnet ' - 'densenet drn dpn darknet squeezenet mobilenet', - license='MIT', - packages=find_packages(), - install_requires=reqs, - zip_safe=False) diff --git a/support_map.md b/support_map.md new file mode 100644 index 00000000..0872e174 --- /dev/null +++ b/support_map.md @@ -0,0 +1,46 @@ +Legend: +- :heavy_check_mark: supported +- \- not supported yet +- E not supported but easy to support + +| Op name | supported | +|---------|-----------| +|AffineGrid| -| +|Bernoulli| -| +|BlackmanWindow| -| +|CastLike| -| +|Celu| E| +|CenterCropPad| E| +|Clip| :heavy_check_mark:| +|DynamicQuantizeLinear| -| +|Elu| :heavy_check_mark:| +|Gelu| :heavy_check_mark:| +|GreaterOrEqual| :heavy_check_mark:| +|GroupNormalization| E| +|HammingWindow| -| +|HannWindow| -| +|HardSigmoid| :heavy_check_mark:| +|HardSwish| :heavy_check_mark:| +|LayerNormalization| E| +|LeakyRelu| :heavy_check_mark:| +|LessOrEqual| :heavy_check_mark:| +|LogSoftmax| E| +|MeanVarianceNormalization| -| +|Mish| :heavy_check_mark:| +|NegativeLogLikelihoodLoss| -| +|PRelu| :heavy_check_mark:| +|Range| :heavy_check_mark:| +|ReduceL1| E| +|ReduceL2| :heavy_check_mark:| +|ReduceLogSum| E| +|ReduceLogSumExp| E| +|ReduceSumSquare| E| +|Relu| :heavy_check_mark:| +|Selu| :heavy_check_mark:| +|SequenceMap| -| +|Shrink| -| +|Softmax| :heavy_check_mark:| +|SoftmaxCrossEntropyLoss| E| +|Softplus| :heavy_check_mark:| +|Softsign| :heavy_check_mark:| +|ThresholdedRelu| E| \ No newline at end of file diff --git a/test/layers/activations/test_elu.py b/test/layers/activations/test_elu.py index 86e29d3f..3aff732d 100644 --- a/test/layers/activations/test_elu.py +++ b/test/layers/activations/test_elu.py @@ -34,7 +34,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_elu(change_ordering): model = LayerELU() model.eval() @@ -43,7 +43,7 @@ def test_layer_elu(change_ordering): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_fp_elu(change_ordering): model = FPELU() model.eval() diff --git a/test/layers/activations/test_hard_sigmoid.py b/test/layers/activations/test_hard_sigmoid.py new file mode 100644 index 00000000..ca3d1328 --- /dev/null +++ b/test/layers/activations/test_hard_sigmoid.py @@ -0,0 +1,46 @@ +import torch.nn as nn +import numpy as np +import pytest + +from test.utils import convert_and_test + + +class LayerHardSigmoid(nn.Module): + """ + Test for nn.layers based types + """ + def __init__(self): + super(LayerHardSigmoid, self).__init__() + self.hard_sig = nn.Hardsigmoid() + + def forward(self, x): 
+ x = self.hard_sig(x) + return x + + +class FHardSigmoid(nn.Module): + """ + Test for nn.functional types + """ + def __init__(self): + super(FHardSigmoid, self).__init__() + + def forward(self, x): + from torch.nn import functional as F + return F.hardsigmoid(x) + + +@pytest.mark.parametrize('change_ordering', [False]) +def test_layer_hard_sigmoid(change_ordering): + model = LayerHardSigmoid() + model.eval() + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + + +@pytest.mark.parametrize('change_ordering', [False]) +def test_f_hard_sigmoid(change_ordering): + model = FHardSigmoid() + model.eval() + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) \ No newline at end of file diff --git a/test/layers/activations/test_hard_tanh.py b/test/layers/activations/test_hard_tanh.py index 50cb3ef0..d40081e5 100644 --- a/test/layers/activations/test_hard_tanh.py +++ b/test/layers/activations/test_hard_tanh.py @@ -36,7 +36,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_hardtanh(change_ordering): model = LayerHardtanh() model.eval() @@ -45,7 +45,7 @@ def test_f_hardtanh(change_ordering): model = LayerHardtanh() model.eval() diff --git a/test/layers/activations/test_leaky_relu.py b/test/layers/activations/test_leaky_relu.py index 60a7ccb5..07951fa8 100644 --- a/test/layers/activations/test_leaky_relu.py +++ b/test/layers/activations/test_leaky_relu.py @@ -34,7 +34,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_leaky_relu(change_ordering): model = LayerLeakyReLU() model.eval() @@ -43,7 +43,7 @@ def test_f_leaky_relu(change_ordering): model = FLeakyReLU() model.eval() diff --git a/test/layers/activations/test_log_sigmoid.py b/test/layers/activations/test_log_sigmoid.py index 39455878..f2e7a748 100644 --- a/test/layers/activations/test_log_sigmoid.py +++ b/test/layers/activations/test_log_sigmoid.py @@ -30,7 +30,7 @@ def forward(self, x): return F.logsigmoid(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_logsigmoid(change_ordering): model = LayerLogSigmoid() model.eval() @@ -38,7 +38,7 @@ def test_f_logsigmoid(change_ordering): model = FLogSigmoid() model.eval() diff --git a/test/layers/activations/test_prelu.py b/test/layers/activations/test_prelu.py index 233318f3..6ed1e53a 100644 --- a/test/layers/activations/test_prelu.py +++ b/test/layers/activations/test_prelu.py @@ -34,7 +34,7 @@ def forward(self, x): return F.prelu(x, weight=weights)
-@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_prelu(change_ordering): model = LayerPReLU() model.eval() @@ -42,7 +42,7 @@ def test_layer_prelu(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_f_prelu(change_ordering): model = FPReLU() model.eval() diff --git a/test/layers/activations/test_relu.py b/test/layers/activations/test_relu.py index 4235e13e..a65160e2 100644 --- a/test/layers/activations/test_relu.py +++ b/test/layers/activations/test_relu.py @@ -30,7 +30,7 @@ def forward(self, x): return F.relu(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_relu(change_ordering): model = LayerReLU() model.eval() @@ -38,7 +38,7 @@ def test_layer_relu(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_f_relu(change_ordering): model = FReLU() model.eval() diff --git a/test/layers/activations/test_relu6.py b/test/layers/activations/test_relu6.py index c81005e0..fd76d3d9 100644 --- a/test/layers/activations/test_relu6.py +++ b/test/layers/activations/test_relu6.py @@ -30,7 +30,7 @@ def forward(self, x): return F.relu6(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_relu6(change_ordering): model = LayerReLU6() model.eval() @@ -38,7 +38,7 @@ def test_layer_relu6(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_f_relu6(change_ordering): model = FReLU6() model.eval() diff --git a/test/layers/activations/test_selu.py b/test/layers/activations/test_selu.py index 9680cab8..3676c996 100644 --- a/test/layers/activations/test_selu.py +++ b/test/layers/activations/test_selu.py @@ -30,7 +30,7 @@ def forward(self, x): return F.selu(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_selu(change_ordering): model = LayerSELU() model.eval() @@ -38,7 +38,7 @@ def test_layer_selu(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_f_selu(change_ordering): model = FSELU() model.eval() diff --git a/test/layers/activations/test_sigmoid.py b/test/layers/activations/test_sigmoid.py index 7914efc2..eb998ccc 100644 --- a/test/layers/activations/test_sigmoid.py +++ b/test/layers/activations/test_sigmoid.py @@ -30,7 +30,7 @@ def forward(self, x): return F.sigmoid(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_sigmoid(change_ordering): model = LayerSigmoid() model.eval() @@ -38,7 +38,7 @@ def test_layer_sigmoid(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) 
+@pytest.mark.parametrize('change_ordering', [False]) def test_f_sigmoid(change_ordering): model = FSigmoid() model.eval() diff --git a/test/layers/activations/test_softmax.py b/test/layers/activations/test_softmax.py index 0d3bc432..ff032e31 100644 --- a/test/layers/activations/test_softmax.py +++ b/test/layers/activations/test_softmax.py @@ -32,7 +32,7 @@ def forward(self, x): return F.softmax(x, self.dim) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('dim', [0, 1, 2, 3]) def test_layer_softmax(change_ordering, dim): model = LayerSoftmax(dim) @@ -41,7 +41,7 @@ def test_layer_softmax(change_ordering, dim): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('dim', [0, 1, 2, 3]) def test_f_softmax(change_ordering, dim): model = FSoftmax(dim) diff --git a/test/layers/activations/test_tanh.py b/test/layers/activations/test_tanh.py index 0f4c61c0..1cf92bc5 100644 --- a/test/layers/activations/test_tanh.py +++ b/test/layers/activations/test_tanh.py @@ -30,7 +30,7 @@ def forward(self, x): return F.tanh(x) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_layer_tanh(change_ordering): model = LayerTanh() model.eval() @@ -38,7 +38,7 @@ def test_layer_tanh(change_ordering): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_f_tanh(change_ordering): model = FTanh() model.eval() diff --git a/test/layers/constants/test_constant.py b/test/layers/constants/test_constant.py index e4ad0173..25be5913 100644 --- a/test/layers/constants/test_constant.py +++ b/test/layers/constants/test_constant.py @@ -2,9 +2,12 @@ import torch import torch.nn as nn import pytest - +from keras_data_format_converter import convert_channels_first_to_last +from onnx import helper, TensorProto, save +import onnxruntime as rt +from onnx2kerastl import onnx_to_keras from test.utils import convert_and_test - +import tensorflow as tf class FConstant(nn.Module): def __init__(self, constant): @@ -12,13 +15,90 @@ def __init__(self, constant): self.constant = constant def forward(self, x): - return x + torch.FloatTensor([self.constant]) + return x + nn.functional.one_hot(x) + + +class OneHot(): + def __init__(self, depth, values, axis): + super(OneHot, self).__init__() + self.depth = depth + self.values = values + self.axis = axis + + def get_onnx(self): + named_input = ['batch', 'h', 'w'] + if self.axis == -1: + output_shape = named_input + [self.depth] + else: + if self.axis < 0: + add = 1 + else: + add = 0 + output_shape = named_input[:self.axis+add] + [self.depth] + named_input[self.axis+add:] + model = helper.make_model(helper.make_graph( + nodes=[ + helper.make_node( + "Constant", + inputs=[], + outputs=["depth"], + value=helper.make_tensor('depth_tensor', TensorProto.INT64, [], np.array(self.depth).tobytes(), + raw=True), + ), + helper.make_node( + "Constant", + inputs=[], + outputs=["values"], + value=helper.make_tensor('values_tesnor', TensorProto.INT64, [2], + np.array([self.values[0], self.values[1]]).tobytes(), raw=True), + ), + helper.make_node( + "OneHot", + inputs=["indices", "depth", "values"], + outputs=["one_hot_encoded"], + 
axis=self.axis, + ), + ], + + name="test-model", + inputs=[ + helper.make_tensor_value_info("indices", TensorProto.FLOAT, named_input), + ], + outputs=[ + helper.make_tensor_value_info("one_hot_encoded", TensorProto.INT64, output_shape) + ] + )) + return model -@pytest.mark.parametrize('change_ordering', [True, False]) + +class TorchOneHot(nn.Module): + def __init__(self): + super(TorchOneHot, self).__init__() + + def forward(self, x): + return torch.nn.functional.one_hot(x, 12) + +@pytest.mark.skip('This test does not work well') @pytest.mark.parametrize('constant', [-1.0, 0.0, 1.0]) -def test_constant(change_ordering, constant): +def test_constant(constant): model = FConstant(constant) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) + +@pytest.mark.parametrize('depth', [10]) +@pytest.mark.parametrize('values', [[1,3]]) +@pytest.mark.parametrize('axis', [-1]) +def test_one_hot(depth, values, axis): + onnx_one_hot = OneHot(depth=depth, values=values, axis=axis).get_onnx() + indices = np.array([[[1, 9, 3], [2, 4, 5]]], dtype=np.float32) + keras_model = onnx_to_keras(onnx_one_hot, ['indices'], name_policy='attach_weights_name').converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + keras_res = final_model(indices) + sess = rt.InferenceSession(onnx_one_hot.SerializeToString()) + input_name_1 = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + pred = sess.run([label_name], {input_name_1: indices,})[0] + assert tf.reduce_max(tf.abs(tf.transpose(keras_res,[0,1,3,2])-pred)).numpy() < 10**(-4) + assert tf.reduce_mean(tf.abs(tf.transpose(keras_res, [0, 1, 3, 2]) - pred)).numpy() < 10**(-4) + diff --git a/test/layers/convolutions/test_conv2d.py b/test/layers/convolutions/test_conv2d.py index 19b463fc..10b8c26f 100644 --- a/test/layers/convolutions/test_conv2d.py +++ b/test/layers/convolutions/test_conv2d.py @@ -33,7 +33,7 @@ def func(change_ordering, kernel_size, padding, stride, bias, dilation, groups): error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3, 5]) @pytest.mark.parametrize('stride', [1]) @@ -44,7 +44,7 @@ def test_conv2d_case1(change_ordering, kernel_size, padding, stride, bias, dilat func(change_ordering, kernel_size, padding, stride, bias, dilation, groups) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3, 5]) @pytest.mark.parametrize('stride', [1, 2, 3]) diff --git a/test/layers/convolutions/test_conv3d.py b/test/layers/convolutions/test_conv3d.py index bc1b36fe..461d41ab 100644 --- a/test/layers/convolutions/test_conv3d.py +++ b/test/layers/convolutions/test_conv3d.py @@ -34,7 +34,7 @@ def func(change_ordering, kernel_size, padding, stride, bias, dilation, groups, @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3, 5]) 
@pytest.mark.parametrize('stride', [1]) @@ -55,7 +55,7 @@ def test_conv3d_case1(change_ordering, kernel_size, padding, stride, bias, dilat @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3, 5]) @pytest.mark.parametrize('stride', [1, 2, 3]) diff --git a/test/layers/convolutions/test_convtranspose2d.py b/test/layers/convolutions/test_convtranspose2d.py index 5d7bdc0a..9d4c4494 100644 --- a/test/layers/convolutions/test_convtranspose2d.py +++ b/test/layers/convolutions/test_convtranspose2d.py @@ -16,7 +16,7 @@ def forward(self, x): return x -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5]) @pytest.mark.parametrize('padding', [0, 1, 3]) @pytest.mark.parametrize('stride', [1, 2]) diff --git a/test/layers/elementwise/test_add.py b/test/layers/elementwise/test_add.py index 89fef8e2..09cdcd26 100644 --- a/test/layers/elementwise/test_add.py +++ b/test/layers/elementwise/test_add.py @@ -16,7 +16,7 @@ def forward(self, x, y): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_add(change_ordering): model = FAdd() model.eval() diff --git a/test/layers/elementwise/test_bitshift.py b/test/layers/elementwise/test_bitshift.py new file mode 100644 index 00000000..bdc2c174 --- /dev/null +++ b/test/layers/elementwise/test_bitshift.py @@ -0,0 +1,54 @@ +import tf2onnx.onnx_opset.generator +import torch +import numpy as np +from test.utils import convert_and_test, torch2keras +from keras_data_format_converter import convert_channels_first_to_last +from onnx import helper, TensorProto, save +import onnxruntime as rt +from onnx2kerastl import onnx_to_keras +import tensorflow as tf + +class BitShift(): + def __init__(self, direction): + super(BitShift, self).__init__() + self.direction = direction + + def get_onnx(self): + named_input = ['batch', 'h', 'w'] + model = helper.make_model(helper.make_graph( + nodes=[ + helper.make_node( + "BitShift", + inputs=["x", "y"], + outputs=["bitshifted"], + direction=self.direction + ), + ], + + name="test-model", + inputs=[ + helper.make_tensor_value_info("x", TensorProto.UINT64, named_input), + helper.make_tensor_value_info("y", TensorProto.UINT64, named_input), + ], + outputs=[ + helper.make_tensor_value_info("bitshifted", TensorProto.UINT64, named_input) + ] + + )) + return model + + +def test_bitshift(): + onnx_model = BitShift(direction='LEFT').get_onnx() + x = np.random.randint(low=-100, high=100, size=(1, 50, 50)) + y = np.random.randint(low=1, high=5, size=(1, 50, 50)) + sess = rt.InferenceSession(onnx_model.SerializeToString()) + input_name_1 = sess.get_inputs()[0].name + input_name_2 = sess.get_inputs()[1].name + label_name = sess.get_outputs()[0].name + pred = sess.run([label_name], {input_name_1: x.astype(np.uint64), input_name_2: y.astype(np.uint64)})[0] + keras_model = onnx_to_keras(onnx_model, ['x', 'y'], name_policy='attach_weights_name', input_types=[tf.int64, tf.int64]).converted_model + final_k = convert_channels_first_to_last(keras_model, ['x', 'y']) + assert (final_k([x, y])-pred).numpy().max() < 10**(-5) + + diff --git a/test/layers/elementwise/test_div.py b/test/layers/elementwise/test_div.py index d287e5e8..ded31118 100644 --- a/test/layers/elementwise/test_div.py +++ 
b/test/layers/elementwise/test_div.py @@ -1,10 +1,6 @@ import numpy as np -import torch -import torch.nn as nn -from torch.autograd import Variable -from onnx2keras import onnx_to_keras, check_torch_keras_error -import onnx import pytest +import torch.nn as nn from test.utils import convert_and_test @@ -22,7 +18,7 @@ def forward(self, x, y): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_div(change_ordering): model = FDiv() model.eval() diff --git a/test/layers/elementwise/test_mod.py b/test/layers/elementwise/test_mod.py new file mode 100644 index 00000000..fec2436e --- /dev/null +++ b/test/layers/elementwise/test_mod.py @@ -0,0 +1,25 @@ +import torch +import numpy as np +from test.utils import convert_and_test, torch2keras +from keras_data_format_converter import convert_channels_first_to_last +import pytest + + +class TorchMod(torch.nn.Module): + def __init__(self): + super(TorchMod, self).__init__() + + def forward(self, x, y): + return torch.fmod(x, y) + + +def test_mod(): + pt_model = TorchMod() + x = np.random.randint(low=-100, high=100, size=(1, 50, 50)) + y = np.random.randint(low=-100, high=100, size=(1, 50, 50)) + y[y==0] = 1 + k_model = torch2keras(pt_model, (x, y), verbose=False, change_ordering=False) + final_k = convert_channels_first_to_last(k_model, ['test_in_1', 'test_in_2']) + assert (final_k((x, y))-pt_model(torch.from_numpy(x), torch.from_numpy(y))).numpy().max() < 10**(-5) + + diff --git a/test/layers/elementwise/test_mul.py b/test/layers/elementwise/test_mul.py index 3be42443..4d5ceb25 100644 --- a/test/layers/elementwise/test_mul.py +++ b/test/layers/elementwise/test_mul.py @@ -16,7 +16,7 @@ def forward(self, x, y): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_mul(change_ordering): model = FMul() model.eval() diff --git a/test/layers/elementwise/test_round.py b/test/layers/elementwise/test_round.py new file mode 100644 index 00000000..5333f1ca --- /dev/null +++ b/test/layers/elementwise/test_round.py @@ -0,0 +1,18 @@ +import torch +import pytest +from test.utils import convert_and_test +import numpy as np + + +class TorchRound(torch.nn.Module): + def __init__(self): + super(TorchRound, self).__init__() + + def forward(self, x): + return torch.round(x) + + +def test_round(): + pt_model = TorchRound() + error = convert_and_test(pt_model, (np.random.random((1, 8, 3))), verbose=False) + diff --git a/test/layers/elementwise/test_sub.py b/test/layers/elementwise/test_sub.py index 6fbe1ffa..3ecf53d3 100644 --- a/test/layers/elementwise/test_sub.py +++ b/test/layers/elementwise/test_sub.py @@ -15,7 +15,7 @@ def forward(self, x, y): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_add(change_ordering): model = FSub() model.eval() diff --git a/test/layers/linears/test_linear.py b/test/layers/linears/test_linear.py index 2a7218e6..835f3709 100644 --- a/test/layers/linears/test_linear.py +++ b/test/layers/linears/test_linear.py @@ -17,8 +17,16 @@ def forward(self, x): return x +class DetTest(nn.Module): + def __init__(self): + super(DetTest, self).__init__() + + def forward(self, x): + return torch.det(x) + + @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('bias', [True, 
False]) def test_linear(change_ordering, bias): ins = np.random.choice([1, 3, 7, 128]) @@ -28,3 +36,8 @@ def test_linear(change_ordering, bias): input_np = np.random.uniform(0, 1, (1, ins)) error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + + +def test_det(): + matrix = np.random.random((2, 4, 4)) + error = convert_and_test(DetTest(), matrix, verbose=False, change_ordering=False) diff --git a/test/layers/normalizations/test_bn2d.py b/test/layers/normalizations/test_bn2d.py index 73459aa2..a6875001 100644 --- a/test/layers/normalizations/test_bn2d.py +++ b/test/layers/normalizations/test_bn2d.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_bn2d(change_ordering): inp_size = np.random.randint(10, 100) diff --git a/test/layers/normalizations/test_in2d.py b/test/layers/normalizations/test_in2d.py index 7f764b65..9ad7c5a5 100644 --- a/test/layers/normalizations/test_in2d.py +++ b/test/layers/normalizations/test_in2d.py @@ -21,7 +21,7 @@ def forward(self, x): # maybe it can be problem described here # https://discuss.pytorch.org/t/instance-norm-implement-by-basic-operations-has-different-result-comparing-to-torch-nn-instancenorm2d/87470/2 @pytest.mark.parametrize('epsilon', [1e-4]) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_instancenorm(change_ordering, epsilon): inp_size = np.random.randint(10, 100) diff --git a/test/layers/operations/test_abs.py b/test/layers/operations/test_abs.py new file mode 100644 index 00000000..942d5f03 --- /dev/null +++ b/test/layers/operations/test_abs.py @@ -0,0 +1,24 @@ +import numpy as np +import torch +import torch.nn as nn +import pytest + +from test.utils import convert_and_test + + +class LayerTest(nn.Module): + def __init__(self): + super(LayerTest, self).__init__() + + def forward(self, x): + x = torch.abs(x) + return x + + +@pytest.mark.parametrize('change_ordering', [True]) +def test_abs(change_ordering): + model = LayerTest() + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 28, 28)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/layers/operations/test_bitwise_not.py b/test/layers/operations/test_bitwise_not.py new file mode 100644 index 00000000..831030f9 --- /dev/null +++ b/test/layers/operations/test_bitwise_not.py @@ -0,0 +1,38 @@ +from onnx import helper, TensorProto +import numpy as np +from keras_data_format_converter import convert_channels_first_to_last +from onnx2kerastl import onnx_to_keras + + +class BitwiseNot(): + def __init__(self): + super(BitwiseNot, self).__init__() + + def get_onnx(self): + model = helper.make_model(helper.make_graph( + nodes=[ + helper.make_node( + "BitwiseNot", + inputs=["x"], + outputs=["bitwise_not"], + )], + + name="test-model", + inputs=[ + helper.make_tensor_value_info("x", TensorProto.INT32, ["B", "N", "C"]), + ], + outputs=[ + helper.make_tensor_value_info("bitwise_not", TensorProto.INT32, ["B", "N", "C"]) + ] + + )) + return model + + +def test_bitwise_not(): + np_input = np.random.randint(low=1, high=2000, size=(1, 8, 3), dtype=np.int32) # ONNX graph is built by hand here because onnx export only supports bool input for this op + onnx_bitwise = BitwiseNot().get_onnx() + keras_model = onnx_to_keras(onnx_bitwise, ['x'], name_policy='attach_weights_name').converted_model + final_model =
convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + keras_res = final_model(np_input) + assert (np.abs((np.bitwise_not(np_input) - keras_res).numpy()) < 1).all() \ No newline at end of file diff --git a/test/layers/operations/test_cast.py b/test/layers/operations/test_cast.py index ad9d4e2a..bbf37d1e 100644 --- a/test/layers/operations/test_cast.py +++ b/test/layers/operations/test_cast.py @@ -18,7 +18,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_cast(change_ordering): model = FCastTest() model.eval() diff --git a/test/layers/operations/test_clip.py b/test/layers/operations/test_clip.py index 09f405c3..17e29221 100644 --- a/test/layers/operations/test_clip.py +++ b/test/layers/operations/test_clip.py @@ -19,7 +19,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_clip(change_ordering): model = FClipTest() model.eval() diff --git a/test/layers/operations/test_cumsum.py b/test/layers/operations/test_cumsum.py new file mode 100644 index 00000000..8b443d99 --- /dev/null +++ b/test/layers/operations/test_cumsum.py @@ -0,0 +1,20 @@ +import torch +from test.utils import convert_and_test +import numpy as np +import pytest + + +class TorchCumSum(torch.nn.Module): + def __init__(self, axis): + super(TorchCumSum, self).__init__() + self.axis = axis + + def forward(self, x): + return torch.cumsum(x, dim=self.axis) + + +@pytest.mark.parametrize('axis', [-1, 1]) +def test_cumsum(axis): + pt_model = TorchCumSum(axis=axis) + error = convert_and_test(pt_model, (np.random.random((1, 8, 3))), verbose=False) + diff --git a/test/layers/operations/test_floor.py b/test/layers/operations/test_floor.py index 58473230..8d17cd73 100644 --- a/test/layers/operations/test_floor.py +++ b/test/layers/operations/test_floor.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_floor(change_ordering): model = FFloorTest() model.eval() diff --git a/test/layers/operations/test_if.py b/test/layers/operations/test_if.py new file mode 100644 index 00000000..19e61eb8 --- /dev/null +++ b/test/layers/operations/test_if.py @@ -0,0 +1,38 @@ +import torch +from test.utils import convert_and_test, torch2keras +from keras_data_format_converter import convert_channels_first_to_last +import numpy as np + + +class TorchIfInf(torch.nn.Module): + def __init__(self): + super(TorchIfInf, self).__init__() + + def forward(self, x): + return torch.isinf(x) + + +class TorchIfNan(torch.nn.Module): + def __init__(self): + super(TorchIfNan, self).__init__() + + def forward(self, x): + return torch.isnan(x) + + +def test_ifinf(): + pt_model = TorchIfInf() + np_input = 1/np.random.randint(low=0,high=2,size=(1, 8, 3)) + k_model = torch2keras(pt_model, np_input, verbose=False, change_ordering=False) + final_k = convert_channels_first_to_last(k_model, ['test_in']) + assert (pt_model(torch.from_numpy(np_input)).numpy() == np.swapaxes(final_k(np.swapaxes(np_input, 1, 2)), 2, 1)).all() + + +def test_ifnan(): + pt_model = TorchIfNan() + np_input = np.random.random(size=(1, 8, 3)) + selection = np.random.randint(low=0, high=2, size=(1, 8, 3)) + np_input[selection == 1] = np.nan + k_model = torch2keras(pt_model, np_input, verbose=False, 
change_ordering=False) + final_k = convert_channels_first_to_last(k_model, ['test_in']) + assert (pt_model(torch.from_numpy(np_input)).numpy() == np.swapaxes(final_k(np.swapaxes(np_input, 1, 2)), 2, 1)).all() diff --git a/test/layers/operations/test_nms.py b/test/layers/operations/test_nms.py new file mode 100644 index 00000000..b981b587 --- /dev/null +++ b/test/layers/operations/test_nms.py @@ -0,0 +1,28 @@ +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import urllib + + +def test_nms(): + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/onnx2kerras/nms/nms_v2.onnx", + "nms_v2.onnx") + onnx_model = onnx.load('nms_v2.onnx') + keras_model = onnx_to_keras(onnx_model, ['boxes', 'scores'], name_policy='attach_weights_name').converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/onnx2kerras/nms/nms_in_boxes.npy", + "nms_in_boxes.npy") + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/onnx2kerras/nms/nms_in_scores.npy", + "nms_in_scores.npy") + boxes = np.load('nms_in_boxes.npy') + scores = np.load('nms_in_scores.npy') + keras_res = final_model([boxes.swapaxes(1, 2), scores]) + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/onnx2kerras/nms/nms_out.npy", + "nms_out.npy") + results = np.load('nms_out.npy') + assert len(set(keras_res[..., 0].numpy()).intersection(set(results))) == len(results) == keras_res.shape[0] diff --git a/test/layers/operations/test_norm.py b/test/layers/operations/test_norm.py index 8ebdcee4..0795cd9c 100644 --- a/test/layers/operations/test_norm.py +++ b/test/layers/operations/test_norm.py @@ -23,7 +23,7 @@ def forward(self, x): # TODO: Not working with dim=[2,3] and change_ordering=False ???? 
error about 0.0001-0.001 @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('dim', [[1, 2], [1, 3]]) @pytest.mark.parametrize('epsilon', [5e-5]) @pytest.mark.parametrize('keepdim', [True, False]) diff --git a/test/layers/operations/test_reduce.py b/test/layers/operations/test_reduce.py new file mode 100644 index 00000000..242a9fcb --- /dev/null +++ b/test/layers/operations/test_reduce.py @@ -0,0 +1,39 @@ +import torch +from test.utils import convert_and_test +import numpy as np +import pytest + + +class TorchReduceProd(torch.nn.Module): + def __init__(self, axis, keepdim): + super(TorchReduceProd, self).__init__() + self.axis = axis + self.keepdim = keepdim + + def forward(self, x): + return torch.prod(x, dim=self.axis, keepdim=self.keepdim) + + +class TorchReduceMin(torch.nn.Module): + def __init__(self, axis, keepdim): + super(TorchReduceMin, self).__init__() + self.axis = axis + self.keepdim = keepdim + + def forward(self, x): + return torch.min(x, dim=self.axis, keepdim=self.keepdim) + + +@pytest.mark.parametrize('axis', [-1, 1]) +@pytest.mark.parametrize('keepdim', [True, False]) +def test_reduce_prod(axis, keepdim): + pt_model = TorchReduceProd(axis=axis, keepdim=keepdim) + error = convert_and_test(pt_model, (np.random.random((1, 8, 3))), verbose=False) + + +@pytest.mark.parametrize('axis', [-1, 1]) +@pytest.mark.parametrize('keepdim', [True, False]) +def test_reduce_min(axis, keepdim): + pt_model = TorchReduceMin(axis=axis, keepdim=keepdim) + error = convert_and_test(pt_model, (np.random.random((1, 8, 3))), verbose=False) + diff --git a/test/layers/operations/test_size.py b/test/layers/operations/test_size.py new file mode 100644 index 00000000..11d02ab8 --- /dev/null +++ b/test/layers/operations/test_size.py @@ -0,0 +1,44 @@ +import numpy as np +from keras_data_format_converter import convert_channels_first_to_last +from onnx import helper, TensorProto +import onnxruntime as rt +from onnx2kerastl import onnx_to_keras + + +class OnnxSize(): + def __init__(self): + super(OnnxSize, self).__init__() + + def get_onnx(self): + model = helper.make_model(helper.make_graph( + nodes=[ + helper.make_node( + "Size", + inputs=["test_in"], + outputs=["test_out"], + )], + + name="test-model", + inputs=[ + helper.make_tensor_value_info("test_in", TensorProto.FLOAT, ["B", "N", "C"]), + ], + outputs=[ + helper.make_tensor_value_info("test_out", TensorProto.INT64, []) + ] + + )) + return model + + +def test_size(): + np_input = np.random.random((1, 8, 3)) # onnx export only supports bool input + onnx_size = OnnxSize().get_onnx() + keras_model = onnx_to_keras(onnx_size, ['test_in'], name_policy='attach_weights_name').converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + keras_res = final_model(np_input) + sess = rt.InferenceSession(onnx_size.SerializeToString()) + input_name_1 = sess.get_inputs()[0].name + label_name = sess.get_outputs()[0].name + pred = sess.run([label_name], {input_name_1: np_input.astype(np.float32), })[0] + assert (keras_res-pred) < 1 + diff --git a/test/layers/operations/test_tan.py b/test/layers/operations/test_tan.py new file mode 100644 index 00000000..e0109f29 --- /dev/null +++ b/test/layers/operations/test_tan.py @@ -0,0 +1,17 @@ +import torch +from test.utils import convert_and_test +import numpy as np + + +class TorchTan(torch.nn.Module): + def __init__(self): + super(TorchTan, self).__init__() + + 
def forward(self, x): + return torch.tan(x) + + +def test_tan(): + pt_model = TorchTan() + error = convert_and_test(pt_model, (np.random.random((1, 8, 3))), verbose=False) + diff --git a/test/layers/poolings/test_avgpool2d.py b/test/layers/poolings/test_avgpool2d.py index 186dad88..65cb51af 100644 --- a/test/layers/poolings/test_avgpool2d.py +++ b/test/layers/poolings/test_avgpool2d.py @@ -16,7 +16,7 @@ def forward(self, x): return x -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3]) @pytest.mark.parametrize('stride', [1, 2, 3, 4]) diff --git a/test/layers/poolings/test_avgpool3d.py b/test/layers/poolings/test_avgpool3d.py index c153fea0..febb619a 100644 --- a/test/layers/poolings/test_avgpool3d.py +++ b/test/layers/poolings/test_avgpool3d.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3]) @pytest.mark.parametrize('stride', [1, 2, 3, 4]) diff --git a/test/layers/poolings/test_global_avgpool2d.py b/test/layers/poolings/test_global_avgpool2d.py index cbbbca0e..aa6f68a9 100644 --- a/test/layers/poolings/test_global_avgpool2d.py +++ b/test/layers/poolings/test_global_avgpool2d.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_global_avgpool2d(change_ordering): if not tf.test.gpu_device_name() and not change_ordering: pytest.skip("Skip! Since tensorflow AvgPoolingOp op currently only supports the NHWC tensor format on the CPU") diff --git a/test/layers/poolings/test_global_maxpool2d.py b/test/layers/poolings/test_global_maxpool2d.py index 4b6a3246..4ccd6ab6 100644 --- a/test/layers/poolings/test_global_maxpool2d.py +++ b/test/layers/poolings/test_global_maxpool2d.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.repeat(10) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_global_maxpool2d(change_ordering): if not tf.test.gpu_device_name() and not change_ordering: pytest.skip("Skip! 
Since tensorflow MaxPoolingOp op currently only supports the NHWC tensor format on the CPU") diff --git a/test/layers/poolings/test_maxpool2d.py b/test/layers/poolings/test_maxpool2d.py index 068b207c..2c85138e 100644 --- a/test/layers/poolings/test_maxpool2d.py +++ b/test/layers/poolings/test_maxpool2d.py @@ -16,7 +16,7 @@ def forward(self, x): return x -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3]) @pytest.mark.parametrize('stride', [1, 2, 3, 4]) diff --git a/test/layers/poolings/test_maxpool3d.py b/test/layers/poolings/test_maxpool3d.py index bbcef255..e64c513c 100644 --- a/test/layers/poolings/test_maxpool3d.py +++ b/test/layers/poolings/test_maxpool3d.py @@ -17,7 +17,7 @@ def forward(self, x): @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) @pytest.mark.parametrize('kernel_size', [1, 3, 5, 7]) @pytest.mark.parametrize('padding', [0, 1, 3]) @pytest.mark.parametrize('stride', [1, 2, 3, 4]) diff --git a/test/layers/poolings/test_maxpool3d_ceil.py b/test/layers/poolings/test_maxpool3d_ceil.py new file mode 100644 index 00000000..dc929e58 --- /dev/null +++ b/test/layers/poolings/test_maxpool3d_ceil.py @@ -0,0 +1,30 @@ +import numpy as np +import torch.nn as nn +import pytest +import tensorflow as tf + +from test.utils import convert_and_test + + +class LayerTest(nn.Module): + def __init__(self, kernel_size=3, stride=1): + super(LayerTest, self).__init__() + self.pool = nn.MaxPool3d(kernel_size=kernel_size, stride=stride, ceil_mode=True) + + def forward(self, x): + x = self.pool(x) + return x + + +@pytest.mark.slow +@pytest.mark.parametrize('change_ordering', [True]) +@pytest.mark.parametrize('kernel_size', [3]) +@pytest.mark.parametrize('stride', [2]) +def test_maxpool3d(change_ordering, kernel_size, stride): + if not tf.test.gpu_device_name() and not change_ordering: + pytest.skip("Skip! 
Since tensorflow MaxPoolingOp op currently only supports the NHWC tensor format on the CPU") + model = LayerTest(kernel_size=kernel_size, stride=stride) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 19, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) diff --git a/test/layers/poolings/test_topk.py b/test/layers/poolings/test_topk.py new file mode 100644 index 00000000..6367adef --- /dev/null +++ b/test/layers/poolings/test_topk.py @@ -0,0 +1,60 @@ +import torch +from test.utils import convert_and_test +import numpy as np +import pytest +import io +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import itertools + + +class TopK(torch.nn.Module): + def __init__(self, k, dim, largest, return_sorted): + self.k = k + self.dim = dim + self.largest = largest + self.return_sorted = return_sorted + super(TopK, self).__init__() + + def forward(self, x): + return torch.topk(x, self.k, self.dim, self.largest, self.return_sorted) + + +@pytest.mark.parametrize('return_sorted', [True, False]) +@pytest.mark.parametrize('largest', [True, False]) +@pytest.mark.parametrize('dim', [-1, 1]) +@pytest.mark.parametrize('k', [2]) +def test_topk(return_sorted, largest, dim, k): + k=2 + dim=1 + return_sorted=False + largest=True + np_input = np.random.random((1, 3, 8)) + pt_model = TopK(k, dim, largest, return_sorted) + temp_f = io.BytesIO() + torch.onnx.export(pt_model, torch.from_numpy(np_input), temp_f, verbose=True, + input_names=['test_in'], + output_names=['test_out_1', 'test_out_2']) + temp_f.seek(0) + onnx_model = onnx.load(temp_f) + keras_model = onnx_to_keras(onnx_model, ['test_in'], name_policy='attach_weights_name').converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + keras_res = final_model(np.swapaxes(np_input, 1, 2)) + pt_res = pt_model(torch.from_numpy(np_input)) + if not return_sorted: + reshaped_pt_pred = np.array(pt_res[0].transpose(1,2)).reshape(-1,k) + reshaped_keras_pred = np.array(keras_res[0]).reshape(-1, k) + for i in range(reshaped_pt_pred.shape[0]): + has_valid_permute = False + pt_i_res = reshaped_pt_pred[i, :] + for permutation in itertools.permutations(reshaped_keras_pred[i,:]): + if ((permutation - pt_i_res).__abs__() < 10**(-6)).all(): + has_valid_permute = True + if has_valid_permute is False: + assert False + else: + value_same = ((np.swapaxes(keras_res[0], 1, 2) - pt_res[0].numpy()).__abs__() < 10**(-6)).all() + index_same = ((np.swapaxes(keras_res[1], 1, 2) - pt_res[1].numpy()).__abs__() < 1).all() + assert value_same and index_same + diff --git a/test/layers/poolings/test_unique.py b/test/layers/poolings/test_unique.py new file mode 100644 index 00000000..19afc204 --- /dev/null +++ b/test/layers/poolings/test_unique.py @@ -0,0 +1,64 @@ +import torch +from test.utils import convert_and_test +import numpy as np +import pytest +import io +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import itertools + + +class Unique(torch.nn.Module): + def __init__(self, to_sort=False, return_inverse=False, return_counts=False, dim=None): + super(Unique, self).__init__() + self.to_sort = to_sort + self.return_inverse = return_inverse + self.return_counts = return_counts + self.dim = dim + + def forward(self, x): + return torch.unique(x, sorted=self.to_sort, return_inverse=self.return_inverse, + 
return_counts=self.return_counts, dim=self.dim)
+
+
+@pytest.mark.parametrize('return_inverse', [True])
+@pytest.mark.parametrize('return_counts', [True])
+@pytest.mark.parametrize('to_sort', [True, False])
+def test_unique(return_inverse, return_counts, to_sort):
+    to_sort = True
+    return_inverse = True
+    return_counts = True
+    dim = None
+    pt_model = Unique(to_sort=to_sort, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
+    output_names = ['unique']
+    if return_inverse:
+        output_names += ['inverse']
+    if return_counts:
+        output_names += ['counts']
+    torch_input = torch.randint(5, 9, (1, 8, 10))
+    temp_f = io.BytesIO()
+    # torch.onnx.export(pt_model, torch_input, temp_f, verbose=True,
+    #                   input_names=['x'],
+    #                   output_names=output_names)
+    torch.onnx.export(pt_model, torch_input, temp_f, verbose=True,
+                      input_names=['x'],
+                      output_names=output_names)
+    temp_f.seek(0)
+    onnx_model = onnx.load(temp_f)
+    keras_model = onnx_to_keras(onnx_model, ['x'], name_policy='attach_weights_name').converted_model
+    final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True)
+    rotated_input = np.swapaxes(torch_input.numpy(), 1, 2)
+    res_keras = final_model(rotated_input)
+    assert ((res_keras[0].numpy()[
+        np.swapaxes(res_keras[1].numpy()[..., 0].reshape((1, 8, 10)), 1, 2).astype(int)][
+        ..., 0] - rotated_input) < 0.5).all()
+
+    # keras_res = final_model([input_t.numpy().swapaxes(1, 2), h0_t.numpy().swapaxes(1, 2)])
+    # pt_res = pt_model(input_t, h0_t)
+    # diff_tens_state = (pt_res[0].swapaxes(1, 2).detach().numpy() - keras_res[0]).numpy().__abs__()
+    # diff_tens_out = (pt_res[1].swapaxes(1, 2).detach().numpy() - keras_res[1]).numpy().__abs__()
+    # eps = 10**(-5)
+    # if (diff_tens_state.max() < eps) & (diff_tens_out.max() < eps) is False:
+    #     print(1)
+    # assert (diff_tens_state.max() < eps) & (diff_tens_out.max() < eps)
diff --git a/test/layers/reshapes/test_gather_elements.py b/test/layers/reshapes/test_gather_elements.py
new file mode 100644
index 00000000..8e6b0c0b
--- /dev/null
+++ b/test/layers/reshapes/test_gather_elements.py
@@ -0,0 +1,35 @@
+import torch
+from test.utils import convert_and_test
+import numpy as np
+import pytest
+import io
+import onnx
+from onnx2kerastl import onnx_to_keras
+from keras_data_format_converter import convert_channels_first_to_last
+import itertools
+
+
+class TorchGather(torch.nn.Module):
+    def __init__(self, dim):
+        self.dim = dim
+        super(TorchGather, self).__init__()
+
+    def forward(self, x, y):
+        return torch.gather(x, self.dim, y)
+
+
+def test_gather_elements():
+    t = torch.tensor([[1, 2], [3, 4]])
+    pt_model = TorchGather(1)
+    temp_f = io.BytesIO()
+    idx_arr = [[0, 0], [1, 0]]
+    torch_idx_arr = torch.tensor(idx_arr)
+    torch.onnx.export(pt_model, (t, torch_idx_arr), temp_f, verbose=True,
+                      input_names=['test_in_1', 'test_in_2'],
+                      output_names=['test_out'])
+    temp_f.seek(0)
+    onnx_model = onnx.load(temp_f)
+    keras_model = onnx_to_keras(onnx_model, ['test_in_1', 'test_in_2'], name_policy='attach_weights_name').converted_model
+    final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True)
+    assert (final_model([t.numpy(), np.array(idx_arr)])-pt_model(t, torch_idx_arr) < 1).numpy().all()
+
diff --git a/test/layers/reshapes/test_slice.py b/test/layers/reshapes/test_slice.py
index 18a28b54..ab3a44d6 100644
--- a/test/layers/reshapes/test_slice.py
+++ b/test/layers/reshapes/test_slice.py
@@ 
-14,7 +14,7 @@ def forward(self, x): return x -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_slice(change_ordering): model = LayerTest() model.eval() diff --git a/test/layers/reshapes/test_split.py b/test/layers/reshapes/test_split.py index 3f6907a7..67cddc7c 100644 --- a/test/layers/reshapes/test_split.py +++ b/test/layers/reshapes/test_split.py @@ -14,7 +14,7 @@ def forward(self, x): return torch.split(x, 224//4, 3) -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_split(change_ordering): model = LayerTest() model.eval() diff --git a/test/layers/reshapes/test_squeeze.py b/test/layers/reshapes/test_squeeze.py index 6e8ec9e9..ce350765 100644 --- a/test/layers/reshapes/test_squeeze.py +++ b/test/layers/reshapes/test_squeeze.py @@ -14,7 +14,7 @@ def forward(self, x): return x -@pytest.mark.parametrize('change_ordering', [True, False]) +@pytest.mark.parametrize('change_ordering', [False]) def test_squeeze(change_ordering): model = LayerTest() model.eval() diff --git a/test/layers/reshapes/test_transpose_batch.py b/test/layers/reshapes/test_transpose_batch.py new file mode 100644 index 00000000..ba889a61 --- /dev/null +++ b/test/layers/reshapes/test_transpose_batch.py @@ -0,0 +1,25 @@ +import numpy as np +import torch +import torch.nn as nn +import pytest + +from test.utils import convert_and_test + + +class LayerTest(nn.Module): + def __init__(self): + super(LayerTest, self).__init__() + + def forward(self, x): + x = torch.permute(x, (1, 0, 2, 3)) + x = torch.permute(x, (1, 0, 2, 3)) + return x + + +@pytest.mark.parametrize('change_ordering', [True]) +def test_transpose_batch_and_abs(change_ordering): + model = LayerTest() + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 28, 28)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/layers/split/__init__.py b/test/layers/split/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/layers/split/test_split.py b/test/layers/split/test_split.py new file mode 100644 index 00000000..d54be1b2 --- /dev/null +++ b/test/layers/split/test_split.py @@ -0,0 +1,27 @@ +# code to proprely load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/ +import onnx +import torch +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import pytest + + +class SplitSection(torch.nn.Module): + + def __init__(self): + super(SplitSection, self).__init__() + + def forward(self, x): + return torch.split(x, [5, 5], dim=-1) + + +@pytest.mark.parametrize('opset_version', [12, 14]) +def test_split_v8(opset_version): + model = SplitSection() + inpt = torch.ones([1, 10, 10]) + torch.onnx.export(model, inpt, 'split_model.onnx', opset_version=opset_version) + onnx_model = onnx.load('split_model.onnx') + keras_model = onnx_to_keras(onnx_model, ['tensor'], name_policy='attach_weights_name').converted_model + final_model = convert_channels_first_to_last(keras_model, ['tensor']) + assert final_model(inpt)[0].shape == final_model(inpt)[1].shape == [1, 5, 10] + # assert np.abs(keras_preds-this_pred.detach().numpy()).max() < 1e-04 \ No newline at end of file diff --git a/test/models/cityscape_semseg/__init__.py b/test/models/cityscape_semseg/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/models/cityscape_semseg/cityscapes.py 
b/test/models/cityscape_semseg/cityscapes.py new file mode 100644 index 00000000..f51ee58f --- /dev/null +++ b/test/models/cityscape_semseg/cityscapes.py @@ -0,0 +1,147 @@ +import json +import os +from collections import namedtuple + +import torch +import torch.utils.data as data +from PIL import Image +import numpy as np + + +class Cityscapes(data.Dataset): + """Cityscapes Dataset. + + **Parameters:** + - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located. + - **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val' + - **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types. + - **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` + - **target_transform** (callable, optional): A function/transform that takes in the target and transforms it. + """ + + # Based on https://github.com/mcordts/cityscapesScripts + CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id', + 'has_instances', 'ignore_in_eval', 'color']) + classes = [ + CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)), + CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)), + CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)), + CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)), + CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)), + CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)), + CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)), + CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)), + CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)), + CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)), + CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)), + CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)), + CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)), + CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)), + CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)), + CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)), + CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)), + CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)), + CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)), + CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)), + CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)), + CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)), + CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 
0, 70)), + CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)), + CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)), + CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)), + CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)), + CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)), + CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)), + CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)), + ] + + train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)] + train_id_to_color.append([0, 0, 0]) + train_id_to_color = np.array(train_id_to_color) + id_to_train_id = np.array([c.train_id for c in classes]) + + #train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35), + # (70, 130, 180), (220, 20, 60), (0, 0, 142)] + #train_id_to_color = np.array(train_id_to_color) + #id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1 + + def __init__(self, root, split='train', mode='fine', target_type='semantic', transform=None): + self.root = os.path.expanduser(root) + self.mode = 'gtFine' + self.target_type = target_type + self.images_dir = os.path.join(self.root, 'leftImg8bit', split) + + self.targets_dir = os.path.join(self.root, self.mode, split) + self.transform = transform + + self.split = split + self.images = [] + self.targets = [] + + if split not in ['train', 'test', 'val']: + raise ValueError('Invalid split for mode! Please use split="train", split="test"' + ' or split="val"') + + if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): + raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the' + ' specified "split" and "mode" are inside the "root" directory') + + for city in os.listdir(self.images_dir): + img_dir = os.path.join(self.images_dir, city) + target_dir = os.path.join(self.targets_dir, city) + + for file_name in os.listdir(img_dir): + self.images.append(os.path.join(img_dir, file_name)) + target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0], + self._get_target_suffix(self.mode, self.target_type)) + self.targets.append(os.path.join(target_dir, target_name)) + + @classmethod + def encode_target(cls, target): + return cls.id_to_train_id[np.array(target)] + + @classmethod + def decode_target(cls, target): + target[target == 255] = 19 + #target = target.astype('uint8') + 1 + return cls.train_id_to_color[target] + + def __getitem__(self, index): + """ + Args: + index (int): Index + Returns: + tuple: (image, target) where target is a tuple of all target types if target_type is a list with more + than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation. 
+ """ + image = Image.open(self.images[index]).convert('RGB') + target = Image.open(self.targets[index]) + if self.transform: + image, target = self.transform(image, target) + target = self.encode_target(target) + return image, target + + def __len__(self): + return len(self.images) + + def _load_json(self, path): + with open(path, 'r') as file: + data = json.load(file) + return data + + def _get_target_suffix(self, mode, target_type): + if target_type == 'instance': + return '{}_instanceIds.png'.format(mode) + elif target_type == 'semantic': + return '{}_labelIds.png'.format(mode) + elif target_type == 'color': + return '{}_color.png'.format(mode) + elif target_type == 'polygon': + return '{}_polygons.json'.format(mode) + elif target_type == 'depth': + return '{}_disparity.png'.format(mode) \ No newline at end of file diff --git a/test/models/cityscape_semseg/network/__init__.py b/test/models/cityscape_semseg/network/__init__.py new file mode 100644 index 00000000..ad24f336 --- /dev/null +++ b/test/models/cityscape_semseg/network/__init__.py @@ -0,0 +1,2 @@ +from .modeling import * +from ._deeplab import convert_to_separable_conv \ No newline at end of file diff --git a/test/models/cityscape_semseg/network/_deeplab.py b/test/models/cityscape_semseg/network/_deeplab.py new file mode 100644 index 00000000..c82f7e97 --- /dev/null +++ b/test/models/cityscape_semseg/network/_deeplab.py @@ -0,0 +1,178 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from .utils import _SimpleSegmentationModel + + +__all__ = ["DeepLabV3"] + + +class DeepLabV3(_SimpleSegmentationModel): + """ + Implements DeepLabV3 model from + `"Rethinking Atrous Convolution for Semantic Image Segmentation" + `_. + + Arguments: + backbone (nn.Module): the network used to compute the features for the model. + The backbone should return an OrderedDict[Tensor], with the key being + "out" for the last feature map used, and "aux" if an auxiliary classifier + is used. + classifier (nn.Module): module that takes the "out" element returned from + the backbone and returns a dense prediction. 
+ aux_classifier (nn.Module, optional): auxiliary classifier used during training + """ + pass + +class DeepLabHeadV3Plus(nn.Module): + def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]): + super(DeepLabHeadV3Plus, self).__init__() + self.project = nn.Sequential( + nn.Conv2d(low_level_channels, 48, 1, bias=False), + nn.BatchNorm2d(48), + nn.ReLU(inplace=True), + ) + + self.aspp = ASPP(in_channels, aspp_dilate) + + self.classifier = nn.Sequential( + nn.Conv2d(304, 256, 3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True), + nn.Conv2d(256, num_classes, 1) + ) + self._init_weight() + + def forward(self, feature): + low_level_feature = self.project( feature['low_level'] ) + output_feature = self.aspp(feature['out']) + output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear', align_corners=False) + return self.classifier( torch.cat( [ low_level_feature, output_feature ], dim=1 ) ) + + def _init_weight(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + +class DeepLabHead(nn.Module): + def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]): + super(DeepLabHead, self).__init__() + + self.classifier = nn.Sequential( + ASPP(in_channels, aspp_dilate), + nn.Conv2d(256, 256, 3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True), + nn.Conv2d(256, num_classes, 1) + ) + self._init_weight() + + def forward(self, feature): + return self.classifier( feature['out'] ) + + def _init_weight(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + +class AtrousSeparableConvolution(nn.Module): + """ Atrous Separable Convolution + """ + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, bias=True): + super(AtrousSeparableConvolution, self).__init__() + self.body = nn.Sequential( + # Separable Conv + nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ), + # PointWise Conv + nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias), + ) + + self._init_weight() + + def forward(self, x): + return self.body(x) + + def _init_weight(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + +class ASPPConv(nn.Sequential): + def __init__(self, in_channels, out_channels, dilation): + modules = [ + nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ] + super(ASPPConv, self).__init__(*modules) + +class ASPPPooling(nn.Sequential): + def __init__(self, in_channels, out_channels): + super(ASPPPooling, self).__init__( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True)) + + def forward(self, x): + size = x.shape[-2:] + x = super(ASPPPooling, self).forward(x) + return F.interpolate(x, size=size, mode='bilinear', align_corners=False) + +class ASPP(nn.Module): + def 
__init__(self, in_channels, atrous_rates): + super(ASPP, self).__init__() + out_channels = 256 + modules = [] + modules.append(nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True))) + + rate1, rate2, rate3 = tuple(atrous_rates) + modules.append(ASPPConv(in_channels, out_channels, rate1)) + modules.append(ASPPConv(in_channels, out_channels, rate2)) + modules.append(ASPPConv(in_channels, out_channels, rate3)) + modules.append(ASPPPooling(in_channels, out_channels)) + + self.convs = nn.ModuleList(modules) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True), + nn.Dropout(0.1),) + + def forward(self, x): + res = [] + for conv in self.convs: + res.append(conv(x)) + res = torch.cat(res, dim=1) + return self.project(res) + + + +def convert_to_separable_conv(module): + new_module = module + if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1: + new_module = AtrousSeparableConvolution(module.in_channels, + module.out_channels, + module.kernel_size, + module.stride, + module.padding, + module.dilation, + module.bias) + for name, child in module.named_children(): + new_module.add_module(name, convert_to_separable_conv(child)) + return new_module \ No newline at end of file diff --git a/test/models/cityscape_semseg/network/backbone/__init__.py b/test/models/cityscape_semseg/network/backbone/__init__.py new file mode 100644 index 00000000..2fe6e12b --- /dev/null +++ b/test/models/cityscape_semseg/network/backbone/__init__.py @@ -0,0 +1,4 @@ +from . import resnet +from . import mobilenetv2 +from . import hrnetv2 +from . import xception diff --git a/test/models/cityscape_semseg/network/backbone/hrnetv2.py b/test/models/cityscape_semseg/network/backbone/hrnetv2.py new file mode 100644 index 00000000..a33c6f26 --- /dev/null +++ b/test/models/cityscape_semseg/network/backbone/hrnetv2.py @@ -0,0 +1,345 @@ +import torch +from torch import nn +import torch.nn.functional as F +import os + +__all__ = ['HRNet', 'hrnetv2_48', 'hrnetv2_32'] + +# Checkpoint path of pre-trained backbone (edit to your path). Download backbone pretrained model hrnetv2-32 @ +# https://drive.google.com/file/d/1NxCK7Zgn5PmeS7W1jYLt5J9E0RRZ2oyF/view?usp=sharing .Personally, I added the backbone +# weights to the folder /checkpoints + +model_urls = { + 'hrnetv2_32': './checkpoints/model_best_epoch96_edit.pth', + 'hrnetv2_48': None +} + + +def check_pth(arch): + CKPT_PATH = model_urls[arch] + if os.path.exists(CKPT_PATH): + print(f"Backbone HRNet Pretrained weights at: {CKPT_PATH}, only usable for HRNetv2-32") + else: + print("No backbone checkpoint found for HRNetv2, please set pretrained=False when calling model") + return CKPT_PATH + # HRNetv2-48 not available yet, but you can train the whole model from scratch. 
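+
+# Usage sketch (illustrative; assumes the hrnetv2_32 checkpoint described above was
+# downloaded to ./checkpoints). _hrnet() further down loads it as:
+#     checkpoint = torch.load(check_pth('hrnetv2_32'))
+#     model.load_state_dict(checkpoint['state_dict'])
+# so hrnetv2_32(pretrained=True) expects that local file to exist, while
+# hrnetv2_32(pretrained=False) builds an untrained backbone and needs no checkpoint.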
+ + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class StageModule(nn.Module): + def __init__(self, stage, output_branches, c): + super(StageModule, self).__init__() + + self.number_of_branches = stage # number of branches is equivalent to the stage configuration. + self.output_branches = output_branches + + self.branches = nn.ModuleList() + + # Note: Resolution + Number of channels maintains the same throughout respective branch. + for i in range(self.number_of_branches): # Stage scales with the number of branches. Ex: Stage 2 -> 2 branch + channels = c * (2 ** i) # Scale channels by 2x for branch with lower resolution, + + # Paper does x4 basic block for each forward sequence in each branch (x4 basic block considered as a block) + branch = nn.Sequential(*[BasicBlock(channels, channels) for _ in range(4)]) + + self.branches.append(branch) # list containing all forward sequence of individual branches. + + # For each branch requires repeated fusion with all other branches after passing through x4 basic blocks. 
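+        # Fusion rule implemented below, for output branch j and input branch i:
+        #   i == j: identity (empty nn.Sequential)
+        #   i > j : 1x1 conv + BN to match channels, then nearest-neighbour upsample by 2**(i - j)
+        #   i < j : a chain of (j - i) stride-2 3x3 conv + BN blocks to downsample and match channels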
+ self.fuse_layers = nn.ModuleList() + + for branch_output_number in range(self.output_branches): + + self.fuse_layers.append(nn.ModuleList()) + + for branch_number in range(self.number_of_branches): + if branch_number == branch_output_number: + self.fuse_layers[-1].append(nn.Sequential()) # Used in place of "None" because it is callable + elif branch_number > branch_output_number: + self.fuse_layers[-1].append(nn.Sequential( + nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_output_number), kernel_size=1, stride=1, + bias=False), + nn.BatchNorm2d(c * (2 ** branch_output_number), eps=1e-05, momentum=0.1, affine=True, + track_running_stats=True), + nn.Upsample(scale_factor=(2.0 ** (branch_number - branch_output_number)), mode='nearest'), + )) + elif branch_number < branch_output_number: + downsampling_fusion = [] + for _ in range(branch_output_number - branch_number - 1): + downsampling_fusion.append(nn.Sequential( + nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_number), kernel_size=3, stride=2, + padding=1, + bias=False), + nn.BatchNorm2d(c * (2 ** branch_number), eps=1e-05, momentum=0.1, affine=True, + track_running_stats=True), + nn.ReLU(inplace=True), + )) + downsampling_fusion.append(nn.Sequential( + nn.Conv2d(c * (2 ** branch_number), c * (2 ** branch_output_number), kernel_size=3, + stride=2, padding=1, + bias=False), + nn.BatchNorm2d(c * (2 ** branch_output_number), eps=1e-05, momentum=0.1, affine=True, + track_running_stats=True), + )) + self.fuse_layers[-1].append(nn.Sequential(*downsampling_fusion)) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + + # input to each stage is a list of inputs for each branch + x = [branch(branch_input) for branch, branch_input in zip(self.branches, x)] + + x_fused = [] + for branch_output_index in range( + self.output_branches): # Amount of output branches == total length of fusion layers + for input_index in range(self.number_of_branches): # The inputs of other branches to be fused. + if input_index == 0: + x_fused.append(self.fuse_layers[branch_output_index][input_index](x[input_index])) + else: + x_fused[branch_output_index] = x_fused[branch_output_index] + self.fuse_layers[branch_output_index][ + input_index](x[input_index]) + + # After fusing all streams together, you will need to pass the fused layers + for i in range(self.output_branches): + x_fused[i] = self.relu(x_fused[i]) + + return x_fused # returning a list of fused outputs + + +class HRNet(nn.Module): + def __init__(self, c=48, num_blocks=[1, 4, 3], num_classes=1000): + super(HRNet, self).__init__() + + # Stem: + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(64, eps=1e-05, affine=True, track_running_stats=True) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, eps=1e-05, affine=True, track_running_stats=True) + self.relu = nn.ReLU(inplace=True) + + # Stage 1: + downsample = nn.Sequential( + nn.Conv2d(64, 256, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(256, eps=1e-05, affine=True, track_running_stats=True), + ) + # Note that bottleneck module will expand the output channels according to the output channels*block.expansion + bn_expansion = Bottleneck.expansion # The channel expansion is set in the bottleneck class. 
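+        # With Bottleneck.expansion == 4, layer1 outputs 64 * 4 = 256 channels, which is why the
+        # downsample path above maps 64 -> 256 and transition1 below takes 256 input channels.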
+ self.layer1 = nn.Sequential( + Bottleneck(64, 64, downsample=downsample), # Input is 64 for first module connection + Bottleneck(bn_expansion * 64, 64), + Bottleneck(bn_expansion * 64, 64), + Bottleneck(bn_expansion * 64, 64), + ) + + # Transition 1 - Creation of the first two branches (one full and one half resolution) + # Need to transition into high resolution stream and mid resolution stream + self.transition1 = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(256, c, kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(c, eps=1e-05, affine=True, track_running_stats=True), + nn.ReLU(inplace=True), + ), + nn.Sequential(nn.Sequential( # Double Sequential to fit with official pretrained weights + nn.Conv2d(256, c * 2, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(c * 2, eps=1e-05, affine=True, track_running_stats=True), + nn.ReLU(inplace=True), + )), + ]) + + # Stage 2: + number_blocks_stage2 = num_blocks[0] + self.stage2 = nn.Sequential( + *[StageModule(stage=2, output_branches=2, c=c) for _ in range(number_blocks_stage2)]) + + # Transition 2 - Creation of the third branch (1/4 resolution) + self.transition2 = self._make_transition_layers(c, transition_number=2) + + # Stage 3: + number_blocks_stage3 = num_blocks[1] # number blocks you want to create before fusion + self.stage3 = nn.Sequential( + *[StageModule(stage=3, output_branches=3, c=c) for _ in range(number_blocks_stage3)]) + + # Transition - Creation of the fourth branch (1/8 resolution) + self.transition3 = self._make_transition_layers(c, transition_number=3) + + # Stage 4: + number_blocks_stage4 = num_blocks[2] # number blocks you want to create before fusion + self.stage4 = nn.Sequential( + *[StageModule(stage=4, output_branches=4, c=c) for _ in range(number_blocks_stage4)]) + + # Classifier (extra module if want to use for classification): + # pool, reduce dimensionality, flatten, connect to linear layer for classification: + out_channels = sum([c * 2 ** i for i in range(len(num_blocks)+1)]) # total output channels of HRNetV2 + pool_feature_map = 8 + self.bn_classifier = nn.Sequential( + nn.Conv2d(out_channels, out_channels // 4, kernel_size=1, bias=False), + nn.BatchNorm2d(out_channels // 4, eps=1e-05, affine=True, track_running_stats=True), + nn.ReLU(inplace=True), + nn.AdaptiveAvgPool2d(pool_feature_map), + nn.Flatten(), + nn.Linear(pool_feature_map * pool_feature_map * (out_channels // 4), num_classes), + ) + + @staticmethod + def _make_transition_layers(c, transition_number): + return nn.Sequential( + nn.Conv2d(c * (2 ** (transition_number - 1)), c * (2 ** transition_number), kernel_size=3, stride=2, + padding=1, bias=False), + nn.BatchNorm2d(c * (2 ** transition_number), eps=1e-05, affine=True, + track_running_stats=True), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + # Stem: + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + # Stage 1 + x = self.layer1(x) + x = [trans(x) for trans in self.transition1] # split to 2 branches, form a list. 
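+        # x is now [1/4-resolution map with c channels, 1/8-resolution map with 2*c channels];
+        # each later transition appends one extra, further-downsampled branch before the next stage.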
+ + # Stage 2 + x = self.stage2(x) + x.append(self.transition2(x[-1])) + + # Stage 3 + x = self.stage3(x) + x.append(self.transition3(x[-1])) + + # Stage 4 + x = self.stage4(x) + + # HRNetV2 Example: (follow paper, upsample via bilinear interpolation and to highest resolution size) + output_h, output_w = x[0].size(2), x[0].size(3) # Upsample to size of highest resolution stream + x1 = F.interpolate(x[1], size=(output_h, output_w), mode='bilinear', align_corners=False) + x2 = F.interpolate(x[2], size=(output_h, output_w), mode='bilinear', align_corners=False) + x3 = F.interpolate(x[3], size=(output_h, output_w), mode='bilinear', align_corners=False) + + # Upsampling all the other resolution streams and then concatenate all (rather than adding/fusing like HRNetV1) + x = torch.cat([x[0], x1, x2, x3], dim=1) + x = self.bn_classifier(x) + return x + + +def _hrnet(arch, channels, num_blocks, pretrained, progress, **kwargs): + model = HRNet(channels, num_blocks, **kwargs) + if pretrained: + CKPT_PATH = check_pth(arch) + checkpoint = torch.load(CKPT_PATH) + model.load_state_dict(checkpoint['state_dict']) + return model + + +def hrnetv2_48(pretrained=False, progress=True, number_blocks=[1, 4, 3], **kwargs): + w_channels = 48 + return _hrnet('hrnetv2_48', w_channels, number_blocks, pretrained, progress, + **kwargs) + + +def hrnetv2_32(pretrained=False, progress=True, number_blocks=[1, 4, 3], **kwargs): + w_channels = 32 + return _hrnet('hrnetv2_32', w_channels, number_blocks, pretrained, progress, + **kwargs) + + +if __name__ == '__main__': + + try: + CKPT_PATH = os.path.join(os.path.abspath("."), '../../checkpoints/hrnetv2_32_model_best_epoch96.pth') + print("--- Running file as MAIN ---") + print(f"Backbone HRNET Pretrained weights as __main__ at: {CKPT_PATH}") + except: + print("No backbone checkpoint found for HRNetv2, please set pretrained=False when calling model") + + # Models + model = hrnetv2_32(pretrained=True) + #model = hrnetv2_48(pretrained=False) + + if torch.cuda.is_available(): + torch.backends.cudnn.deterministic = True + device = torch.device('cuda') + else: + device = torch.device('cpu') + model.to(device) + in_ = torch.ones(1, 3, 768, 768).to(device) + y = model(in_) + print(y.shape) + + # Calculate total number of parameters: + # pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + # print(pytorch_total_params) + + + + + + diff --git a/test/models/cityscape_semseg/network/backbone/mobilenetv2.py b/test/models/cityscape_semseg/network/backbone/mobilenetv2.py new file mode 100644 index 00000000..234dbc7f --- /dev/null +++ b/test/models/cityscape_semseg/network/backbone/mobilenetv2.py @@ -0,0 +1,190 @@ +from torch import nn +try: # for torchvision<0.4 + from torchvision.models.utils import load_state_dict_from_url +except: # for torchvision>=0.4 + from torch.hub import load_state_dict_from_url +import torch.nn.functional as F + +__all__ = ['MobileNetV2', 'mobilenet_v2'] + + +model_urls = { + 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth', +} + + +def _make_divisible(v, divisor, min_value=None): + """ + This function is taken from the original tf repo. 
+ It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + :param v: + :param divisor: + :param min_value: + :return: + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNReLU(nn.Sequential): + def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, groups=1): + #padding = (kernel_size - 1) // 2 + super(ConvBNReLU, self).__init__( + nn.Conv2d(in_planes, out_planes, kernel_size, stride, 0, dilation=dilation, groups=groups, bias=False), + nn.BatchNorm2d(out_planes), + nn.ReLU6(inplace=True) + ) + +def fixed_padding(kernel_size, dilation): + kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + return (pad_beg, pad_end, pad_beg, pad_end) + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, dilation, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = int(round(inp * expand_ratio)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers = [] + if expand_ratio != 1: + # pw + layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) + + layers.extend([ + # dw + ConvBNReLU(hidden_dim, hidden_dim, stride=stride, dilation=dilation, groups=hidden_dim), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ]) + self.conv = nn.Sequential(*layers) + + self.input_padding = fixed_padding( 3, dilation ) + + def forward(self, x): + x_pad = F.pad(x, self.input_padding) + if self.use_res_connect: + return x + self.conv(x_pad) + else: + return self.conv(x_pad) + +class MobileNetV2(nn.Module): + def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8): + """ + MobileNet V2 main class + + Args: + num_classes (int): Number of classes + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + """ + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + self.output_stride = output_stride + current_stride = 1 + if inverted_residual_setting is None: + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # only check the first element, assuming user knows t,c,n,s are required + if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: + raise ValueError("inverted_residual_setting should be non-empty " + "or a 4-element list, got {}".format(inverted_residual_setting)) + + # building first layer + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) + features = [ConvBNReLU(3, input_channel, stride=2)] + current_stride *= 2 + dilation=1 + previous_dilation = 1 + + # building inverted residual blocks + for t, c, n, s in 
inverted_residual_setting: + output_channel = _make_divisible(c * width_mult, round_nearest) + previous_dilation = dilation + if current_stride == output_stride: + stride = 1 + dilation *= s + else: + stride = s + current_stride *= s + output_channel = int(c * width_mult) + + for i in range(n): + if i==0: + features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t)) + else: + features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t)) + input_channel = output_channel + # building last several layers + features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) + # make it nn.Sequential + self.features = nn.Sequential(*features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, num_classes), + ) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = x.mean([2, 3]) + x = self.classifier(x) + return x + + +def mobilenet_v2(pretrained=False, progress=True, **kwargs): + """ + Constructs a MobileNetV2 architecture from + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" `_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MobileNetV2(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], + progress=progress) + model.load_state_dict(state_dict) + return model diff --git a/test/models/cityscape_semseg/network/backbone/resnet.py b/test/models/cityscape_semseg/network/backbone/resnet.py new file mode 100644 index 00000000..366a5721 --- /dev/null +++ b/test/models/cityscape_semseg/network/backbone/resnet.py @@ -0,0 +1,346 @@ +import torch +import torch.nn as nn +try: # for torchvision<0.4 + from torchvision.models.utils import load_state_dict_from_url +except: # for torchvision>=0.4 + from torch.hub import load_state_dict_from_url + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', + 'wide_resnet50_2', 'wide_resnet101_2'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, 
stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = 
self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + +def _resnet(arch, block, layers, pretrained, progress, **kwargs): + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, 
progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) + + +def resnext50_32x4d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-50 32x4d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def resnext101_32x8d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) + + +def wide_resnet50_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-50-2 model from + `"Wide Residual Networks" `_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def wide_resnet101_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-101-2 model from + `"Wide Residual Networks" `_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) diff --git a/test/models/cityscape_semseg/network/backbone/xception.py b/test/models/cityscape_semseg/network/backbone/xception.py new file mode 100644 index 00000000..8e7012de --- /dev/null +++ b/test/models/cityscape_semseg/network/backbone/xception.py @@ -0,0 +1,238 @@ + +""" +Xception is adapted from https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py + +Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) +@author: tstandley +Adapted by cadene +Creates an Xception Model as defined in: +Francois Chollet +Xception: Deep Learning with Depthwise Separable Convolutions +https://arxiv.org/pdf/1610.02357.pdf +This weights ported from the Keras implementation. Achieves the following performance on the validation set: +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 +REMEMBER to set your image size to 3x299x299 for both test and validation +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" +from __future__ import print_function, division, absolute_import +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo +from torch.nn import init + +__all__ = ['xception'] + +pretrained_settings = { + 'xception': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/xception-43020ad28.pth', + 'input_space': 'RGB', + 'input_size': [3, 299, 299], + 'input_range': [0, 1], + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'num_classes': 1000, + 'scale': 0.8975 # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False): + super(SeparableConv2d,self).__init__() + + self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias) + self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias) + + def forward(self,x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True, dilation=1): + super(Block, self).__init__() + + if out_filters != in_filters or strides!=1: + self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_filters) + else: + self.skip=None + + rep=[] + + filters=in_filters + if grow_first: + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=dilation, dilation=dilation, bias=False)) + rep.append(nn.BatchNorm2d(out_filters)) + filters = out_filters + + for i in range(reps-1): + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=dilation,dilation=dilation,bias=False)) + rep.append(nn.BatchNorm2d(filters)) + + if not grow_first: + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=dilation,dilation=dilation,bias=False)) + 
rep.append(nn.BatchNorm2d(out_filters)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3,strides,1)) + self.rep = nn.Sequential(*rep) + + def forward(self,inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + x+=skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + def __init__(self, num_classes=1000, replace_stride_with_dilation=None): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + + self.num_classes = num_classes + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False, False] + if len(replace_stride_with_dilation) != 4: + raise ValueError("replace_stride_with_dilation should be None " + "or a 4-element tuple, got {}".format(replace_stride_with_dilation)) + + self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False) # 1 / 2 + self.bn1 = nn.BatchNorm2d(32) + self.relu1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32,64,3,bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.relu2 = nn.ReLU(inplace=True) + #do relu here + + self.block1=self._make_block(64,128,2,2,start_with_relu=False,grow_first=True, dilate=replace_stride_with_dilation[0]) # 1 / 4 + self.block2=self._make_block(128,256,2,2,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[1]) # 1 / 8 + self.block3=self._make_block(256,728,2,2,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) # 1 / 16 + + self.block4=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block5=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block6=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block7=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + + self.block8=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block9=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block10=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + self.block11=self._make_block(728,728,3,1,start_with_relu=True,grow_first=True, dilate=replace_stride_with_dilation[2]) + + self.block12=self._make_block(728,1024,2,2,start_with_relu=True,grow_first=False, dilate=replace_stride_with_dilation[3]) # 1 / 32 + + self.conv3 = SeparableConv2d(1024,1536,3,1,1, dilation=self.dilation) + self.bn3 = nn.BatchNorm2d(1536) + self.relu3 = nn.ReLU(inplace=True) + + #do relu here + self.conv4 = SeparableConv2d(1536,2048,3,1,1, dilation=self.dilation) + self.bn4 = nn.BatchNorm2d(2048) + + self.fc = nn.Linear(2048, num_classes) + + # #------- init weights -------- + # for m in self.modules(): + # if isinstance(m, nn.Conv2d): + # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + # elif isinstance(m, nn.BatchNorm2d): + # m.weight.data.fill_(1) + # m.bias.data.zero_() + # #----------------------------- + + def _make_block(self, in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True, dilate=False): + if dilate: + self.dilation *= strides + strides = 1 + return Block(in_filters,out_filters,reps,strides,start_with_relu=start_with_relu,grow_first=grow_first, dilation=self.dilation) + + def features(self, input): + x = self.conv1(input) + x = self.bn1(x) + x = self.relu1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu2(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = self.relu3(x) + + x = self.conv4(x) + x = self.bn4(x) + return x + + def logits(self, features): + x = nn.ReLU(inplace=True)(features) + + x = F.adaptive_avg_pool2d(x, (1, 1)) + x = x.view(x.size(0), -1) + x = self.last_linear(x) + return x + + def forward(self, input): + x = self.features(input) + x = self.logits(x) + return x + + +def xception(num_classes=1000, pretrained='imagenet', replace_stride_with_dilation=None): + model = Xception(num_classes=num_classes, replace_stride_with_dilation=replace_stride_with_dilation) + if pretrained: + settings = pretrained_settings['xception'][pretrained] + assert num_classes == settings['num_classes'], \ + "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes) + + model = Xception(num_classes=num_classes, replace_stride_with_dilation=replace_stride_with_dilation) + model.load_state_dict(model_zoo.load_url(settings['url'])) + + # TODO: ugly + model.last_linear = model.fc + del model.fc + return model \ No newline at end of file diff --git a/test/models/cityscape_semseg/network/modeling.py b/test/models/cityscape_semseg/network/modeling.py new file mode 100644 index 00000000..ebf409c2 --- /dev/null +++ b/test/models/cityscape_semseg/network/modeling.py @@ -0,0 +1,222 @@ +from .utils import IntermediateLayerGetter +from ._deeplab import DeepLabHead, DeepLabHeadV3Plus, DeepLabV3 +from .backbone import ( + resnet, + mobilenetv2, + hrnetv2, + xception +) + +def _segm_hrnet(name, backbone_name, num_classes, pretrained_backbone): + + backbone = hrnetv2.__dict__[backbone_name](pretrained_backbone) + # HRNetV2 config: + # the final output channels is dependent on highest resolution channel config (c). + # output of backbone will be the inplanes to assp: + hrnet_channels = int(backbone_name.split('_')[-1]) + inplanes = sum([hrnet_channels * 2 ** i for i in range(4)]) + low_level_planes = 256 # all hrnet version channel output from bottleneck is the same + aspp_dilate = [12, 24, 36] # If follow paper trend, can put [24, 48, 72]. 
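As a quick check of the HRNetV2 channel bookkeeping above: the backbone concatenates four streams whose widths double from the highest-resolution width c, so inplanes = sum(c * 2**i for i in range(4)). For the two variants wired up further down this gives (illustrative arithmetic only):

assert sum(48 * 2 ** i for i in range(4)) == 48 + 96 + 192 + 384 == 720  # hrnetv2_48
assert sum(32 * 2 ** i for i in range(4)) == 32 + 64 + 128 + 256 == 480  # hrnetv2_32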
+ + if name=='deeplabv3plus': + return_layers = {'stage4': 'out', 'layer1': 'low_level'} + classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate) + elif name=='deeplabv3': + return_layers = {'stage4': 'out'} + classifier = DeepLabHead(inplanes, num_classes, aspp_dilate) + + backbone = IntermediateLayerGetter(backbone, return_layers=return_layers, hrnet_flag=True) + model = DeepLabV3(backbone, classifier) + return model + +def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone): + + if output_stride==8: + replace_stride_with_dilation=[False, True, True] + aspp_dilate = [12, 24, 36] + else: + replace_stride_with_dilation=[False, False, True] + aspp_dilate = [6, 12, 18] + + backbone = resnet.__dict__[backbone_name]( + pretrained=pretrained_backbone, + replace_stride_with_dilation=replace_stride_with_dilation) + + inplanes = 2048 + low_level_planes = 256 + + if name=='deeplabv3plus': + return_layers = {'layer4': 'out', 'layer1': 'low_level'} + classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate) + elif name=='deeplabv3': + return_layers = {'layer4': 'out'} + classifier = DeepLabHead(inplanes , num_classes, aspp_dilate) + backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) + + model = DeepLabV3(backbone, classifier) + return model + + +def _segm_xception(name, backbone_name, num_classes, output_stride, pretrained_backbone): + if output_stride==8: + replace_stride_with_dilation=[False, False, True, True] + aspp_dilate = [12, 24, 36] + else: + replace_stride_with_dilation=[False, False, False, True] + aspp_dilate = [6, 12, 18] + + backbone = xception.xception(pretrained= 'imagenet' if pretrained_backbone else False, replace_stride_with_dilation=replace_stride_with_dilation) + + inplanes = 2048 + low_level_planes = 128 + + if name=='deeplabv3plus': + return_layers = {'conv4': 'out', 'block1': 'low_level'} + classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate) + elif name=='deeplabv3': + return_layers = {'conv4': 'out'} + classifier = DeepLabHead(inplanes , num_classes, aspp_dilate) + backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) + model = DeepLabV3(backbone, classifier) + return model + + +def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone): + if output_stride==8: + aspp_dilate = [12, 24, 36] + else: + aspp_dilate = [6, 12, 18] + + backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride) + + # rename layers + backbone.low_level_features = backbone.features[0:4] + backbone.high_level_features = backbone.features[4:-1] + backbone.features = None + backbone.classifier = None + + inplanes = 320 + low_level_planes = 24 + + if name=='deeplabv3plus': + return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'} + classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate) + elif name=='deeplabv3': + return_layers = {'high_level_features': 'out'} + classifier = DeepLabHead(inplanes , num_classes, aspp_dilate) + backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) + + model = DeepLabV3(backbone, classifier) + return model + +def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone): + + if backbone=='mobilenetv2': + model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + elif 
backbone.startswith('resnet'): + model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + elif backbone.startswith('hrnetv2'): + model = _segm_hrnet(arch_type, backbone, num_classes, pretrained_backbone=pretrained_backbone) + elif backbone=='xception': + model = _segm_xception(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + else: + raise NotImplementedError + return model + + +# Deeplab v3 +def deeplabv3_hrnetv2_48(num_classes=21, output_stride=4, pretrained_backbone=False): # no pretrained backbone yet + return _load_model('deeplabv3', 'hrnetv2_48', output_stride, num_classes, pretrained_backbone=pretrained_backbone) + +def deeplabv3_hrnetv2_32(num_classes=21, output_stride=4, pretrained_backbone=True): + return _load_model('deeplabv3', 'hrnetv2_32', output_stride, num_classes, pretrained_backbone=pretrained_backbone) + +def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3 model with a ResNet-50 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3 model with a ResNet-101 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs): + """Constructs a DeepLabV3 model with a MobileNetv2 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3_xception(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs): + """Constructs a DeepLabV3 model with a Xception backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3', 'xception', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + + +# Deeplab v3+ +def deeplabv3plus_hrnetv2_48(num_classes=21, output_stride=4, pretrained_backbone=False): # no pretrained backbone yet + return _load_model('deeplabv3plus', 'hrnetv2_48', num_classes, output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3plus_hrnetv2_32(num_classes=21, output_stride=4, pretrained_backbone=True): + return _load_model('deeplabv3plus', 'hrnetv2_32', num_classes, output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3 model with a ResNet-50 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. 
+ pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + + +def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3+ model with a ResNet-101 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + + +def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3+ model with a MobileNetv2 backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) + +def deeplabv3plus_xception(num_classes=21, output_stride=8, pretrained_backbone=True): + """Constructs a DeepLabV3+ model with a Xception backbone. + + Args: + num_classes (int): number of classes. + output_stride (int): output stride for deeplab. + pretrained_backbone (bool): If True, use the pretrained backbone. + """ + return _load_model('deeplabv3plus', 'xception', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) \ No newline at end of file diff --git a/test/models/cityscape_semseg/network/utils.py b/test/models/cityscape_semseg/network/utils.py new file mode 100644 index 00000000..58ea389f --- /dev/null +++ b/test/models/cityscape_semseg/network/utils.py @@ -0,0 +1,93 @@ +import torch +import torch.nn as nn +import numpy as np +import torch.nn.functional as F +from collections import OrderedDict + +class _SimpleSegmentationModel(nn.Module): + def __init__(self, backbone, classifier): + super(_SimpleSegmentationModel, self).__init__() + self.backbone = backbone + self.classifier = classifier + + def forward(self, x): + input_shape = x.shape[-2:] + features = self.backbone(x) + x = self.classifier(features) + x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False) + return x + + +class IntermediateLayerGetter(nn.ModuleDict): + """ + Module wrapper that returns intermediate layers from a model + + It has a strong assumption that the modules have been registered + into the model in the same order as they are used. + This means that one should **not** reuse the same nn.Module + twice in the forward if you want this to work. + + Additionally, it is only able to query submodules that are directly + assigned to the model. So if `model` is passed, `model.feature1` can + be returned, but not `model.feature1.layer2`. + + Arguments: + model (nn.Module): model on which we will extract the features + return_layers (Dict[name, new_name]): a dict containing the names + of the modules for which the activations will be returned as + the key of the dict, and the value of the dict is the name + of the returned activation (which the user can specify). 
+ + Examples:: + + >>> m = torchvision.models.resnet18(pretrained=True) + >>> # extract layer1 and layer3, giving as names `feat1` and feat2` + >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m, + >>> {'layer1': 'feat1', 'layer3': 'feat2'}) + >>> out = new_m(torch.rand(1, 3, 224, 224)) + >>> print([(k, v.shape) for k, v in out.items()]) + >>> [('feat1', torch.Size([1, 64, 56, 56])), + >>> ('feat2', torch.Size([1, 256, 14, 14]))] + """ + def __init__(self, model, return_layers, hrnet_flag=False): + if not set(return_layers).issubset([name for name, _ in model.named_children()]): + raise ValueError("return_layers are not present in model") + + self.hrnet_flag = hrnet_flag + + orig_return_layers = return_layers + return_layers = {k: v for k, v in return_layers.items()} + layers = OrderedDict() + for name, module in model.named_children(): + layers[name] = module + if name in return_layers: + del return_layers[name] + if not return_layers: + break + + super(IntermediateLayerGetter, self).__init__(layers) + self.return_layers = orig_return_layers + + def forward(self, x): + out = OrderedDict() + for name, module in self.named_children(): + if self.hrnet_flag and name.startswith('transition'): # if using hrnet, you need to take care of transition + if name == 'transition1': # in transition1, you need to split the module to two streams first + x = [trans(x) for trans in module] + else: # all other transition is just an extra one stream split + x.append(module(x[-1])) + else: # other models (ex:resnet,mobilenet) are convolutions in series. + x = module(x) + + if name in self.return_layers: + out_name = self.return_layers[name] + if name == 'stage4' and self.hrnet_flag: # In HRNetV2, we upsample and concat all outputs streams together + output_h, output_w = x[0].size(2), x[0].size(3) # Upsample to size of highest resolution stream + x1 = F.interpolate(x[1], size=(output_h, output_w), mode='bilinear', align_corners=False) + x2 = F.interpolate(x[2], size=(output_h, output_w), mode='bilinear', align_corners=False) + x3 = F.interpolate(x[3], size=(output_h, output_w), mode='bilinear', align_corners=False) + x = torch.cat([x[0], x1, x2, x3], dim=1) + out[out_name] = x + else: + out[out_name] = x + return out diff --git a/test/models/fnet/__init__.py b/test/models/fnet/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/models/fnet/conv3dnet.py b/test/models/fnet/conv3dnet.py new file mode 100644 index 00000000..855bc7b3 --- /dev/null +++ b/test/models/fnet/conv3dnet.py @@ -0,0 +1,97 @@ +import torch + + +class Net(torch.nn.Module): + def __init__(self, depth=4, mult_chan=32, in_channels=1, out_channels=1): + super().__init__() + self.depth = depth + self.mult_chan = mult_chan + self.in_channels = in_channels + self.out_channels = out_channels + + self.net_recurse = _Net_recurse( + n_in_channels=self.in_channels, mult_chan=self.mult_chan, depth_parent=self.depth, depth=self.depth + ) + self.conv_out = torch.nn.Conv3d( + self.mult_chan, self.out_channels, kernel_size=3, padding=1 + ) + + def forward(self, x): + x_rec = self.net_recurse(x) + return self.conv_out(x_rec) + + +class _Net_recurse(torch.nn.Module): + def __init__(self, n_in_channels, mult_chan=2, depth_parent=0, depth=0): + """Class for recursive definition of U-network.p + + Parameters + ---------- + in_channels + Number of channels for input. + mult_chan + Factor to determine number of output channels + depth + If 0, this subnet will only be convolutions that double the channel + count. 
+ + """ + super().__init__() + + self.depth = depth + + if self.depth == depth_parent: + n_out_channels = mult_chan + else: + n_out_channels = n_in_channels * mult_chan + + self.sub_2conv_more = SubNet2Conv(n_in_channels, n_out_channels) + if depth > 0: + self.sub_2conv_less = SubNet2Conv(2 * n_out_channels, n_out_channels) + self.conv_down = torch.nn.Conv3d( + n_out_channels, n_out_channels, 2, stride=2 + ) + self.bn0 = torch.nn.BatchNorm3d(n_out_channels) + self.relu0 = torch.nn.ReLU() + self.convt = torch.nn.ConvTranspose3d( + 2 * n_out_channels, n_out_channels, kernel_size=2, stride=2 + ) + self.bn1 = torch.nn.BatchNorm3d(n_out_channels) + self.relu1 = torch.nn.ReLU() + self.sub_u = _Net_recurse(n_out_channels, mult_chan=2, depth_parent=depth_parent, depth=(depth - 1)) + + def forward(self, x): + if self.depth == 0: + return self.sub_2conv_more(x) + else: # depth > 0 + x_2conv_more = self.sub_2conv_more(x) + x_conv_down = self.conv_down(x_2conv_more) + x_bn0 = self.bn0(x_conv_down) + x_relu0 = self.relu0(x_bn0) + x_sub_u = self.sub_u(x_relu0) + x_convt = self.convt(x_sub_u) + x_bn1 = self.bn1(x_convt) + x_relu1 = self.relu1(x_bn1) + x_cat = torch.cat((x_2conv_more, x_relu1), 1) # concatenate + x_2conv_less = self.sub_2conv_less(x_cat) + return x_2conv_less + + +class SubNet2Conv(torch.nn.Module): + def __init__(self, n_in, n_out): + super().__init__() + self.conv1 = torch.nn.Conv3d(n_in, n_out, kernel_size=3, padding=1) + self.bn1 = torch.nn.BatchNorm3d(n_out) + self.relu1 = torch.nn.ReLU() + self.conv2 = torch.nn.Conv3d(n_out, n_out, kernel_size=3, padding=1) + self.bn2 = torch.nn.BatchNorm3d(n_out) + self.relu2 = torch.nn.ReLU() + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu2(x) + return x diff --git a/test/models/fnet/fnet2d.py b/test/models/fnet/fnet2d.py new file mode 100644 index 00000000..e00fd1ec --- /dev/null +++ b/test/models/fnet/fnet2d.py @@ -0,0 +1,87 @@ +import torch + + +class Net(torch.nn.Module): + def __init__(self): + super().__init__() + mult_chan = 32 + depth = 4 + self.net_recurse = _Net_recurse( + n_in_channels=1, mult_chan=mult_chan, depth=depth + ) + self.conv_out = torch.nn.Conv2d(mult_chan, 1, kernel_size=3, padding=1) + + def forward(self, x): + x_rec = self.net_recurse(x) + return self.conv_out(x_rec) + + +class _Net_recurse(torch.nn.Module): + def __init__(self, n_in_channels, mult_chan=2, depth=0): + """Class for recursive definition of U-network.p + + Parameters + ---------- + in_channels + Number of channels for input. + mult_chan + Factor to determine number of output channels + depth + If 0, this subnet will only be convolutions that double the channel + count. 
+ + """ + super().__init__() + self.depth = depth + n_out_channels = n_in_channels * mult_chan + self.sub_2conv_more = SubNet2Conv(n_in_channels, n_out_channels) + + if depth > 0: + self.sub_2conv_less = SubNet2Conv(2 * n_out_channels, n_out_channels) + self.conv_down = torch.nn.Conv2d( + n_out_channels, n_out_channels, 2, stride=2 + ) + self.bn0 = torch.nn.BatchNorm2d(n_out_channels) + self.relu0 = torch.nn.ReLU() + self.convt = torch.nn.ConvTranspose2d( + 2 * n_out_channels, n_out_channels, kernel_size=2, stride=2 + ) + self.bn1 = torch.nn.BatchNorm2d(n_out_channels) + self.relu1 = torch.nn.ReLU() + self.sub_u = _Net_recurse(n_out_channels, mult_chan=2, depth=(depth - 1)) + + def forward(self, x): + if self.depth == 0: + return self.sub_2conv_more(x) + else: # depth > 0 + x_2conv_more = self.sub_2conv_more(x) + x_conv_down = self.conv_down(x_2conv_more) + x_bn0 = self.bn0(x_conv_down) + x_relu0 = self.relu0(x_bn0) + x_sub_u = self.sub_u(x_relu0) + x_convt = self.convt(x_sub_u) + x_bn1 = self.bn1(x_convt) + x_relu1 = self.relu1(x_bn1) + x_cat = torch.cat((x_2conv_more, x_relu1), 1) # concatenate + x_2conv_less = self.sub_2conv_less(x_cat) + return x_2conv_less + + +class SubNet2Conv(torch.nn.Module): + def __init__(self, n_in, n_out): + super().__init__() + self.conv1 = torch.nn.Conv2d(n_in, n_out, kernel_size=3, padding=1) + self.bn1 = torch.nn.BatchNorm2d(n_out) + self.relu1 = torch.nn.ReLU() + self.conv2 = torch.nn.Conv2d(n_out, n_out, kernel_size=3, padding=1) + self.bn2 = torch.nn.BatchNorm2d(n_out) + self.relu2 = torch.nn.ReLU() + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu2(x) + return x diff --git a/test/models/fnet/test_fnet.py b/test/models/fnet/test_fnet.py new file mode 100644 index 00000000..4072e470 --- /dev/null +++ b/test/models/fnet/test_fnet.py @@ -0,0 +1,15 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from test.models.fnet.conv3dnet import Net + + +@pytest.mark.slow +@pytest.mark.parametrize('pretrained', [True]) +def test_fnet(pretrained): + np.random.seed(seed=NP_SEED) + model = Net() + model.eval() + + input_np = np.random.uniform(0, 1, (1, 1, 32, 64, 64)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True, epsilon=2 * 10 ** (-5)) diff --git a/test/models/mnist/mnist-12.onnx b/test/models/mnist/mnist-12.onnx new file mode 100644 index 00000000..6661bfe3 Binary files /dev/null and b/test/models/mnist/mnist-12.onnx differ diff --git a/test/models/mnist/test_mnist.py b/test/models/mnist/test_mnist.py new file mode 100644 index 00000000..d172b654 --- /dev/null +++ b/test/models/mnist/test_mnist.py @@ -0,0 +1,23 @@ +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +import onnxruntime as ort +import pytest +import pathlib + +def test_mnist_mode(): + # load onnx model + dir = pathlib.Path(__file__).parent.resolve() + model_path = f"{dir}/mnist-12.onnx" + onnx_model = onnx.load(model_path) + # convert onnx model to keras + keras_model = onnx_to_keras(onnx_model, input_names=["Input3"], name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=False, verbose=True) + ort_session = ort.InferenceSession(model_path) + img = np.random.random((1,1,28,28)) + keras_output = 
final_model(img) + onnx_outputs = ort_session.run(None, {"Input3": img.astype(np.float32)}) + is_same = np.allclose(onnx_outputs, keras_output, 1e-6) diff --git a/test/models/private_tests/__init__.py b/test/models/private_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/models/private_tests/aws_utils.py b/test/models/private_tests/aws_utils.py new file mode 100644 index 00000000..1ce3d487 --- /dev/null +++ b/test/models/private_tests/aws_utils.py @@ -0,0 +1,66 @@ +import pytest +import boto3 +import os +import shutil +import tempfile + +BUCKET_NAME = 'tensorleap-engine-tests-dev' +PREFIX = 'onnx2keras' +if not 'LOCAL_TEST' in os.environ: + s3 = boto3.client( + 's3', + aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'], + aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'], + aws_session_token=os.environ['AWS_SESSION_TOKEN'], + region_name='us-east-1' + ) + +@pytest.fixture +def aws_s3_download(request): + def download_from_s3(aws_dir, dest_dir="", is_temp=False): + if 'LOCAL_TEST' in os.environ: + return dest_dir, is_temp + real_dir = "" + if not is_temp: + real_dir = dest_dir + if len(dest_dir) == 0: + raise Exception("Need to provide destination dir if non-temp directory is used for file downloading") + if not os.path.exists(dest_dir): + os.makedirs(dest_dir, exist_ok=True) + else: + # Create a temporary directory + real_dir = tempfile.mkdtemp() + path = f"{PREFIX}/{aws_dir}" # Use the provided directory as the prefix + # List objects in the bucket with the specified prefix + response = s3.list_objects_v2( + Bucket=BUCKET_NAME, + Prefix=path + ) + + # Download files to the temporary directory + if 'Contents' in response: + for obj in response['Contents']: + key = obj['Key'] + if obj['Size'] > 0: + rel_path = key[len(path):].lstrip("/") + dirname = os.path.dirname(rel_path) + full_dir = os.path.join(real_dir, dirname) + if len(full_dir) > 0 and not os.path.exists(full_dir): + os.makedirs(full_dir) + filename = os.path.join(real_dir, rel_path) + if not os.path.exists(filename): + s3.download_file(BUCKET_NAME, key, filename) + print(f"Downloaded {key} to {filename}") + else: + print("No objects found under the specified prefix.") + + # Provide the temporary directory path to the test function + return real_dir, is_temp + + # Yield the download_from_s3 function so it can be used as a fixture + dir_path, is_temp = download_from_s3(*request.param) + yield dir_path + + # Clean up the temporary directory after the test + if is_temp: + shutil.rmtree(dir_path) diff --git a/test/models/private_tests/test_a_1.py b/test/models/private_tests/test_a_1.py new file mode 100644 index 00000000..d69475ce --- /dev/null +++ b/test/models/private_tests/test_a_1.py @@ -0,0 +1,47 @@ +import onnxruntime as ort +import numpy as np +import onnx +from PIL import Image + +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +@pytest.mark.parametrize('aws_s3_download', [["pick/", "pick/", False]], indirect=True) +def test_pick_1(aws_s3_download): + model_path = f'{aws_s3_download}/sample-gen-grasp-v2.onnx' + image_path = f'{aws_s3_download}/pick_image_downsample_4x.jpg' + image = Image.open(image_path) + + # Resize the image to 512x512 + image_resized = image.resize((512, 512)) + + # Convert the image to a NumPy array + image_array = np.array(image_resized) + + image_array = np.transpose(image_array, (2, 0, 1)).astype(np.float32) / 255 + 
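The aws_s3_download fixture defined above is what all of these private tests build on: it is parametrized indirectly with (aws_dir, dest_dir, is_temp), mirrors s3://tensorleap-engine-tests-dev/onnx2keras/<aws_dir> into a local directory, and skips S3 entirely when the LOCAL_TEST environment variable is set. A hypothetical minimal test using it (the directory and test names here are invented for illustration):

import pytest
from test.models.private_tests.aws_utils import aws_s3_download


@pytest.mark.parametrize('aws_s3_download', [["my_model/", "my_model/", False]], indirect=True)
def test_my_model_assets(aws_s3_download):
    # aws_s3_download yields the local directory that now holds the downloaded files
    model_dir = aws_s3_download
    assert isinstance(model_dir, str)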
rgb_input = np.expand_dims(image_array, axis=0) + + pick_parameters = np.array([200, 300, 0, 0, 1, 0.2, 1, 0, 0, 0, 0, 0, 0]).astype(np.float32) + pick_parameters = np.expand_dims(pick_parameters, axis=0) + + session = ort.InferenceSession(model_path) + + # Get the names of the input and output nodes + input_names = [i.name for i in session.get_inputs()] + output_names = [o.name for o in session.get_outputs()] + + onnx_model = onnx.load(model_path) + keras_model = onnx_to_keras(onnx_model, input_names, name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=False) + res = final_model([pick_parameters, rgb_input]) + + res_onnx = session.run(output_names, {input_names[0]: rgb_input, input_names[1]: pick_parameters}) + + assert abs((res[0].numpy() - res_onnx[0])[0][0]) < 1e-3 + assert np.sum(np.abs(res[1].numpy() - res_onnx[1])) < 2e-3 + assert np.sum(np.abs(res[2].numpy() - res_onnx[2])) < 4e-3 + diff --git a/test/models/private_tests/test_a_2.py b/test/models/private_tests/test_a_2.py new file mode 100644 index 00000000..35a0e7aa --- /dev/null +++ b/test/models/private_tests/test_a_2.py @@ -0,0 +1,36 @@ +import onnxruntime as ort +import numpy as np +import onnx +from PIL import Image +import tensorflow as tf +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +@pytest.mark.parametrize('aws_s3_download', [["pick_2/", "pick_2/", False]], indirect=True) +def test_pick_2(aws_s3_download): + model_path = f'{aws_s3_download}/new_2.onnx' + + session = ort.InferenceSession(model_path) + + # Get the names of the input and output nodes + input_names = [i.name for i in session.get_inputs()] + output_names = [o.name for o in session.get_outputs()] + + onnx_model = onnx.load(model_path) + keras_model = onnx_to_keras(onnx_model, input_names, name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=False) + img = np.random.random((1, 3, 512, 512)).astype(np.float32) + pick_param = np.random.random((13)).astype(np.float32) + pick_parameters = np.expand_dims(pick_param, axis=0) + res = final_model([tf.convert_to_tensor(pick_parameters), tf.convert_to_tensor(img)]) + + res_onnx = session.run(output_names, {input_names[0]: img, input_names[1]: pick_parameters}) + + assert abs((res[0].numpy() - res_onnx[0])[0][0]) < 5e-2 + assert np.sum(np.abs(res[1].numpy() - res_onnx[1])) < 5e-2 + assert np.sum(np.abs(res[2].numpy() - res_onnx[2])) < 0.5 + diff --git a/test/models/private_tests/test_clip.py b/test/models/private_tests/test_clip.py new file mode 100644 index 00000000..e6a951f3 --- /dev/null +++ b/test/models/private_tests/test_clip.py @@ -0,0 +1,39 @@ +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +import onnxruntime as ort +from onnx2kerastl.customonnxlayer import onnx_custom_objects_map +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +@pytest.mark.parametrize('aws_s3_download', [["clip/", "clip/", False]], indirect=True) +def test_clip_model(aws_s3_download): + # declare paths + onnx_model_path = f'{aws_s3_download}/clip.onnx' + save_model_path = 
f'{aws_s3_download}/clip.h5'
+ token_ids = [320 for _ in range(75)]
+ token_ids.insert(0, 49406)
+ token_ids.append(49407)
+ input_data = {'input_ids': np.asarray(token_ids).astype(np.int64).reshape(1, -1),
+ 'attention_mask': np.ones((1, 77)).astype(np.int64)}
+ # load onnx model
+ onnx_model = onnx.load(onnx_model_path)
+ # extract feature names from the model
+ input_features = list(input_data.keys())
+ # convert onnx model to keras
+ keras_model = onnx_to_keras(onnx_model, input_names=input_features, name_policy='attach_weights_name',
+ allow_partial_compilation=False).converted_model
+ final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True, verbose=True)
+ # final_model = tf.keras.models.Model(inputs=final_model.input, outputs=final_model.layers[-2].output)
+ keras_output = keras_model(input_data)
+ final_model.save(save_model_path)
+ ort_session = ort.InferenceSession(onnx_model_path, providers=[ort.get_available_providers()[0]])
+ onnx_outputs = ort_session.run(None, input_data)
+ loaded_keras_model = tf.keras.models.load_model(save_model_path, custom_objects=onnx_custom_objects_map)
+ loaded_keras_outputs = loaded_keras_model(input_data)
+ onnx_embedding = onnx_outputs[1]
+ keras_embedding = loaded_keras_outputs[1].numpy()
+ assert np.allclose(onnx_embedding, keras_embedding, 1e-3)
diff --git a/test/models/private_tests/test_ctformer.py b/test/models/private_tests/test_ctformer.py
new file mode 100644
index 00000000..f06b9d56
--- /dev/null
+++ b/test/models/private_tests/test_ctformer.py
@@ -0,0 +1,31 @@
+# code to properly load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/
+import onnx
+import onnxruntime as ort
+import numpy as np
+from test.models.private_tests.aws_utils import aws_s3_download
+
+import pytest
+from onnx2kerastl import onnx_to_keras
+from keras_data_format_converter import convert_channels_first_to_last
+
+@pytest.mark.parametrize('aws_s3_download', [["ctformer/", "ctformer/", False]], indirect=True)
+def test_ctformer(
+ aws_s3_download) -> None:
+ onnx_path = f'{aws_s3_download}/ctformer.onnx'
+ onnx_model = onnx.load(onnx_path)
+
+ rng = np.random.default_rng(seed=42)
+ input_array = rng.random((1, 1, 64, 64)).astype(np.float32)
+ input_tensor = input_array
+
+ keras_model = onnx_to_keras(onnx_model, input_names=['input'], name_policy='attach_weights_name'
+ , allow_partial_compilation=False)
+ keras_model = keras_model.converted_model
+ final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=False)
+ ort_session = ort.InferenceSession(onnx_path)
+ onnx_res = ort_session.run(
+ None,
+ {"input": input_array},
+ )[0]
+ keras_preds = final_model(input_array)[0]
+ assert np.abs(keras_preds - onnx_res).max() < 1e-04
diff --git a/test/models/private_tests/test_dinov2.py b/test/models/private_tests/test_dinov2.py
new file mode 100644
index 00000000..1498d4b5
--- /dev/null
+++ b/test/models/private_tests/test_dinov2.py
@@ -0,0 +1,34 @@
+import numpy as np
+import onnx
+from onnx2kerastl import onnx_to_keras
+from keras_data_format_converter import convert_channels_first_to_last
+import onnxruntime as ort
+import torch
+import pytest
+from test.models.private_tests.aws_utils import aws_s3_download
+
+
+@pytest.mark.parametrize('aws_s3_download', [["dinov2/", "dinov2/", False]], indirect=True)
+def test_dinov2(aws_s3_download):
+ # This is commented out in case we'll upgrade python
+ # batch_size = 1
+ # dinov2_vits14 = 
torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14') + # wm = wrapper_model(dinov2_vits14).to('cpu') + # wm.eval() + # dummy_input = torch.FloatTensor(np.random.uniform(0, 1, (batch_size, 3, 224, 224))) + # torch.onnx.export(wm, dummy_input, "dino-2-test.onnx", input_names=['img'], + # output_names=['vit_out']) + np_input = list(np.random.rand(1, 3, 224, 224)) + onnx_path = f'{aws_s3_download}/dino-2-test.onnx' + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['img', 'masks'], allow_partial_compilation=False) + flipped_model = convert_channels_first_to_last(keras_model.converted_model, should_transform_inputs_and_outputs=False) + ort_session = ort.InferenceSession(onnx_path) + keras_res = flipped_model(np.array(np_input)) + res = ort_session.run( + ['vit_out'], + input_feed={"img": np.array(np_input).astype(np.float32)} + ) + t_mean, t_max = (res[0]-keras_res).__abs__().numpy().mean(), (res[0]-keras_res).__abs__().numpy().max() + assert t_mean < 5e-2 + assert t_max < 0.4 diff --git a/test/models/private_tests/test_gps.py b/test/models/private_tests/test_gps.py new file mode 100644 index 00000000..e31b2b3b --- /dev/null +++ b/test/models/private_tests/test_gps.py @@ -0,0 +1,32 @@ +import onnxruntime as ort +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +from test.models.private_tests.aws_utils import aws_s3_download +import pytest +import onnxruntime as rt + + +@pytest.mark.parametrize('aws_s3_download', [["gps/", "gps/", False]], indirect=True) +def test_gps(aws_s3_download): + onnx_model_path = f'{aws_s3_download}/gps_750_v1.onnx' + onnx_model = onnx.load(onnx_model_path) + keras_model = onnx_to_keras(onnx_model, ['images', 'gps', 'masks'], name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + data = np.random.random((1, 11, 3, 224, 224)) + gps = np.random.random((1, 10, 2)) + masks = np.ones((1, 11)) + masks[:, :8] = 0 + res = final_model([data.transpose([0, 2, 3, 4, 1]), masks, gps.transpose([0,2,1])]) + sess = rt.InferenceSession(onnx_model_path) + input_name_1 = sess.get_inputs()[0].name + input_name_2 = sess.get_inputs()[1].name + input_name_3 = sess.get_inputs()[2].name + label_name = sess.get_outputs()[0].name + pred = sess.run([label_name], + {input_name_1: data.astype(np.float32), input_name_2: gps.astype(np.float32), + input_name_3: masks.astype(np.float32)}) + assert (pred[0] - res).numpy().__abs__().max() < 2e-5 diff --git a/test/models/private_tests/test_iconqa.py b/test/models/private_tests/test_iconqa.py new file mode 100644 index 00000000..a93d65d0 --- /dev/null +++ b/test/models/private_tests/test_iconqa.py @@ -0,0 +1,47 @@ +import onnxruntime as ort +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +def get_keras_layer_model(keras_model, layer_name): + idx = np.argmax([keras_model.layers[i].name == layer_name for i in range(len(keras_model.layers))]) + return tf.keras.Model(keras_model.input, keras_model.layers[idx].output) + + +# res_torch = np.load('logits.npy') +@pytest.mark.parametrize('aws_s3_download', [["iconqa/", "iconqa/", False]], indirect=True) +def 
test_iconqa(aws_s3_download): + img = np.load(f'{aws_s3_download}/torch_img.npy') + c = np.load(f'{aws_s3_download}/c.npy') + q = np.load(f'{aws_s3_download}/q.npy') + onnx_model = onnx.load(f'{aws_s3_download}/complete_model.onnx') + keras_model = onnx_to_keras(onnx_model, ['img', 'question', 'choices'], name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_k = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + final_k.save('temp.h5') + random_d = np.random.random(size=img.shape)*50 + loaded_model = tf.keras.models.load_model('temp.h5') + res_perm = loaded_model([q, np.transpose(img, [0, 2, 3, 1]), c.swapaxes(1,2)]) + res_perm_2 = loaded_model([q, np.transpose(img+random_d, [0, 2, 3, 1]), c.swapaxes(1,2)]) + + sess = ort.InferenceSession(f'{aws_s3_download}/complete_model.onnx') + res_onnx = sess.run( + ['logits'], + input_feed={'choices': c, 'question':q, 'img':img.astype(np.float32)} + ) + res_2_onnx = sess.run( + ['logits'], + input_feed={'choices': c, 'question':q, 'img':img.astype(np.float32)+random_d.astype(np.float32)} + ) + diff_res = res_perm-res_onnx[0] + assert diff_res.numpy().mean() < 0.1 + assert diff_res.numpy().max() < 0.2 + + diff_res_2 = res_perm_2-res_2_onnx[0] + assert diff_res_2.numpy().mean() < 0.1 + assert diff_res_2.numpy().max() < 0.2 diff --git a/test/models/private_tests/test_interfuser.py b/test/models/private_tests/test_interfuser.py new file mode 100644 index 00000000..83e5b5cf --- /dev/null +++ b/test/models/private_tests/test_interfuser.py @@ -0,0 +1,38 @@ +# code to proprely load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/ +import onnx +import onnxruntime as ort +import numpy as np +import pytest +from test.models.private_tests.aws_utils import aws_s3_download + +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last + +@pytest.mark.parametrize('aws_s3_download', [["interfuser/", "interfuser/", False]], indirect=True) +def test_interfuser(aws_s3_download): + onnx_path = f'{aws_s3_download}/interfuser_planKD_26_3M_256.onnx' + onnx_model = onnx.load(onnx_path) + output_names = ['out0', 'out1', 'out2', 'out3', 'out4', 'out5'] + input_keys = ['lidar', 'measurements', 'rgb','rgb_center','rgb_left', 'rgb_right', 'target_point'] + shapes = [ + (1, 3, 224, 224), + (1, 7), + (1, 3, 256, 256), + (1, 3, 128, 128), + (1, 3, 256, 256), + (1, 3, 256, 256), + (1, 2) + ] + + inputs = tuple(np.random.rand(*shape).astype(np.float32) for shape in shapes) + inputs = {key: inp_ for key, inp_ in zip(input_keys, inputs)} + keras_model = onnx_to_keras(onnx_model, input_keys, name_policy='attach_weights_name' + , allow_partial_compilation=False) + keras_model = keras_model.converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=False) + ort_session = ort.InferenceSession(onnx_path) + onnx_res = ort_session.run( + output_names, + input_feed=inputs)[0] + keras_preds = final_model(inputs)[0] + assert np.abs(keras_preds - onnx_res).max() < 1e-04 diff --git a/test/models/private_tests/test_kiwibot.py b/test/models/private_tests/test_kiwibot.py new file mode 100644 index 00000000..554fcd89 --- /dev/null +++ b/test/models/private_tests/test_kiwibot.py @@ -0,0 +1,42 @@ +import numpy as np +import onnx +import onnxruntime as ort +from onnx2kerastl import onnx_to_keras +import tensorflow as tf +from keras_data_format_converter import convert_channels_first_to_last +from 
onnx2kerastl.customonnxlayer import onnx_custom_objects_map +import pytest + +from test.models.private_tests.aws_utils import aws_s3_download + +@pytest.mark.parametrize('aws_s3_download', [["kiwibot/", "kiwibot/", False]], indirect=True) +def test_kiwibot(aws_s3_download): + onnx_model_path = f'{aws_s3_download}/model.onnx' + save_model_path = f'{aws_s3_download}/model.h5' + + input_data = np.random.uniform(0, 255, (1, 360, 640, 3)).astype(np.uint8) + # load onnx model + onnx_model = onnx.load(onnx_model_path) + # extract feature names from the model + input_features = [inp.name for inp in onnx_model.graph.input] + # convert onnx model to keras + keras_model = onnx_to_keras(onnx_model, input_names=input_features, + name_policy='attach_weights_name', allow_partial_compilation=False).converted_model + + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True, + verbose=True) + + final_model.save(save_model_path) + + loaded_keras_model = tf.keras.models.load_model(save_model_path, custom_objects=onnx_custom_objects_map) + keras_output = loaded_keras_model(input_data) + keras_output_np = [output.numpy().transpose((0, 2, 1)) for output in keras_output] + + onnx_session = ort.InferenceSession(onnx_model_path) + onnx_output = onnx_session.run(None, {'input_0': input_data.transpose((0, 3, 1, 2)).astype(np.float32)}) + + # masks after softmax + assert np.abs(keras_output_np[1] - onnx_output[1]).max() < 1e-1 + assert np.abs(keras_output_np[2] - onnx_output[2]).max() < 1e-1 + # mask after pixel class prediction + assert np.abs(keras_output_np[0] == onnx_output[0]).sum() / np.prod(keras_output_np[0].shape) > 0.99 diff --git a/test/models/private_tests/test_maskrcnn.py b/test/models/private_tests/test_maskrcnn.py new file mode 100644 index 00000000..0e0bd5db --- /dev/null +++ b/test/models/private_tests/test_maskrcnn.py @@ -0,0 +1,76 @@ +import numpy as np +import onnx +import pytest + +from onnx2kerastl import onnx_to_keras +import tensorflow as tf +from keras_data_format_converter import convert_channels_first_to_last +from test.models.private_tests.aws_utils import aws_s3_download + + +def find_best_matches(A, B): + n = A.shape[0] + best_matches = np.ones(n, dtype=int)*(-1) + for i in range(n): + best_distance = float('inf') + best_index = -1 + for j in range(n): + if j not in best_matches: + distance = np.sum((A[i] - B[j]) ** 2) + if distance < best_distance: + best_distance = distance + best_index = j + if best_index == -1: + raise Exception("Could not greedily match the order of the two arrays") + best_matches[i] = best_index + return best_matches + + +def error_test(a, b, epsilon=0.04): + abs_difference = np.sqrt((a-b)**2) + assert abs_difference.max() < epsilon + assert abs_difference.mean() < epsilon + + +@pytest.mark.parametrize('aws_s3_download', [["maskrcnn/", "maskrcnn/", False]], indirect=True) +def test_maskrcnn_eff(aws_s3_download): + onnx_model_path = f'{aws_s3_download}/maskrcnn.onnx' + # save_model_path = f'effizency_models/mod_efficiency.h5' + real_img = np.load(f'{aws_s3_download}/img.npy') + real_img = np.transpose(real_img, [1, 2, 0])[None, ...] 
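A small worked example of the greedy find_best_matches helper above, which the assertions below rely on to re-order detected instances before comparing them (illustration only):

import numpy as np

A = np.array([[0.0, 0.0], [1.0, 1.0]])
B = np.array([[1.1, 0.9], [0.1, -0.1]])
# Row 0 of A is nearest to row 1 of B, and row 1 of A to row 0 of B, so
# find_best_matches(A, B) returns array([1, 0]).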
+ real_img = real_img - np.array([103.53, 116.28, 123.675]) + # load onnx model + onnx_model = onnx.load(onnx_model_path) + # extract feature names from the model + input_features = [inp.name for inp in onnx_model.graph.input] + # # convert onnx model to keras + keras_model = onnx_to_keras(onnx_model, input_names=input_features, name_policy='attach_weights_name', + allow_partial_compilation=False) + final_model = convert_channels_first_to_last(keras_model.converted_model, should_transform_inputs_and_outputs=True, verbose=True) + # final_model.save('temp.h5') + # final_model = tf.keras.models.load_model('temp.h5') + keras_output = final_model(real_img) + fin = [] + for i in range(12): + fin.append(np.load(f'{aws_s3_download}/orig_outputs/out_{i}.npy')) + fin = [*fin[:4], *fin[5:]] + instance_idx_match = find_best_matches(np.transpose(keras_output[0].numpy()[0],[1,0]), fin[0]) + bb_loc_pixel_threshold = 8 #allow a movement of up to 8 pixels, 1% of image + error_test(np.transpose(keras_output[0].numpy()[0],[1,0]), fin[0][instance_idx_match], epsilon=bb_loc_pixel_threshold) + mask_prob_th = 0.025 + assert (fin[3][instance_idx_match] - np.transpose(keras_output[3][0], [3,0,1,2])).__abs__().mean()\ + < mask_prob_th + prob_th = 0.017 + prob_diff = (fin[1][instance_idx_match] - keras_output[1][0]).__abs__().numpy() + assert prob_diff.max() < prob_th + assert prob_diff.mean() < prob_th + assert (fin[4] - keras_output[4]).numpy().max() < 3e-5 + assert (fin[5]-np.transpose(keras_output[5].numpy(), [0, 2, 1])).max() < 3e-6 + assert (keras_output[6][0]-np.transpose(fin[6], [1, 0])).__abs__().numpy().mean() < 0.21 + assert (fin[7]-np.transpose(keras_output[7][0], [1, 0])).__abs__().mean() < 0.26 + assert (fin[8][instance_idx_match]-np.transpose(keras_output[8].numpy()[0], [3, 0, 1, 2])).__abs__().mean() < 0.29 + assert (fin[9]-np.transpose(keras_output[9].numpy()[0],[1,0])).__abs__().max() < 4e-4 + assert (fin[10]-keras_output[10][0]).numpy().__abs__().max() < 1.4e-5 + print(1) + # dummy_input = np.ones((1, 800, 800, 3))+np.random.random((1, 800, 800, 3)) + # res = final_model(dummy_input) # be sure we are able to diff --git a/test/models/private_tests/test_mmdet_convnext.py b/test/models/private_tests/test_mmdet_convnext.py new file mode 100644 index 00000000..c38e50fa --- /dev/null +++ b/test/models/private_tests/test_mmdet_convnext.py @@ -0,0 +1,31 @@ +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +import tensorflow as tf +from keras_data_format_converter import convert_channels_first_to_last +from onnx2kerastl.customonnxlayer import onnx_custom_objects_map +import pytest +from test.models.private_tests.aws_utils import aws_s3_download + + +@pytest.mark.parametrize('aws_s3_download', [["mmdet_convnext/", "mmdet_convnext/", False]], indirect=True) +def test_mmdet_convnext(aws_s3_download): + onnx_model_path = f'{aws_s3_download}/simplified_por_convnext.onnx' + save_model_path = f'{aws_s3_download}/simplified_por_convnext.h5' + + input_data = np.random.uniform(0, 1, (1, 480, 640, 3)).astype(np.float32) + # load onnx model + onnx_model = onnx.load(onnx_model_path) + # extract feature names from the model + input_features = [inp.name for inp in onnx_model.graph.input] + # convert onnx model to keras + keras_model = onnx_to_keras(onnx_model, input_names=input_features, + name_policy='attach_weights_name', allow_partial_compilation=False).converted_model + + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True, + verbose=True) + + 
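For reference, a tiny numeric example of the error_test helper used in the maskrcnn checks above: it bounds both the maximum and the mean absolute deviation by the same epsilon (illustration only):

import numpy as np

a = np.array([1.00, 2.00, 3.00])
b = np.array([1.01, 2.02, 2.99])
# |a - b| = [0.01, 0.02, 0.01]: max 0.02 < 0.04 and mean ~0.013 < 0.04,
# so error_test(a, b, epsilon=0.04) passes without raising.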
final_model.save(save_model_path) + + loaded_keras_model = tf.keras.models.load_model(save_model_path, custom_objects=onnx_custom_objects_map) + keras_output = loaded_keras_model(input_data) diff --git a/test/models/private_tests/test_swin.py b/test/models/private_tests/test_swin.py new file mode 100644 index 00000000..65cb5e5d --- /dev/null +++ b/test/models/private_tests/test_swin.py @@ -0,0 +1,25 @@ +import onnxruntime as ort +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +@pytest.mark.parametrize('aws_s3_download', [["swin/", "swin/", False]], indirect=True) +def test_swin(aws_s3_download): + model_path = f'{aws_s3_download}/swin_v2_t.onnx' + inpt = np.load(f'{aws_s3_download}/input.npy') + result = np.load(f'{aws_s3_download}/output.npy') + onnx_model = onnx.load(model_path) + keras_model = onnx_to_keras(onnx_model, ['input'], name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + res = final_model(inpt) + mean_error = (res-result).numpy().__abs__().mean() + max_error = (res-result).numpy().__abs__().max() + eps = 5e-6 + assert mean_error < eps + assert max_error < eps diff --git a/test/models/private_tests/test_traffic_light.py b/test/models/private_tests/test_traffic_light.py new file mode 100644 index 00000000..316684d7 --- /dev/null +++ b/test/models/private_tests/test_traffic_light.py @@ -0,0 +1,56 @@ +import onnxruntime as ort +import numpy as np +import onnx +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import tensorflow as tf +from test.models.private_tests.aws_utils import aws_s3_download +import pytest + + +@pytest.mark.parametrize('aws_s3_download', [["traffic_light/", "traffic_light/", False]], indirect=True) +def test_traffic_light(aws_s3_download): + model_path = f'{aws_s3_download}/model.onnx' + img = np.load(f'{aws_s3_download}/traffic_input.npy') + onnx_model = onnx.load(model_path) + keras_model = onnx_to_keras(onnx_model, ['image'], name_policy='attach_weights_name', + allow_partial_compilation=False).converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + res = final_model(np.transpose(img, [0, 2, 3, 1])) + ort_session = ort.InferenceSession(model_path) + + res_onnx = ort_session.run( + ['bbox', 'scores', 'classes', 'cls_probabilities', 'rois', 'objects_idx_2d', 'scores_filtered', 'bulb_state', + 'tcd_face'], + input_feed={"image": img} + ) + eps_mean = 1e-6 + eps_max = 1e-5 + # These are really really close + + assert (res[0] - res_onnx[0]).__abs__().numpy().mean() < 2.5 * eps_mean + assert (res[0] - res_onnx[0]).__abs__().numpy().max() < 7 * eps_max + assert (res[1] - res_onnx[1]).__abs__().numpy().mean() < eps_mean + assert (res[1] - res_onnx[1]).__abs__().numpy().max() < eps_mean + + assert (res[2] - res_onnx[2]).__abs__().numpy().mean() < eps_mean + assert (res[2] - res_onnx[2]).__abs__().numpy().max() < eps_max + assert (res[3] - res_onnx[3]).__abs__().numpy().mean() < eps_mean + assert (res[3] - res_onnx[3]).__abs__().numpy().max() < eps_max + + assert (res[4] - res_onnx[4]).__abs__().numpy().mean() < 1.5*eps_mean + assert (res[4] - res_onnx[4]).__abs__().numpy().max() < 7 * eps_max + + assert 
(res[5][:, 0] - res_onnx[5]).__abs__().numpy().mean() < eps_mean
+ assert (res[5][:, 0] - res_onnx[5]).__abs__().numpy().max() < eps_max
+
+ assert (res[6] - res_onnx[6]).__abs__().numpy().mean() < eps_mean
+ assert (res[6] - res_onnx[6]).__abs__().numpy().max() < eps_max
+
+ # These two have lower accuracy but are still acceptable
+
+ assert (tf.nn.softmax(res[7][:4, :]) - tf.nn.softmax(res_onnx[7][:4, :])).numpy().__abs__().mean() < 5e-3
+ assert (tf.nn.softmax(res[7][:4, :]) - tf.nn.softmax(res_onnx[7][:4, :])).numpy().__abs__().max() < 5e-2
+
+ assert (tf.nn.softmax(res[8][:4, :]) - tf.nn.softmax(res_onnx[8][:4, :])).numpy().__abs__().mean() < 5e-4
+ assert (tf.nn.softmax(res[8][:4, :]) - tf.nn.softmax(res_onnx[8][:4, :])).numpy().__abs__().max() < 1e-2
diff --git a/test/models/private_tests/test_yolov11.py b/test/models/private_tests/test_yolov11.py
new file mode 100644
index 00000000..19ab9ea6
--- /dev/null
+++ b/test/models/private_tests/test_yolov11.py
@@ -0,0 +1,32 @@
+import numpy as np
+import onnx
+import pytest
+from keras_data_format_converter import convert_channels_first_to_last
+from onnx2kerastl import onnx_to_keras
+from test.utils import NP_SEED
+import onnxruntime as ort
+from test.models.private_tests.aws_utils import aws_s3_download
+
+
+
+@pytest.mark.parametrize('aws_s3_download', [["yolov11/", "yolov11/", False]], indirect=True)
+def test_yolov11(aws_s3_download):
+ np.random.seed(seed=NP_SEED)
+ yolov11_model_path = f'{aws_s3_download}/yolo11s.onnx'
+ onnx_model = onnx.load(yolov11_model_path)
+ input_all = [_input.name for _input in onnx_model.graph.input]
+ input_initializer = [node.name for node in onnx_model.graph.initializer]
+ input_names = list(set(input_all) - set(input_initializer))
+ k_model = onnx_to_keras(onnx_model, input_names, name_policy='attach_weights_name', allow_partial_compilation=False)
+ flipped_model = convert_channels_first_to_last(k_model.converted_model, should_transform_inputs_and_outputs=False)
+ input_np = np.random.uniform(0, 1, (1, 3, 640, 640))
+ keras_res = flipped_model(input_np).numpy()
+ ort_session = ort.InferenceSession(yolov11_model_path)
+ onnx_res = ort_session.run(
+ ['output0'],
+ input_feed={input_all[0]: input_np.astype(np.float32)})[0]
+ d_res = np.abs(onnx_res - keras_res)
+ assert d_res.mean() < 1e-5
+ assert d_res.max() < 1e-3
+
+
diff --git a/test/models/test_albert_huggingface.py b/test/models/test_albert_huggingface.py
new file mode 100644
index 00000000..798fcf82
--- /dev/null
+++ b/test/models/test_albert_huggingface.py
@@ -0,0 +1,38 @@
+import onnx
+import pytest
+import tensorflow as tf
+from transformers import AlbertTokenizer, TFAlbertModel
+from transformers.onnx import FeaturesManager
+from pathlib import Path
+from transformers.onnx import export, OnnxConfig
+import numpy as np
+from onnx2kerastl import onnx_to_keras
+from keras_data_format_converter import convert_channels_first_to_last
+
+
+@pytest.mark.skip
+@pytest.mark.slow
+def test_albert_huggingface():
+ onnx_path = 'albert.onnx'
+ model_name = "albert-base-v2"
+ model_name_for_features = "albert"
+ model = TFAlbertModel.from_pretrained(model_name)
+ tokenizer = AlbertTokenizer.from_pretrained(model_name)
+ real_inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ OnnxConfig.default_fixed_batch = 1
+ albert_features = list(FeaturesManager.get_supported_features_for_model_type(model_name_for_features).keys())
+ onnx_path = Path(onnx_path)
+ model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature='default')
+ onnx_config = 
model_onnx_config(model.config) + onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path) + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['input_ids', 'token_type_ids', 'attention_mask'], + input_types=[tf.int32, tf.int32, tf.float32]) + input_np = [real_inputs['input_ids'], + real_inputs['token_type_ids'], + real_inputs['attention_mask']] + out = model(real_inputs) + flipped_model = convert_channels_first_to_last(keras_model, []) + flipped_otpt = flipped_model(input_np) + assert np.abs((out['last_hidden_state'] - flipped_otpt[0])).max() < 1e-04 + assert np.abs((out['pooler_output'] - flipped_otpt[1])).max() < 1e-04 diff --git a/test/models/test_albert_qa_huggingface.py b/test/models/test_albert_qa_huggingface.py new file mode 100644 index 00000000..47a76802 --- /dev/null +++ b/test/models/test_albert_qa_huggingface.py @@ -0,0 +1,48 @@ +import onnx +import pytest +import tensorflow as tf +from transformers import AlbertTokenizer, TFAlbertForQuestionAnswering +from transformers.onnx import FeaturesManager +from pathlib import Path +from transformers.onnx import export, OnnxConfig +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last + + +@pytest.mark.slow +def test_albert_qa_huggingface(): + save_model = True + onnx_path = 'model.onnx' + model_name_for_features = "albert" + tokenizer = AlbertTokenizer.from_pretrained("vumichien/albert-base-v2-squad2") + model = TFAlbertForQuestionAnswering.from_pretrained("vumichien/albert-base-v2-squad2") + question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + inputs = tokenizer(question, text, return_tensors="tf") + outputs = model(**inputs) + answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0]) + answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0]) + predict_answer_tokens = inputs.input_ids[0, answer_start_index: answer_end_index + 1] + answer = tokenizer.decode(predict_answer_tokens) + OnnxConfig.default_fixed_batch = 1 + OnnxConfig.default_fixed_sequence = 14 + albert_features = list(FeaturesManager.get_supported_features_for_model_type(model_name_for_features).keys()) + print(albert_features) + onnx_path = Path(onnx_path) + if save_model == True: + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature='question-answering') + onnx_config = model_onnx_config(model.config) + onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path) + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['input_ids', 'token_type_ids', 'attention_mask'], + input_types=[tf.int32, tf.int32, tf.float32], + allow_partial_compilation=False) + keras_model = keras_model.converted_model + input_np = [inputs['input_ids'], + inputs['token_type_ids'], + inputs['attention_mask']] + out = model(inputs) + flipped_model = convert_channels_first_to_last(keras_model, []) + flipped_otpt = flipped_model(input_np) + assert np.abs((out[0]-flipped_otpt[1])).max() < 1e-04 + assert np.abs((out[1]-flipped_otpt[0])).max() < 1e-04 diff --git a/test/models/test_alexnet.py b/test/models/test_alexnet.py index a680d12d..3daf02bf 100644 --- a/test/models/test_alexnet.py +++ b/test/models/test_alexnet.py @@ -1,17 +1,14 @@ import numpy as np import pytest -import tensorflow as tf - from test.utils import convert_and_test from torchvision.models import alexnet +from test.utils 
import NP_SEED - -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_alexnet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = alexnet() +@pytest.mark.parametrize('pretrained', [True]) +def test_alexnet(pretrained): + np.random.seed(seed=NP_SEED) + model = alexnet(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_bert_huggingface.py b/test/models/test_bert_huggingface.py new file mode 100644 index 00000000..9fa51d2e --- /dev/null +++ b/test/models/test_bert_huggingface.py @@ -0,0 +1,42 @@ +import onnx +import pytest +import tensorflow as tf +import torch +from transformers import BertTokenizer, BertModel +from transformers.onnx import FeaturesManager +from pathlib import Path +from transformers.onnx import export, OnnxConfig +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last + + +@pytest.mark.skip(reason="Fails on CI but works locally (might be too big?)") +@pytest.mark.slow +def test_bert_huggingface(): + onnx_path = 'bert_huggingface.onnx' + model_name = "bert-base-uncased" + model_name_for_features = "bert" + model = BertModel.from_pretrained(model_name) + tokenizer = BertTokenizer.from_pretrained(model_name) + real_inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + OnnxConfig.default_fixed_sequence = 8 # this does nothing here, serves as a reminder + OnnxConfig.default_fixed_batch = 2 # this does nothing here, serves as a reminder + albert_features = list(FeaturesManager.get_supported_features_for_model_type(model_name_for_features).keys()) + print(albert_features) + onnx_path = Path(onnx_path) + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature='default') + onnx_config = model_onnx_config(model.config) + onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path) + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['input_ids', 'token_type_ids', 'attention_mask'], + input_types=[tf.int32, tf.int32, tf.float32]) + input_np = [real_inputs['input_ids'].numpy(), + real_inputs['token_type_ids'].numpy(), + real_inputs['attention_mask'].numpy()] + with torch.no_grad(): + out = model(**real_inputs) + flipped_model = convert_channels_first_to_last(keras_model, []) + flipped_otpt = flipped_model(input_np) + assert np.abs((out['last_hidden_state'].detach().numpy() - flipped_otpt[0])).max() < 1e-04 + assert np.abs((out['pooler_output'].detach().numpy() - flipped_otpt[1])).max() < 1e-04 diff --git a/test/models/test_bert_huggingface_classification.py b/test/models/test_bert_huggingface_classification.py new file mode 100644 index 00000000..0e78d5b8 --- /dev/null +++ b/test/models/test_bert_huggingface_classification.py @@ -0,0 +1,50 @@ +import onnx +import pytest +import tensorflow as tf +# import torch +from transformers import BertTokenizer, BertModel +from transformers.onnx import FeaturesManager +from pathlib import Path +from transformers.onnx import export, OnnxConfig +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import 
convert_channels_first_to_last +from onnx2kerastl.customonnxlayer import onnx_custom_objects_map +from transformers import AutoModelForSequenceClassification +import torch + +# @pytest.mark.skip(reason="Fails on CI but works locally (might be too big?)") +def test_bert_huggingface_classification(): + onnx_path = 'bert_huggingface.onnx' + model_name = "bert-base-uncased" + model_name_for_features = "bert" + id2label = {0: "IS_DAMAGED", 1: "NOT_DAMAGED"} + label2id = {"IS_DAMAGED": 0, "NOT_DAMAGED": 1} + model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2, id2label=id2label, label2id=label2id) + tokenizer = BertTokenizer.from_pretrained(model_name) + real_inputs = tokenizer("Hello, my dog is cute", return_tensors="pt", padding='max_length', max_length=100) + OnnxConfig.default_fixed_sequence = 8 # this does nothing here, serves as a reminder + OnnxConfig.default_fixed_batch = 2 # this does nothing here, serves as a reminder + albert_features = list(FeaturesManager.get_supported_features_for_model_type(model_name_for_features).keys()) + onnx_path = Path(onnx_path) + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature='sequence-classification') + onnx_config = model_onnx_config(model.config) + onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path) + onnx_model = onnx.load(onnx_path) + keras_model = onnx_to_keras(onnx_model, ['input_ids', 'token_type_ids', 'attention_mask'], + input_types=[tf.int32, tf.int32, tf.float32], + allow_partial_compilation=False) + keras_model = keras_model.converted_model + input_np = [real_inputs['input_ids'].numpy(), + real_inputs['token_type_ids'].numpy(), + real_inputs['attention_mask'].numpy()] + with torch.no_grad(): + out = model(**real_inputs) + flipped_model = convert_channels_first_to_last(keras_model, []) + flipped_model.save('temp.h5') + model = tf.keras.models.load_model('temp.h5', custom_objects=onnx_custom_objects_map) + flipped_otpt = model(input_np) + assert ((flipped_otpt - out.logits.detach().numpy()).__abs__().numpy().max() < 5*10**-6) + +if __name__ == "__main__": + test_bert_huggingface_classification() \ No newline at end of file diff --git a/test/models/test_convnext.py b/test/models/test_convnext.py new file mode 100644 index 00000000..0a8eb2bf --- /dev/null +++ b/test/models/test_convnext.py @@ -0,0 +1,16 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models import convnext_base + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [convnext_base]) +@pytest.mark.parametrize('pretrained', [True]) +def test_convnext(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_deeplab.py b/test/models/test_deeplab.py new file mode 100644 index 00000000..a5a27ace --- /dev/null +++ b/test/models/test_deeplab.py @@ -0,0 +1,16 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models.segmentation import deeplabv3_resnet50 + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [deeplabv3_resnet50]) +@pytest.mark.parametrize('pretrained', [True]) +def test_deeplab(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + 
model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 256, 256)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True, epsilon=2*10**(-5)) diff --git a/test/models/test_densenet.py b/test/models/test_densenet.py index e7770100..07b888d7 100644 --- a/test/models/test_densenet.py +++ b/test/models/test_densenet.py @@ -1,18 +1,22 @@ import numpy as np import pytest -import tensorflow as tf - -from test.utils import convert_and_test +from torch import nn +from test.utils import convert_and_test, NP_SEED from torchvision.models.densenet import densenet121 + @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_densenet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = densenet121() +@pytest.mark.parametrize('model_class', [densenet121]) +@pytest.mark.parametrize('pretrained', [True]) +def test_densenet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model = nn.Sequential( + model, + nn.Softmax(dim=1) + ) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_efficientnet.py b/test/models/test_efficientnet.py new file mode 100644 index 00000000..5a2c98e4 --- /dev/null +++ b/test/models/test_efficientnet.py @@ -0,0 +1,23 @@ +import numpy as np +import pytest +from torch import nn +from test.utils import convert_and_test, NP_SEED +from torchvision.models import efficientnet_b0 + + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [efficientnet_b0]) +@pytest.mark.parametrize('pretrained', [True]) +def test_efficientnet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model = nn.Sequential( + model, + nn.Softmax(dim=1) + ) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_glpn.py b/test/models/test_glpn.py new file mode 100644 index 00000000..c844997d --- /dev/null +++ b/test/models/test_glpn.py @@ -0,0 +1,56 @@ +import io +from transformers import AutoImageProcessor, GLPNForDepthEstimation, DPTForDepthEstimation +import onnx +import torch +import requests +from PIL import Image +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last + + + +def remove_last_two_nodes(onnx_model): + + # Remove the last six nodes from the graph + for i in range(6): + onnx_model.graph.node.pop() + + # Use the output of the last remaining node as the new graph output + new_output_name = onnx_model.graph.node[-1].output[0] + + # Extend the graph's output with the new output + onnx_model.graph.output.extend(onnx_model.graph.value_info) + onnx_model.graph.output[-1].name = new_output_name + return onnx_model + + +def test_glpn(): + import sys + sys.setrecursionlimit(10000) + + model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti") + model = model.eval() + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + image = Image.open(requests.get(url, stream=True).raw) + image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti") + inputs 
= image_processor(images=image, return_tensors="pt") + pixel_values = inputs.data['pixel_values'] + + # torch.onnx.export(model, pixel_values, 'glpn.onnx') + # onnx_model = onnx.load('glpn.onnx') + temp_f = io.BytesIO() + torch.onnx.export(model, pixel_values, temp_f) + temp_f.seek(0) + onnx_model = onnx.load(temp_f) + onnx_model = remove_last_two_nodes(onnx_model) + keras_model = onnx_to_keras(onnx_model, ['input.1'], name_policy='attach_weights_name', + allow_partial_compilation=False) + keras_model = keras_model.converted_model + permuted_inputs = np.swapaxes(np.swapaxes(pixel_values, 1, 2), 2, 3) + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + model = model.eval() + this_pred = model(torch.Tensor(pixel_values)) + keras_preds = final_model(permuted_inputs) + torch_pred = this_pred['predicted_depth'] + assert np.abs(keras_preds[..., 0] - torch_pred.detach().numpy()).max() < 1e-04 \ No newline at end of file diff --git a/test/models/test_googlenet.py b/test/models/test_googlenet.py index c34bc965..5351a857 100644 --- a/test/models/test_googlenet.py +++ b/test/models/test_googlenet.py @@ -1,18 +1,16 @@ import numpy as np import pytest -import tensorflow as tf - -from test.utils import convert_and_test +from test.utils import convert_and_test, NP_SEED from torchvision.models import googlenet + @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_googlenet(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = googlenet() +@pytest.mark.parametrize('pretrained', [True]) +def test_googlenet(pretrained): + np.random.seed(seed=NP_SEED) + model = googlenet(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True, epsilon=1.6*10**(-5)) diff --git a/test/models/test_inception.py b/test/models/test_inception.py new file mode 100644 index 00000000..a8c1a252 --- /dev/null +++ b/test/models/test_inception.py @@ -0,0 +1,23 @@ +import numpy as np +import pytest +from torch import nn +from test.utils import convert_and_test, NP_SEED +from torchvision.models import inception_v3 + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [inception_v3]) +@pytest.mark.parametrize('pretrained', [True]) +def test_inception(model_class, pretrained): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model = nn.Sequential( + model, + nn.Softmax(dim=1) + ) + + model.eval() + + input_np = np.random.uniform(0, 1, (2, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_llama_sentiment_analysis.py b/test/models/test_llama_sentiment_analysis.py new file mode 100644 index 00000000..addfd247 --- /dev/null +++ b/test/models/test_llama_sentiment_analysis.py @@ -0,0 +1,57 @@ +import os.path + +import onnx +import pytest +import tensorflow as tf +from transformers import AutoTokenizer +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +from onnx2kerastl.customonnxlayer import onnx_custom_objects_map +#from test.utils import export_torch_to_onnx_optimum + + +@pytest.mark.skip(reason="Fails on CI 
but works locally (might be too big?)") +def test_llama_32_1b_inst(): + onnx_model_folder = 'onnx_model' + onnx_path = os.path.join(onnx_model_folder, 'model.onnx') + model_name = "meta-llama/Llama-3.2-1B-Instruct" + # --------------------------------- Export to ONNX ------------------------------------- + export_torch_to_onnx_optimum(model_name, model_output_path=onnx_model_folder) + # ----------------------------------------- Input Preparation -------------------------- + tokenizer = AutoTokenizer.from_pretrained(model_name) + tokenizer.pad_token = tokenizer.eos_token + text = "i love this movie!" + prompt = tokenizer.apply_chat_template( + [{"role": "user", + "content": f"What is the sentiment of this sentence: \"{text}\"? Respond with 'positive' or 'negative' only."}], + add_generation_prompt=True, + return_tensors="np" + ) + input_ids = prompt + attention_mask = (input_ids != tokenizer.pad_token_id).astype(np.int64) + position_ids = np.arange(input_ids.shape[1])[None, :] + model_inputs = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "position_ids": position_ids + } + keras_inputs = {k: tf.convert_to_tensor(v) for k, v in model_inputs.items()} + # --------------------------------- Export to Keras ------------------------------------- + onnx_model = onnx.load(onnx_path) # TODO: add to requirements, updated onnx==1.17.0 () + keras_model = onnx_to_keras(onnx_model, ['input_ids', 'attention_mask', 'position_ids'], + allow_partial_compilation=False) + keras_model = keras_model.converted_model + flipped_model = convert_channels_first_to_last(keras_model, []) + flipped_model.save('temp.h5') + model = tf.keras.models.load_model('temp.h5', custom_objects=onnx_custom_objects_map) + # --------------------------------- Evaluating Inference ------------------------------------- + outputs = model(keras_inputs) + last_token_logits = outputs[0, -1] + pred_token_id = np.argmax(last_token_logits) + pred_token = tokenizer.decode([pred_token_id]).strip().lower() + + assert pred_token=='positive' + +if __name__ == "__main__": + test_llama_32_1b_inst() \ No newline at end of file diff --git a/test/models/test_mbnet2.py b/test/models/test_mbnet2.py index dde732fe..1f2973ba 100644 --- a/test/models/test_mbnet2.py +++ b/test/models/test_mbnet2.py @@ -1,17 +1,15 @@ import numpy as np import pytest -import tensorflow as tf - -from test.utils import convert_and_test +from test.utils import convert_and_test, NP_SEED from torchvision.models import mobilenet_v2 -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_mobilenetv2(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = mobilenet_v2() + +@pytest.mark.parametrize('pretrained', [True]) +def test_mobilenetv2(pretrained): + np.random.seed(seed=NP_SEED) + model = mobilenet_v2(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True, epsilon=1.5*10**(-5)) diff --git a/test/models/test_mnasnet.py b/test/models/test_mnasnet.py index 0ce309b0..fd446795 100644 --- a/test/models/test_mnasnet.py +++ b/test/models/test_mnasnet.py @@ -1,19 +1,17 @@ import numpy as np import pytest -import tensorflow as tf +from test.utils import convert_and_test, NP_SEED +from torchvision.models import mnasnet0_5 -from test.utils import convert_and_test -from torchvision.models import mnasnet0_5, mnasnet1_0, mnasnet0_75, mnasnet1_3 @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -@pytest.mark.parametrize('model_class', [mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3]) -def test_mnasnet(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = model_class() +@pytest.mark.parametrize('model_class', [mnasnet0_5]) +@pytest.mark.parametrize('pretrained', [True]) +def test_mnasnet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) \ No newline at end of file diff --git a/test/models/test_mobilenetv3.py b/test/models/test_mobilenetv3.py new file mode 100644 index 00000000..89a1fa38 --- /dev/null +++ b/test/models/test_mobilenetv3.py @@ -0,0 +1,22 @@ +import numpy as np +import pytest +from torch import nn +from test.utils import convert_and_test, NP_SEED +from torchvision.models import mobilenet_v3_small + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [mobilenet_v3_small]) +@pytest.mark.parametrize('pretrained', [True]) +def test_mobilenetv3(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model = nn.Sequential( + model, + nn.Softmax(dim=1) + ) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 124, 124)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_mobilnet_cityscapes.py b/test/models/test_mobilnet_cityscapes.py new file mode 100644 index 00000000..6aea6138 --- /dev/null +++ b/test/models/test_mobilnet_cityscapes.py @@ -0,0 +1,75 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +import torch +from torch import nn +from torchvision import transforms as T +from test.models.cityscape_semseg.cityscapes import Cityscapes +from test.models.cityscape_semseg import network +import io +import onnx +from onnx2kerastl import onnx_to_keras +from onnx2kerastl.utils import check_torch_keras_error +import urllib.request + +DATASET = "KITTI" # KITTI + + +def set_bn_momentum(model, momentum=0.1): + for m in model.modules(): + if isinstance(m, nn.BatchNorm2d): + m.momentum = momentum + + +def 
torch2keras(model, img, device='cpu'): + temp_f = io.BytesIO() + input_names = ['img'] + torch_input = torch.randn(2, *np.array(img.shape)[1:], device=device) + np_input = torch_input.cpu().numpy() + torch.onnx.export(model, torch_input, temp_f, + training=torch.onnx.TrainingMode.TRAINING, input_names=input_names, + output_names=['segmentation']) + + temp_f.seek(0) + onnx_model = onnx.load(temp_f) + k_model = onnx_to_keras(onnx_model, input_names, change_ordering=False, allow_partial_compilation=False) + k_model = k_model.converted_model + error = check_torch_keras_error(model, k_model, np_input, change_ordering=False, epsilon=5*10**(-3), + should_transform_inputs=True) + + +@pytest.mark.slow +@pytest.mark.parametrize('model', ['deeplabv3plus_mobilenet']) +@pytest.mark.parametrize('num_classes', [19]) +@pytest.mark.parametrize('output_strides', [16]) +def test_mobile_net_cityscape(model, num_classes, output_strides): + np.random.seed(seed=NP_SEED) + LOAD_WEIGHTS = True + device='cpu' + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/Cityscapes_weights/best_deeplabv3plus_mobilenet_cityscapes_os16.pth", + "tmp.pth") + model_weights_loc = "tmp.pth" + model = network.modeling.__dict__[model](num_classes=num_classes, output_stride=output_strides).cpu() + model.eval() + set_bn_momentum(model.backbone, momentum=0.01) + if LOAD_WEIGHTS: + checkpoint = torch.load(model_weights_loc, map_location=torch.device('cpu')) + model.load_state_dict(checkpoint["model_state"]) + del checkpoint + model.to(device) + transform = T.Compose([ + T.ToTensor(), + T.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]), + ]) + # Input size MUST be divisible by 16 in both axes (this restriction could be removed if we find a way for fractional upsample in keras) + if DATASET == "KITTI": + img = np.random.randint(0, 255, (384, 1232, 3), dtype=np.uint8) + + else: + img = np.random.randint(0, 255, (1024, 2048, 3), dtype=np.uint8) + np.random.seed(42) + img = transform(img).unsqueeze(0) # To tensor of NCHW + img = img.to(device) + torch2keras(model, img) diff --git a/test/models/test_openclip.py b/test/models/test_openclip.py new file mode 100644 index 00000000..429566eb --- /dev/null +++ b/test/models/test_openclip.py @@ -0,0 +1,19 @@ +# code to properly load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/ +import onnx +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import urllib + + +def test_openclip(): + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/openclip/openclip.onnx", + "openclip.onnx") + onnx_model = onnx.load('openclip.onnx') + keras_model = onnx_to_keras(onnx_model, ["pixel_values"], name_policy='attach_weights_name', + allow_partial_compilation=False) + keras_model = keras_model.converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + tf_preds = final_model(np.random.random((1, 224, 224, 3))) + diff --git a/test/models/test_raft_ci.py b/test/models/test_raft_ci.py new file mode 100644 index 00000000..5e51faf7 --- /dev/null +++ b/test/models/test_raft_ci.py @@ -0,0 +1,21 @@ +# code to properly load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/ +import onnx +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +import urllib + + +def test_raft_ci(): + 
urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/raft/raft.onnx", + "raft.onnx") + onnx_model = onnx.load('raft.onnx') + keras_model = onnx_to_keras(onnx_model, ['onnx::Div_0', 'onnx::Div_1'], name_policy='attach_weights_name', + allow_partial_compilation=False) + keras_model = keras_model.converted_model + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + first_im = np.random.random((440, 1024, 3))[None, ...] + second_im = np.random.random((440, 1024, 3))[None, ...] + tf_preds = final_model([first_im, second_im]) + diff --git a/test/models/test_regnet.py b/test/models/test_regnet.py new file mode 100644 index 00000000..70bfb3a3 --- /dev/null +++ b/test/models/test_regnet.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models import regnet_x_8gf + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [regnet_x_8gf]) +@pytest.mark.parametrize('pretrained', [True]) +def test_regnet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + + model = model_class(pretrained=pretrained) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) \ No newline at end of file diff --git a/test/models/test_resnet.py b/test/models/test_resnet.py new file mode 100644 index 00000000..0fb43940 --- /dev/null +++ b/test/models/test_resnet.py @@ -0,0 +1,20 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models import resnet18 +from torch import nn + + +@pytest.mark.parametrize('model_class', [resnet18]) +@pytest.mark.parametrize('pretrained', [True]) +def test_resnet18(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model = nn.Sequential( + model, + nn.Softmax(dim=1) + ) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_resnet18.py b/test/models/test_resnet18.py deleted file mode 100644 index 7e69ffad..00000000 --- a/test/models/test_resnet18.py +++ /dev/null @@ -1,18 +0,0 @@ -import numpy as np -import pytest -import tensorflow as tf - -from torchvision.models import resnet18 - -from test.utils import convert_and_test - - -@pytest.mark.parametrize('change_ordering', [True, False]) -def test_resnet18(change_ordering): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = resnet18() - model.eval() - - input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) diff --git a/test/models/test_resnext.py b/test/models/test_resnext.py index 242ded7f..aa707ffb 100644 --- a/test/models/test_resnext.py +++ b/test/models/test_resnext.py @@ -1,19 +1,17 @@ import numpy as np import pytest -import tensorflow as tf +from test.utils import convert_and_test, NP_SEED +from torchvision.models import resnext50_32x4d -from test.utils import convert_and_test -from torchvision.models import resnext50_32x4d, resnext101_32x8d @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -@pytest.mark.parametrize('model_class', [resnext50_32x4d, resnext101_32x8d]) -def test_resnext(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = model_class() +@pytest.mark.parametrize('model_class', [resnext50_32x4d]) +@pytest.mark.parametrize('pretrained', [True]) +def test_resnext(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_shufflenet.py b/test/models/test_shufflenet.py new file mode 100644 index 00000000..5a89b174 --- /dev/null +++ b/test/models/test_shufflenet.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models import shufflenet_v2_x0_5 + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [shufflenet_v2_x0_5]) +@pytest.mark.parametrize('pretrained', [True]) +@pytest.mark.skip(reason="Does not export Pytorch->Onnx well due to torch.chunck") +def test_shufflenet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_squeezenet.py b/test/models/test_squeezenet.py index 25246520..4e81f410 100644 --- a/test/models/test_squeezenet.py +++ b/test/models/test_squeezenet.py @@ -1,19 +1,17 @@ import numpy as np import pytest -import tensorflow as tf +from test.utils import convert_and_test, NP_SEED +from torchvision.models import squeezenet1_0 -from test.utils import convert_and_test -from torchvision.models import squeezenet1_0, squeezenet1_1 @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) -@pytest.mark.parametrize('model_class', [squeezenet1_1, squeezenet1_0]) -def test_squeezenet(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! 
Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = model_class() +@pytest.mark.parametrize('model_class', [squeezenet1_0]) +@pytest.mark.parametrize('pretrained', [True]) +def test_squeezenet(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_vgg.py b/test/models/test_vgg.py index cf0e7866..db865a5e 100644 --- a/test/models/test_vgg.py +++ b/test/models/test_vgg.py @@ -1,19 +1,17 @@ import numpy as np import pytest -import tensorflow as tf - -from test.utils import convert_and_test +from test.utils import convert_and_test, NP_SEED from torchvision.models import vgg11, vgg11_bn + @pytest.mark.slow -@pytest.mark.parametrize('change_ordering', [True, False]) @pytest.mark.parametrize('model_class', [vgg11, vgg11_bn]) -def test_vgg(change_ordering, model_class): - if not tf.test.gpu_device_name() and not change_ordering: - pytest.skip("Skip! Since tensorflow Conv2D op currently only supports the NHWC tensor format on the CPU") - model = model_class() +@pytest.mark.parametrize('pretrained', [True, False]) +def test_vgg(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) model.eval() input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) - error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_vit.py b/test/models/test_vit.py new file mode 100644 index 00000000..8010d76b --- /dev/null +++ b/test/models/test_vit.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest +from test.utils import convert_and_test, NP_SEED +from torchvision.models import vit_b_16 + + + +@pytest.mark.slow +@pytest.mark.parametrize('model_class', [vit_b_16]) +@pytest.mark.parametrize('pretrained', [True]) +@pytest.mark.skip(reason="Has torch.chunck which does not export well in Onnx") +def test_vit(pretrained, model_class): + np.random.seed(seed=NP_SEED) + model = model_class(pretrained=pretrained) + model.eval() + + input_np = np.random.uniform(0, 1, (1, 3, 224, 224)) + error = convert_and_test(model, input_np, verbose=False, should_transform_inputs=True) diff --git a/test/models/test_vit_huggingface.py b/test/models/test_vit_huggingface.py new file mode 100644 index 00000000..851dc000 --- /dev/null +++ b/test/models/test_vit_huggingface.py @@ -0,0 +1,56 @@ +import onnx +import tensorflow as tf +from transformers.onnx import FeaturesManager +from pathlib import Path +from transformers import ViTFeatureExtractor, ViTModel +from transformers.onnx import export, OnnxConfig +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +from packaging import version +from collections import OrderedDict +from typing import Mapping +from functools import partial + + +class ViTOnnxConfig(OnnxConfig): + + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + +def test_vit_huggingface(): + save_model = 
True + onnx_path = 'vit_huggingface.onnx' + model_name = "google/vit-base-patch16-224-in21k" + model = ViTModel.from_pretrained(model_name) + tokenizer = ViTFeatureExtractor.from_pretrained(model_name) + OnnxConfig.default_fixed_batch = 1 + OnnxConfig.default_fixed_sequence = 3 + OnnxConfig.default_batch_size = 1 + OnnxConfig.default_sequence_length = 3 + if save_model: + onnx_path = Path(onnx_path) + model_onnx_config = partial(ViTOnnxConfig.from_model_config, task='default') + onnx_config = model_onnx_config(model.config) + onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path) + onnx_model = onnx.load(onnx_path) + # keras_model = onnx_to_keras(onnx_model, ['pixel_values'], batch_size=1) + keras_model = onnx_to_keras(onnx_model, ['pixel_values'], allow_partial_compilation=False) + keras_model = keras_model.converted_model + final_model = convert_channels_first_to_last(keras_model, ['pixel_values']) + tokens = tokenizer(np.ones([300, 300, 3], dtype=np.uint8), return_tensors="pt") + pt_res = model(**tokens) + keras_res = final_model(tokens['pixel_values'].numpy().reshape(1, 224, 224, 3)) + assert np.abs(pt_res[1].detach().numpy()-keras_res[1]).max() < 1e-04 \ No newline at end of file diff --git a/test/models/test_x3d.py b/test/models/test_x3d.py new file mode 100644 index 00000000..0c3dddd4 --- /dev/null +++ b/test/models/test_x3d.py @@ -0,0 +1,38 @@ +# code to proprely load data here: https://pytorch.org/hub/facebookresearch_pytorchvideo_x3d/ +import onnx +# from transformers.onnx import export, OnnxConfig +import numpy as np +from onnx2kerastl import onnx_to_keras +from keras_data_format_converter import convert_channels_first_to_last +from packaging import version +from collections import OrderedDict +from typing import Mapping +import urllib + + +def test_x3d(): + import torch + model_name = 'x3d_s' + model = torch.hub.load('facebookresearch/pytorchvideo', model_name, pretrained=True) + model = model.eval() + # torch.onnx.export(n_model, torch.ones(1, 3, 13, 182, 182), 'x3d.onnx', export_params=True, input_names=['input'], + # output_names=['output'], + # dynamic_axes={'input': {0: 'batch_size'}, # variable length axes + # 'output': {0: 'batch_size'}}) - this requires an earlier version of tensorflow + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/x3d_tests/x3d.onnx", + "x3d_s.onnx") + urllib.request.urlretrieve( + "https://storage.googleapis.com/example-datasets-47ml982d/x3d_tests/inputs.npy", + "x3d_input.npy") + onnx_model = onnx.load('x3d_s.onnx') + inputs = np.load('x3d_input.npy') + keras_model = onnx_to_keras(onnx_model, ['input'], name_policy='attach_weights_name' + , allow_partial_compilation=False) + keras_model = keras_model.converted_model + permuted_inputs = np.swapaxes(np.swapaxes(np.swapaxes(inputs, 0, 1), 1, 2), 2, 3) + final_model = convert_channels_first_to_last(keras_model, should_transform_inputs_and_outputs=True) + model = model.eval() + this_pred = model(torch.Tensor(inputs)[None, ...]) + keras_preds = final_model(permuted_inputs[None, ...]) + assert np.abs(keras_preds - this_pred.detach().numpy()).max() < 1e-04 diff --git a/test/models/yolo_v7/test_yolov7.py b/test/models/yolo_v7/test_yolov7.py new file mode 100644 index 00000000..7782bfc6 --- /dev/null +++ b/test/models/yolo_v7/test_yolov7.py @@ -0,0 +1,25 @@ +import pathlib + +import numpy as np +import onnx +import pytest + +from onnx2kerastl import onnx_to_keras +from test.utils import NP_SEED + + 
+@pytest.mark.slow +@pytest.mark.parametrize('pretrained', [True]) +def test_yolov7(pretrained): + np.random.seed(seed=NP_SEED) + + dir = pathlib.Path(__file__).parent.resolve() + yolov7_model_path = f"{dir}/yolov7-tiny.onnx" + onnx_model = onnx.load(yolov7_model_path) + + input_all = [_input.name for _input in onnx_model.graph.input] + input_initializer = [node.name for node in onnx_model.graph.initializer] + input_names = list(set(input_all) - set(input_initializer)) + k_model = onnx_to_keras(onnx_model, input_names, name_policy='attach_weights_name', allow_partial_compilation=False) + # input_np = np.random.uniform(0, 1, (1, 1, 32, 64, 64)) + # error = test_conversion(onnx_model, k_model, input_np, epsilon=2 * 10 ** (-5)) diff --git a/test/models/yolo_v7/yolov7-tiny.onnx b/test/models/yolo_v7/yolov7-tiny.onnx new file mode 100644 index 00000000..701fecd5 Binary files /dev/null and b/test/models/yolo_v7/yolov7-tiny.onnx differ diff --git a/test/requirements.txt b/test/requirements.txt index 480ee740..76211b4d 100644 --- a/test/requirements.txt +++ b/test/requirements.txt @@ -1,4 +1,5 @@ torch>=1.1.0,<=1.5.0 torchvision>=0.3.0,<=0.6.0 +optimum==1.23.3 pytest pytest-repeat \ No newline at end of file diff --git a/test/utils.py b/test/utils.py index 68c85ef2..b2a4d387 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1,8 +1,18 @@ import io -import torch + import onnx +import torch +from keras.layers import Lambda +from keras.models import Model -from onnx2keras import onnx_to_keras, check_torch_keras_error +from onnx2kerastl import onnx_to_keras +from onnx2kerastl.utils import check_torch_keras_error +#from optimum.exporters.onnx import main_export + +NP_SEED = 42 + +class LambdaLayerException(Exception): + pass def torch2keras(model: torch.nn.Module, input_variable, verbose=True, change_ordering=False): @@ -18,7 +28,8 @@ def torch2keras(model: torch.nn.Module, input_variable, verbose=True, change_ord output_names=['test_out']) temp_f.seek(0) onnx_model = onnx.load(temp_f) - k_model = onnx_to_keras(onnx_model, input_names, change_ordering=change_ordering) + k_model = onnx_to_keras(onnx_model, input_names, change_ordering=change_ordering, allow_partial_compilation=False) + k_model = k_model.converted_model return k_model @@ -26,7 +37,52 @@ def convert_and_test(model: torch.nn.Module, input_variable, verbose=True, change_ordering=False, - epsilon=1e-5): + epsilon=1e-5, + should_transform_inputs=False): k_model = torch2keras(model, input_variable, verbose=verbose, change_ordering=change_ordering) - error = check_torch_keras_error(model, k_model, input_variable, change_ordering=change_ordering, epsilon=epsilon) + error = test_conversion(model, k_model, input_variable, change_ordering=change_ordering, epsilon=epsilon, + should_transform_inputs=should_transform_inputs) + return error + + +def test_conversion(onnx_model, k_model, input_variable, change_ordering=False, epsilon=1e-5, + should_transform_inputs=False): + error = check_torch_keras_error(onnx_model, k_model, input_variable, change_ordering=change_ordering, epsilon=epsilon, + should_transform_inputs=should_transform_inputs) + # if is_lambda_layers_exist(k_model): + # raise LambdaLayerException("Found Lambda layers") return error + + +def is_lambda_layers_exist(model: Model): + return any(isinstance(layer, Lambda) for layer in model.layers) + + +# def export_torch_to_onnx_optimum(model_name: str, model_output_path: str, task="causal-lm"): +# """ +# this function get a model as an input (Hugginface or local path), creates a folder and 
saves the onnx model as output. +# it uses the optimum library. +# NOTE: For the llama model the maximum absolute difference of the logits is larger than 1e-5; it shouldn't be that important! +# Args: +# model_name: model path (local or HF name) +# model_output_path: output folder path +# task: model task +# +# Returns: +# creates the onnx model in the output folder path +# """ +# main_export( +# model_name_or_path=model_name, +# task=task, +# output=model_output_path, +# opset=None, +# device="cpu", +# dtype=None, +# pad_token_id=None, +# trust_remote_code=False, +# do_validation=True, +# framework=None, +# no_post_process=False, +# model_kwargs=None, +# atol = 1e-5 +# ) \ No newline at end of file