diff --git a/.gitignore b/.gitignore index 7f25e7e0e..4b7e8cbaf 100644 --- a/.gitignore +++ b/.gitignore @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ + *.pickle -*.txt *.bak # ignore compiled python files @@ -34,10 +34,13 @@ # Ignore testing trash *.qmod -# Bazel files -/bazel-* -# custom bazelrc for the TF op, created in configure.sh +# Files generated by configure.sh .bazelrc +.tf_configure.bazelrc +third_party/python_legacy + +# Bazel directories & files created at run time. +/bazel-* # Local TF Copy tensorflow/* diff --git a/BUILD b/BUILD new file mode 100644 index 000000000..1cc330ea4 --- /dev/null +++ b/BUILD @@ -0,0 +1,2 @@ +# Top-level Bazel BUILD file for TensorFlow Quantum. +# This file is intentionally empty. diff --git a/WORKSPACE b/WORKSPACE index 4b3e8970e..2011af012 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,14 +1,41 @@ # This file includes external dependencies that are required to compile the # TensorFlow op. +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +# TensorFlow's .bzl files, loaded later in this file, also load rules_python +# but we need a slightly newer version that is still compatible with TF's. +http_archive( + name = "rules_python", + sha256 = "c68bdc4fbec25de5b5493b8819cfc877c4ea299c0dcb15c244c5a00208cde311", + strip_prefix = "rules_python-0.31.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.31.0/rules_python-0.31.0.tar.gz", +) -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") +load("@rules_python//python:repositories.bzl", "py_repositories") +py_repositories() +local_repository( + name = "python", + path = "third_party/python_legacy", +) -EIGEN_COMMIT = "aa6964bf3a34fd607837dd8123bc42465185c4f8" +load("@python//:defs.bzl", "interpreter") +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "pypi", + requirements_lock = "//:requirements.txt", + python_interpreter = interpreter, +) + +load("@pypi//:requirements.bzl", "install_deps") + +install_deps() + +EIGEN_COMMIT = "aa6964bf3a34fd607837dd8123bc42465185c4f8" http_archive( name = "eigen", @@ -37,16 +64,13 @@ http_archive( urls = ["https://github.com/quantumlib/qsim/archive/refs/tags/v0.13.3.zip"], ) + http_archive( name = "org_tensorflow", - patches = [ - "//third_party/tf:tf.patch", - ], - sha256 = "f771db8d96ca13c72f73c85c9cfb6f5358e2de3dd62a97a9ae4b672fe4c6d094", - strip_prefix = "tensorflow-2.15.0", - urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.zip", - ], + patches = ["//third_party/tf:tf.patch"], + sha256 = "c8c8936e7b6156e669e08b3c388452bb973c1f41538149fce7ed4a4849c7a012", + strip_prefix = "tensorflow-2.16.2", + urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.16.2.zip"], ) diff --git a/configure.sh b/configure.sh index 0ca428c85..967bdf62c 100755 --- a/configure.sh +++ b/configure.sh @@ -13,23 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== + +set -uo pipefail + PLATFORM="$(uname -s | tr 'A-Z' 'a-z')" -function write_to_bazelrc() { - echo "$1" >> .bazelrc -} -function write_action_env_to_bazelrc() { - write_to_bazelrc "build --action_env $1=\"$2\"" +# --- helpers --------------------------------------------------------------- +function write_bazelrc() { + echo "${1}" >> .bazelrc } -function write_linkopt_dir_to_bazelrc() { - write_to_bazelrc "build --linkopt -Wl,-rpath,$1" >> .bazelrc +function write_tf_rc() { + echo "${1}" >> .tf_configure.bazelrc } - -function is_linux() { - [[ "${PLATFORM}" == "linux" ]] +function die() { + echo "ERROR: $*" >&2 + exit 1 } function is_macos() { @@ -37,143 +38,226 @@ function is_macos() { } function is_windows() { - # On windows, the shell script is actually running in msys [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]] } -function is_ppc64le() { - [[ "$(uname -m)" == "ppc64le" ]] -} +function write_legacy_python_repo() { + mkdir -p third_party/python_legacy + # empty WORKSPACE + cat > third_party/python_legacy/WORKSPACE <<'EOF' +# AUTOGENERATED by configure.sh. +# This file is intentionally empty. +EOF + + # simple BUILD that exports defs.bzl + cat > third_party/python_legacy/BUILD <<'EOF' +# AUTOGENERATED by configure.sh. + +package(default_visibility = ["//visibility:public"]) +exports_files(["defs.bzl"]) +EOF + + # defs.bzl MUST define 'interpreter' as a string, not a function. + # We also export py_runtime to satisfy older loads. + cat > third_party/python_legacy/defs.bzl <= 3.10 + if ! command -v python3 >/dev/null 2>&1; then + die "python3 not found. Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH." + fi + + if ! python3 - <<'PY' +import sys +raise SystemExit(0 if sys.version_info[:2] >= (3, 10) else 1) +PY + then + die "Python 3.10+ required for TensorFlow Quantum, but found " \ + "$(python3 -V 2>&1). Pass --python=/path/to/python3.10+ or set PYTHON_BIN_PATH." + fi + + PY="$(command -v python3)" +fi + +# Normalize to an absolute path. Use Python to print sys.executable because +# tools like pyenv use shim scripts that readlink would resolve to the script +# itself, not the actual interpreter binary. +PY_ABS="$("${PY}" -c 'import os,sys; print(os.path.abspath(sys.executable))')" +PYTHON_BIN_PATH="${PY_ABS}" + + +# --- choose CPU/GPU like upstream script (default CPU) --------------------- +TF_NEED_CUDA="" +y_for_cpu='Build against TensorFlow CPU backend? (Type n to use GPU) [Y/n] ' +while [[ -z "${TF_NEED_CUDA}" ]]; do + read -p "${y_for_cpu}" INPUT || true + case "${INPUT:-Y}" in + [Yy]* ) echo "CPU build selected."; TF_NEED_CUDA=0;; + [Nn]* ) echo "GPU build selected."; TF_NEED_CUDA=1;; + * ) echo "Please answer y or n.";; esac done +# For TF >= 2.1 this value isn’t actually consulted by TFQ, +# but we keep a compatible prompt/flag. +TF_CUDA_VERSION="12" -# Check if it's installed -# if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then -# echo 'Using installed tensorflow' -# else -# # Uninstall CPU version if it is installed. -# if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then -# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' -# pip uninstall tensorflow -# elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then -# echo 'Already have tensorflow non-gpu installed. 
Uninstalling......\n' -# pip uninstall tf-nightly -# fi -# # Install GPU version -# echo 'Installing tensorflow .....\n' -# pip install tensorflow -# fi - - - -TF_CFLAGS=( $(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS="$(python -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')" - - -write_to_bazelrc "build --experimental_repo_remote_exec" -write_to_bazelrc "build --spawn_strategy=standalone" -write_to_bazelrc "build --strategy=Genrule=standalone" -write_to_bazelrc "build -c opt" -write_to_bazelrc "build --cxxopt=\"-D_GLIBCXX_USE_CXX11_ABI=1\"" -write_to_bazelrc "build --cxxopt=\"-std=c++17\"" - -# The transitive inclusion of build rules from TensorFlow ends up including -# and building two copies of zlib (one from bazel_rules, one from the TF code -# baase itself). The version of zlib you get (at least in TF 2.15.0) ends up -# producing many compiler warnings that "a function declaration without a -# prototype is deprecated". It's difficult to patch the particular build rules -# involved, so the approach taken here is to silence those warnings for stuff -# in external/. TODO: figure out how to patch the BUILD files and put it there. -write_to_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype" -write_to_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype" - -# Similarly, these are other harmless warnings about unused functions coming -# from things pulled in by the TF bazel config rules. -write_to_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" -write_to_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" - -# The following supress warnings coming from qsim. -# TODO: fix the code in qsim & update TFQ to use the updated version. -write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" -write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" -write_to_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" -write_to_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" +# --- sanity: python is importable and has TF ------------------------------- +if [[ ! -x "${PYTHON_BIN_PATH}" ]]; then + die "${PYTHON_BIN_PATH} not found/executable." +fi +# Ensure TF is importable from system python (user should have installed it). +echo "Next, you may see warnings printed by loading TensorFlow packages." +echo "Do not be alarmed unless there are errors." +tf_output=$("${PYTHON_BIN_PATH}" - <<'PY' +import sys +import os +import glob -if is_windows; then - # Use pywrap_tensorflow instead of tensorflow_framework on Windows - SHARED_LIBRARY_DIR=${TF_CFLAGS:2:-7}"python" -else - SHARED_LIBRARY_DIR=${TF_LFLAGS:2} -fi -SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev) -if ! [[ $TF_LFLAGS =~ .*:.* ]]; then - if is_macos; then - SHARED_LIBRARY_NAME="libtensorflow_framework.dylib" - elif is_windows; then - # Use pywrap_tensorflow's import library on Windows. It is in the same dir as the dll/pyd. 
- SHARED_LIBRARY_NAME="_pywrap_tensorflow_internal.lib" - else - SHARED_LIBRARY_NAME="libtensorflow_framework.so" - fi -fi +try: + import tensorflow as tf + import tensorflow.sysconfig as sc +except ImportError: + sys.exit(1) + +print(sc.get_include()) + +lib_path = sc.get_lib() +lib_dir = lib_path if os.path.isdir(lib_path) else os.path.dirname(lib_path) +print(lib_dir) -HEADER_DIR=${TF_CFLAGS:2} -if is_windows; then - SHARED_LIBRARY_DIR=${SHARED_LIBRARY_DIR//\\//} - SHARED_LIBRARY_NAME=${SHARED_LIBRARY_NAME//\\//} - HEADER_DIR=${HEADER_DIR//\\//} +cands = (glob.glob(os.path.join(lib_dir, 'libtensorflow_framework.so*')) or + glob.glob(os.path.join(lib_dir, 'libtensorflow.so*')) or + glob.glob(os.path.join(lib_dir, '_pywrap_tensorflow_internal.*'))) +print(os.path.basename(cands[0]) if cands else 'libtensorflow_framework.so.2') +PY +) + +if [[ $? -ne 0 ]]; then + echo "ERROR: tensorflow not importable by Python (${PYTHON_BIN_PATH})" >&2 + exit 1 fi -write_action_env_to_bazelrc "TF_HEADER_DIR" ${HEADER_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME} -write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA} +{ + read -r HDR + read -r LIBDIR + read -r LIBNAME +} <<< "${tf_output}" + +echo +echo "Configuration values detected:" +echo " PYTHON_BIN_PATH=${PYTHON_BIN_PATH}" +echo " TF_HEADER_DIR=${HDR}" +echo " TF_SHARED_LIBRARY_DIR=${LIBDIR}" +echo " TF_SHARED_LIBRARY_NAME=${LIBNAME}" + +# --- write .tf_configure.bazelrc (repo_env for repository rules) ----------- +write_tf_rc "build --repo_env=PYTHON_BIN_PATH=${PYTHON_BIN_PATH}" +write_tf_rc "build --repo_env=TF_HEADER_DIR=${HDR}" +write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_DIR=${LIBDIR}" +write_tf_rc "build --repo_env=TF_SHARED_LIBRARY_NAME=${LIBNAME}" +write_tf_rc "build --repo_env=TF_NEED_CUDA=${TF_NEED_CUDA}" + +# Make sure repo rules and sub-config see legacy Keras (keras 2 instead of Keras 3) +write_tf_rc "build --repo_env=TF_USE_LEGACY_KERAS=1" + +# --- write third_party/python_legacy/ with interpreter -------------------- +write_legacy_python_repo + +# --- write .bazelrc (imports TF config usual flags) ----------------- +write_bazelrc "# WARNING: this file (.bazelrc) is AUTOGENERATED and overwritten" +write_bazelrc "# when configure.sh runs. Put customizations in .bazelrc.user." +write_bazelrc "" +write_bazelrc "try-import %workspace%/.tf_configure.bazelrc" +write_bazelrc "common --experimental_repo_remote_exec" +write_bazelrc "build --spawn_strategy=standalone" +write_bazelrc "build --strategy=Genrule=standalone" +write_bazelrc "build -c opt" +write_bazelrc "build --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=1" +write_bazelrc "build --cxxopt=-std=c++17" +write_bazelrc "build --action_env=TF_USE_LEGACY_KERAS=1" +write_bazelrc "build --action_env=PYTHON_BIN_PATH=${PYTHON_BIN_PATH}" + +# rpath so the dynamic linker finds TF’s shared lib if ! is_windows; then - write_linkopt_dir_to_bazelrc ${SHARED_LIBRARY_DIR} + write_bazelrc "build --linkopt=-Wl,-rpath,${LIBDIR}" fi -# TODO(yifeif): do not hardcode path -if [[ "$TF_NEED_CUDA" == "1" ]]; then - write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" - write_to_bazelrc "build:cuda --@local_config_cuda//:enable_cuda" - write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" +# The following supressions are for warnings coming from external dependencies. +# They're most likely inconsequential or false positives. 
Since we can't fix +# them, we suppress the warnings to reduce noise during builds. - write_action_env_to_bazelrc "TF_CUDA_VERSION" ${TF_CUDA_VERSION} - write_action_env_to_bazelrc "TF_CUDNN_VERSION" "8" +write_bazelrc "" +write_bazelrc "build --per_file_copt=external/.*@-Wno-deprecated-non-prototype" +write_bazelrc "build --host_per_file_copt=external/.*@-Wno-deprecated-non-prototype" +write_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" +write_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-unused-function" +write_bazelrc "build --per_file_copt=external/com_google_protobuf/.*@-Wno-stringop-overflow" +write_bazelrc "build --host_per_file_copt=external/com_google_protobuf/.*@-Wno-stringop-overflow" +write_bazelrc "build --per_file_copt=external/eigen/.*@-Wno-maybe-uninitialized" +write_bazelrc "build --host_per_file_copt=external/eigen/.*@-Wno-maybe-uninitialized" + +# The following warnings come from qsim. +# TODO: fix the code in qsim & update TFQ to use the updated version. +write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" +write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/noise/tfq_.*@-Wno-unused-but-set-variable" +write_bazelrc "build --per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" +write_bazelrc "build --host_per_file_copt=tensorflow_quantum/core/ops/math_ops/tfq_.*@-Wno-deprecated-declarations" + +# CUDA toggle +if [[ "${TF_NEED_CUDA}" == "1" ]]; then + write_bazelrc "" + write_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" + write_bazelrc "build:cuda --@local_config_cuda//:enable_cuda" + write_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" if is_windows; then - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" + write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" + write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}" else - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "/usr/lib/x86_64-linux-gnu" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "/usr/local/cuda" + write_tf_rc "build --repo_env=CUDNN_INSTALL_PATH=/usr/lib/x86_64-linux-gnu" + write_tf_rc "build --repo_env=CUDA_TOOLKIT_PATH=/usr/local/cuda" fi - write_to_bazelrc "build --config=cuda" - write_to_bazelrc "test --config=cuda" + write_bazelrc "build --config=cuda" + write_bazelrc "test --config=cuda" fi +# Follow TensorFlow's approach and load an optional user bazelrc file. +write_bazelrc "" +write_bazelrc "try-import %workspace%/.bazelrc.user" + +echo "Wrote .tf_configure.bazelrc and .bazelrc successfully." diff --git a/release/build_pip_package.sh b/release/build_pip_package.sh index 8bed5b909..a35929c99 100755 --- a/release/build_pip_package.sh +++ b/release/build_pip_package.sh @@ -1,59 +1,79 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== + set -e -set -x + +# Pick the Python that TFQ/TensorFlow used during configure/build. +# Order: explicit env -> python3 (>= 3.10) +PY="${PYTHON_BIN_PATH:-}" +if [[ -z "${PY}" ]]; then + if ! command -v python3 >/dev/null 2>&1; then + echo "ERROR: python3 not found. Set PYTHON_BIN_PATH to a Python 3.10+ interpreter." >&2 + exit 2 + fi + + # Require Python >= 3.10 for TFQ. + if ! python3 - <<'PY' +import sys +sys.exit(0 if sys.version_info[:2] >= (3, 10) else 1) +PY + then + echo "ERROR: Python 3.10+ required for TensorFlow Quantum; found $(python3 -V 2>&1)." >&2 + exit 2 + fi + + PY="$(command -v python3)" +fi +echo "Using Python: ${PY}" + +# Ensure packaging tools are present in THIS interpreter. +pip install -qq setuptools wheel build EXPORT_DIR="bazel-bin/release/build_pip_package.runfiles/__main__" -function main() { - DEST=${1} - EXTRA_FLAGS=${2} +main() { + DEST="${1}" + EXTRA_FLAGS="${2}" - if [[ -z ${DEST} ]]; then + if [[ -z "${DEST}" ]]; then echo "No destination directory provided." exit 1 fi - mkdir -p ${DEST} + mkdir -p "${DEST}" echo "=== destination directory: ${DEST}" - TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX) - - echo $(date) : "=== Using tmpdir: ${TMPDIR}" - + # Build the pip package in a temporary directory. + TMPDIR="$(mktemp -d -t tmp.XXXXXXXXXX)" + echo "$(date) : === Using tmpdir: ${TMPDIR}" echo "=== Copy TFQ files" # Copy over files necessary to run setup.py - cp ${EXPORT_DIR}/release/setup.py "${TMPDIR}" - cp ${EXPORT_DIR}/release/MANIFEST.in "${TMPDIR}" - - # Copy over all files in the tensorflow_quantum/ directory that are included in the BUILD - # rule. - mkdir "${TMPDIR}"/tensorflow_quantum - cp -r -v ${EXPORT_DIR}/tensorflow_quantum/* "${TMPDIR}"/tensorflow_quantum/ - - pushd ${TMPDIR} - echo $(date) : "=== Building wheel" - - python3 setup.py bdist_wheel ${EXTRA_FLAGS} > /dev/null + cp "${EXPORT_DIR}/release/setup.py" "${TMPDIR}" + cp "${EXPORT_DIR}/release/MANIFEST.in" "${TMPDIR}" + mkdir "${TMPDIR}/tensorflow_quantum" + cp -r -v "${EXPORT_DIR}/tensorflow_quantum/"* "${TMPDIR}/tensorflow_quantum/" + pushd "${TMPDIR}" + echo "$(date) : === Building wheel" + "${PY}" -m build -v --wheel ${EXTRA_FLAGS} > /dev/null cp dist/*.whl "${DEST}" popd - rm -rf ${TMPDIR} - echo $(date) : "=== Output wheel file is in: ${DEST}" + rm -rf "${TMPDIR}" + echo "$(date) : === Output wheel file is in: ${DEST}" } main "$@" diff --git a/release/setup.py b/release/setup.py index 571a11861..e3e33436b 100644 --- a/release/setup.py +++ b/release/setup.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""TensorFlow Quantum adds qauntum computing primitives to TensorFlow. +"""TensorFlow Quantum adds quantum computing primitives to TensorFlow. TensorFlow Quantum is an open source library for high performance batch quantum computation on quantum simulators and quantum computers. 
The goal @@ -20,29 +20,24 @@ of quantum data and quantum systems via hybrid models. TensorFlow Quantum was created in an ongoing collaboration between the -University of Waterloo and the Quantum AI team at Google along with help from -many other contributors within Google. +University of Waterloo and the Quantum AI team at Google along with help +from many other contributors within Google. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import sys +from __future__ import absolute_import, division, print_function +import sys from datetime import date -from setuptools import Extension -from setuptools import find_packages -from setuptools import setup -from setuptools.dist import Distribution -from setuptools.command.install import install +from setuptools import Extension, find_packages, setup +from setuptools.command.install import install +from setuptools.dist import Distribution -DOCLINES = __doc__.split('\n') +DOCLINES = __doc__.split("\n") class InstallPlatlib(install): - """Workaround so .so files in generated wheels - can be seen by auditwheel.""" + """Workaround so .so files in generated wheels are visible to auditwheel.""" def finalize_options(self): install.finalize_options(self) @@ -50,67 +45,69 @@ def finalize_options(self): self.install_lib = self.install_platlib -REQUIRED_PACKAGES = ['cirq-core==1.3.0', 'cirq-google==1.3.0', 'sympy == 1.12'] +REQUIRED_PACKAGES = [ + "cirq-core==1.3.0", + "cirq-google==1.3.0", + "sympy==1.14", +] + +# Placed as extras to avoid overwriting existing nightly TF installs. +EXTRA_PACKAGES = ["tensorflow>=2.16,<2.17"] -# placed as extra to not have required overwrite existing nightly installs if -# they exist. -EXTRA_PACKAGES = ['tensorflow == 2.15.0'] -CUR_VERSION = '0.7.4' +CUR_VERSION = "0.7.4" class BinaryDistribution(Distribution): - """This class is needed in order to create OS specific wheels.""" + """Create OS-specific wheels.""" def has_ext_modules(self): + """whether this has external modules.""" return True -nightly = False -if '--nightly' in sys.argv: - nightly = True - sys.argv.remove('--nightly') +NIGHTLY_FLAG = False +if "--nightly" in sys.argv: + NIGHTLY_FLAG = True + sys.argv.remove("--nightly") -project_name = 'tensorflow-quantum' -build_version = CUR_VERSION -if nightly: - project_name = 'tfq-nightly' - build_version = CUR_VERSION + '.dev' + str(date.today()).replace('-', '') +PROJECT_NAME = "tensorflow-quantum" +BUILD_VERSION = CUR_VERSION +if NIGHTLY_FLAG: + PROJECT_NAME = "tfq-nightly" + BUILD_VERSION = CUR_VERSION + ".dev" + str(date.today()).replace("-", "") setup( - name=project_name, - version=build_version, - description= - 'TensorFlow Quantum is a library for hybrid quantum-classical machine learning.', - long_description='\n'.join(DOCLINES[2:]), - author='Google Inc.', - author_email='no-reply@google.com', - url='https://github.com/tensorflow/quantum/', + name=PROJECT_NAME, + version=BUILD_VERSION, + description="Library for hybrid quantum-classical machine learning.", + long_description="\n".join(DOCLINES[2:]), + author="The TensorFlow Quantum Authors", + author_email="tensorflow-quantum-team@google.com", + url="https://github.com/tensorflow/quantum/", packages=find_packages(), install_requires=REQUIRED_PACKAGES, - extras_require={'extras': EXTRA_PACKAGES}, - # Add in any packaged data. 
+ extras_require={"extras": EXTRA_PACKAGES}, include_package_data=True, - #ext_modules=[Extension('_foo', ['stub.cc'])], + # ext_modules=[Extension('_foo', ['stub.cc'])], zip_safe=False, distclass=BinaryDistribution, - # PyPI package information. classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Scientific/Engineering :: Physics', - 'Topic :: Scientific/Engineering :: Quantum Computing', + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Scientific/Engineering :: Physics", + "Topic :: Scientific/Engineering :: Quantum Computing", ], - license='Apache 2.0', - keywords='tensorflow machine learning quantum qml', - cmdclass={'install': InstallPlatlib}) + license="Apache 2.0", + keywords="tensorflow machine learning quantum qml", + cmdclass={"install": InstallPlatlib}, +) diff --git a/requirements.in b/requirements.in new file mode 100644 index 000000000..8e51c0c6d --- /dev/null +++ b/requirements.in @@ -0,0 +1,16 @@ +# Core development requirements for TensorFlow Quantum. This file is processed +# by pip-compile (from pip-tools) to produce requirements.txt. + +cirq-core~=1.3.0 +cirq-google~=1.3.0 +tensorflow>=2.16,<2.17 +tf-keras~=2.16.0 + +# TODO: the next ones are not truly core requirements. A better place should be +# found for them (long with others needed by scripts/*). They're here as a +# stop-gap measure until then. +yapf==0.43.0 +pylint==3.3.3 +nbformat==5.1.3 +nbclient==0.6.5 +tensorflow-docs diff --git a/requirements.txt b/requirements.txt index 9fe2d0446..031782a47 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,280 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe +# +absl-py==2.1.0 + # via + # keras + # tensorboard + # tensorflow + # tensorflow-docs +astor==0.8.1 + # via tensorflow-docs +astroid==3.3.11 + # via pylint +astunparse==1.6.3 + # via tensorflow +attrs==25.4.0 + # via + # jsonschema + # referencing +cachetools==6.2.2 + # via google-auth +certifi==2025.11.12 + # via requests +charset-normalizer==3.4.4 + # via requests cirq-core==1.3.0 + # via + # -r requirements.in + # cirq-google cirq-google==1.3.0 -sympy==1.12 -numpy==1.24.2 # TensorFlow can detect if it was built against other versions. 
+ # via -r requirements.in +contourpy==1.3.2 + # via matplotlib +cycler==0.12.1 + # via matplotlib +dill==0.4.0 + # via pylint +duet==0.2.9 + # via cirq-core +flatbuffers==25.9.23 + # via tensorflow +fonttools==4.60.1 + # via matplotlib +gast==0.6.0 + # via tensorflow +google-api-core[grpc]==2.28.1 + # via cirq-google +google-auth==2.43.0 + # via google-api-core +google-pasta==0.2.0 + # via tensorflow +googleapis-common-protos==1.72.0 + # via + # google-api-core + # grpcio-status +grpcio==1.60.2 + # via + # google-api-core + # grpcio-status + # tensorboard + # tensorflow +grpcio-status==1.60.2 + # via google-api-core +h5py==3.15.1 + # via + # keras + # tensorflow +idna==3.11 + # via requests +ipython-genutils==0.2.0 + # via nbformat +isort==5.13.2 + # via pylint +jinja2==3.1.6 + # via tensorflow-docs +jsonschema==4.25.1 + # via nbformat +jsonschema-specifications==2025.9.1 + # via jsonschema +jupyter-client==8.6.3 + # via nbclient +jupyter-core==5.9.1 + # via + # jupyter-client + # nbformat +keras==3.12.0 + # via tensorflow +kiwisolver==1.4.9 + # via matplotlib +libclang==18.1.1 + # via tensorflow +markdown==3.10 + # via tensorboard +markdown-it-py==4.0.0 + # via rich +markupsafe==3.0.3 + # via + # jinja2 + # werkzeug +matplotlib==3.10.7 + # via cirq-core +mccabe==0.7.0 + # via pylint +mdurl==0.1.2 + # via markdown-it-py +ml-dtypes==0.3.2 + # via + # keras + # tensorflow +mpmath==1.3.0 + # via sympy +namex==0.1.0 + # via keras +nbclient==0.6.5 + # via -r requirements.in nbformat==5.1.3 + # via + # -r requirements.in + # nbclient + # tensorflow-docs +nest-asyncio==1.6.0 + # via nbclient +networkx==3.4.2 + # via cirq-core +numpy==1.26.4 + # via + # cirq-core + # contourpy + # h5py + # keras + # matplotlib + # ml-dtypes + # pandas + # scipy + # tensorboard + # tensorflow +opt-einsum==3.4.0 + # via tensorflow +optree==0.18.0 + # via keras +packaging==25.0 + # via + # keras + # matplotlib + # tensorflow +pandas==2.3.3 + # via cirq-core +pillow==12.0.0 + # via matplotlib +platformdirs==4.5.0 + # via + # jupyter-core + # pylint + # yapf +proto-plus==1.26.1 + # via + # cirq-google + # google-api-core +protobuf==4.25.8 + # via + # cirq-google + # google-api-core + # googleapis-common-protos + # grpcio-status + # proto-plus + # tensorboard + # tensorflow + # tensorflow-docs +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.2 + # via google-auth +pygments==2.19.2 + # via rich pylint==3.3.3 + # via -r requirements.in +pyparsing==3.2.5 + # via matplotlib +python-dateutil==2.9.0.post0 + # via + # jupyter-client + # matplotlib + # pandas +pytz==2025.2 + # via pandas +pyyaml==6.0.3 + # via tensorflow-docs +pyzmq==27.1.0 + # via jupyter-client +referencing==0.37.0 + # via + # jsonschema + # jsonschema-specifications +requests==2.32.5 + # via + # google-api-core + # tensorflow +rich==14.2.0 + # via keras +rpds-py==0.29.0 + # via + # jsonschema + # referencing +rsa==4.9.1 + # via google-auth +scipy==1.15.3 + # via cirq-core +six==1.17.0 + # via + # astunparse + # google-pasta + # python-dateutil + # tensorboard + # tensorflow +sortedcontainers==2.4.0 + # via cirq-core +sympy==1.14.0 + # via cirq-core +tensorboard==2.16.2 + # via tensorflow +tensorboard-data-server==0.7.2 + # via tensorboard +tensorflow==2.16.2 + # via + # -r requirements.in + # tf-keras +tensorflow-docs==2025.2.19.33219 + # via -r requirements.in +tensorflow-io-gcs-filesystem==0.37.1 + # via tensorflow +termcolor==3.2.0 + # via tensorflow +tf-keras==2.16.0 + # via -r requirements.in +tomli==2.3.0 + # via + # pylint + # 
yapf +tomlkit==0.13.3 + # via pylint +tornado==6.5.2 + # via jupyter-client +tqdm==4.67.1 + # via cirq-core +traitlets==5.14.3 + # via + # jupyter-client + # jupyter-core + # nbclient + # nbformat +typing-extensions==4.15.0 + # via + # astroid + # cirq-core + # optree + # referencing + # tensorflow +tzdata==2025.2 + # via pandas +urllib3==2.5.0 + # via requests +werkzeug==3.1.3 + # via tensorboard +wheel==0.45.1 + # via astunparse +wrapt==1.17.3 + # via tensorflow yapf==0.43.0 -tensorflow==2.15.0 + # via -r requirements.in + +# The following packages are considered to be unsafe in a requirements file: +setuptools==68.2.2 + # via + # tensorboard + # tensorflow diff --git a/scripts/benchmark_all.sh b/scripts/benchmark_all.sh old mode 100644 new mode 100755 index cd50209c2..8ee6dd127 --- a/scripts/benchmark_all.sh +++ b/scripts/benchmark_all.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================== echo "Testing benchmarks."; -test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/...)) +test_outputs=$(bazel test -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors "$(bazel query //benchmarks/...)") exit_code=$? if [ "$exit_code" == "0" ]; then @@ -26,5 +26,5 @@ else fi echo "Running preconfigured benchmarks."; -bazel_run=${bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4"} -bazel_run benchmarks/scripts:benchmark_clifford_circuit -- --op_density 1 --n_moments 10 --n_qubits 4 \ No newline at end of file +bazel_run=${bazel run -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4"} +bazel_run benchmarks/scripts:benchmark_clifford_circuit -- --op_density 1 --n_moments 10 --n_qubits 4 diff --git a/scripts/build_pip_package_test.sh b/scripts/build_pip_package_test.sh index 644338b6a..88cc4e4b5 100755 --- a/scripts/build_pip_package_test.sh +++ b/scripts/build_pip_package_test.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -17,9 +17,9 @@ pip install -r requirements.txt # cd tensorflow_quantum -echo "Y\n" | ./configure.sh +printf "y\n" | ./configure.sh -bazel build -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" release:build_pip_package +bazel build -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" release:build_pip_package rm /tmp/tensorflow_quantum/* || echo ok bazel-bin/release/build_pip_package /tmp/tensorflow_quantum/ pip install -U /tmp/tensorflow_quantum/*.whl diff --git a/scripts/ci_validate_tutorials.sh b/scripts/ci_validate_tutorials.sh index e58355faf..4ecb8e6e7 100755 --- a/scripts/ci_validate_tutorials.sh +++ b/scripts/ci_validate_tutorials.sh @@ -14,10 +14,18 @@ # limitations under the License. # ============================================================================== -# Run the tutorials using the installed pip package -pip install jupyter nbclient==0.6.5 jupyter-client==6.1.12 ipython==7.22.0 -# Workaround for ipykernel - see https://github.com/ipython/ipykernel/issues/422 -pip install ipykernel==5.1.1 +set -e + +# Use legacy tf.keras (Keras 2) with TF 2.16 +export TF_USE_LEGACY_KERAS=1 + +# Tools for running notebooks non-interactively +pip install \ + "nbclient==0.6.5" \ + "jupyter-client==7.4.9" \ + "ipython>=8.10.0" \ + "ipykernel>=6.29.0" + # OpenAI Gym pip package needed for the quantum reinforcement learning tutorial pip install gym==0.24.1 # seaborn has also numpy dependency, it requires version >= 0.12.0. @@ -26,8 +34,10 @@ pip install seaborn==0.12.0 pip install -q git+https://github.com/tensorflow/docs # Leave the quantum directory, otherwise errors may occur cd .. + examples_output=$(python3 quantum/scripts/test_tutorials.py) exit_code=$? + if [ "$exit_code" == "0" ]; then exit 0; else diff --git a/scripts/msan_test.sh b/scripts/msan_test.sh index d47e8ccfe..988c623f7 100755 --- a/scripts/msan_test.sh +++ b/scripts/msan_test.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,19 +14,19 @@ # limitations under the License. 
# ============================================================================== echo "Testing All Bazel cc_tests with msan."; -test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" \ +test_outputs=$(bazel test -c opt \ --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" \ --cxxopt="-fsanitize=address" --linkopt="-fsanitize=address" \ --cxxopt="-g" --cxxopt="-O0" \ --notest_keep_going --test_output=errors \ //tensorflow_quantum/core/src:all && \ - bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" \ + bazel test -c opt \ --cxxopt="-mavx2" --cxxopt="-mavx" --cxxopt="-mfma" \ --cxxopt="-fsanitize=address" --linkopt="-fsanitize=address" \ --cxxopt="-g" --cxxopt="-O0" \ --notest_keep_going --test_output=errors \ //tensorflow_quantum/core/src:all && \ - bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" \ + bazel test -c opt \ --cxxopt="-fsanitize=address" --linkopt="-fsanitize=address" \ --cxxopt="-g" --cxxopt="-O0" \ --notest_keep_going --test_output=errors \ @@ -39,4 +39,4 @@ else echo "Testing failed, please correct errors before proceeding." echo "{$test_outputs}" exit 64; -fi \ No newline at end of file +fi diff --git a/scripts/test_all.sh b/scripts/test_all.sh index 5d5405fac..8147ee2a9 100755 --- a/scripts/test_all.sh +++ b/scripts/test_all.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================== echo "Testing All Bazel py_test and cc_tests."; -test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --test_output=errors --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...) +test_outputs=$(bazel test -c opt --test_output=errors --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...) exit_code=$? if [ "$exit_code" == "0" ]; then echo "Testing Complete!"; diff --git a/scripts/test_benchmarks.sh b/scripts/test_benchmarks.sh old mode 100644 new mode 100755 index 07e3adec1..a37d31ec0 --- a/scripts/test_benchmarks.sh +++ b/scripts/test_benchmarks.sh @@ -1,12 +1,12 @@ #!/bin/bash # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,10 +14,10 @@ # limitations under the License. 
# ============================================================================== echo "Testing all Benchmarks."; -bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all) -# test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all)) +bazel test -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all) +# test_outputs=$(bazel test -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all)) bench_outputs=$() -# bench_outputs=$(bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors //benchmarks/scripts:benchmark_clifford_circuit) +# bench_outputs=$(bazel run -c opt --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors //benchmarks/scripts:benchmark_clifford_circuit) exit_code=$? if [ "$exit_code" == "0" ]; then echo "Testing Complete!"; @@ -26,4 +26,4 @@ else echo "Testing failed, please correct errors before proceeding." echo "{$test_outputs}" exit 64; -fi \ No newline at end of file +fi diff --git a/scripts/test_tutorials.py b/scripts/test_tutorials.py index 08a9d85d9..4d06010c6 100644 --- a/scripts/test_tutorials.py +++ b/scripts/test_tutorials.py @@ -13,40 +13,51 @@ # limitations under the License. # ============================================================================== """Module to ensure all notebooks execute without error by pytesting them.""" + +import os import glob import re from absl.testing import parameterized import nbformat import nbclient -import tensorflow as tf + +# Ensure we always use legacy tf.keras (Keras 2) when running tutorials. +# This must be set before importing TensorFlow so it picks up tf_keras. +os.environ.setdefault("TF_USE_LEGACY_KERAS", "1") + +# Pylint doesn't like code before imports, but we need the env var set first. +import tensorflow as tf # pylint: disable=wrong-import-position # Must be run from the directory containing `quantum` repo. NOTEBOOKS = glob.glob("quantum/docs/tutorials/*.ipynb") class ExamplesTest(tf.test.TestCase, parameterized.TestCase): + """Execute all tutorial notebooks and check they run without errors.""" @parameterized.parameters(NOTEBOOKS) def test_notebook(self, path): - """Test that notebooks open/run correctly.""" + """Test that notebooks open and run correctly.""" nb = nbformat.read(path, as_version=4) # Scrub any magic from the notebook before running. for cell in nb.get("cells"): - if cell['cell_type'] == 'code': - src = cell['source'] - # Comment out lines containing '!' but not '!=' - src = re.sub(r'\!(?!=)', r'#!', src) + if cell["cell_type"] == "code": + src = cell["source"] + # Comment out lines containing '!' but not '!='. + src = re.sub(r"\!(?!=)", r"#!", src) # For mnist.ipynb to reduce runtime in test. - src = re.sub('NUM_EXAMPLES ?= ?.*', 'NUM_EXAMPLES = 10', src) - # For quantum_reinforcement_learning.ipynb to reduce runtime in test. - src = re.sub('n_episodes ?= ?.*', 'n_episodes = 50', src) + src = re.sub(r"NUM_EXAMPLES ?= ?.*", "NUM_EXAMPLES = 10", src) + # For quantum_reinforcement_learning.ipynb: + # reduce runtime in test by limiting episodes. 
+ src = re.sub(r"n_episodes ?= ?.*", "n_episodes = 50", src) # For noise.ipynb to reduce runtime in test. - src = re.sub('n_epochs ?= ?.*', 'n_epochs = 2', src) - cell['source'] = src + src = re.sub(r"n_epochs ?= ?.*", "n_epochs = 2", src) + cell["source"] = src _ = nbclient.execute(nb, timeout=900, kernel_name="python3") + if __name__ == "__main__": tf.test.main() diff --git a/tensorflow_quantum/__init__.py b/tensorflow_quantum/__init__.py index 7c781f882..3b6a0ad7c 100644 --- a/tensorflow_quantum/__init__.py +++ b/tensorflow_quantum/__init__.py @@ -64,4 +64,4 @@ del core # pylint: enable=undefined-variable -__version__ = '0.7.2' +__version__ = '0.7.4' diff --git a/tensorflow_quantum/core/ops/BUILD b/tensorflow_quantum/core/ops/BUILD index 504cc2657..4df629f1d 100644 --- a/tensorflow_quantum/core/ops/BUILD +++ b/tensorflow_quantum/core/ops/BUILD @@ -546,6 +546,7 @@ py_library( "//tensorflow_quantum/core/proto:pauli_sum_py_proto", "//tensorflow_quantum/core/proto:projector_sum_py_proto", "//tensorflow_quantum/core/serialize:serializer", + "@pypi//tensorflow", ], ) @@ -567,6 +568,7 @@ py_library( srcs_version = "PY3", deps = [ "//tensorflow_quantum/core/serialize:serializer", + "@pypi//tensorflow", ], ) @@ -628,5 +630,5 @@ py_library( name = "load_module", srcs = ["load_module.py"], srcs_version = "PY3", - deps = [], + deps = ["@pypi//tensorflow"], ) diff --git a/tensorflow_quantum/datasets/BUILD b/tensorflow_quantum/datasets/BUILD index cabfb790f..f0363fb58 100644 --- a/tensorflow_quantum/datasets/BUILD +++ b/tensorflow_quantum/datasets/BUILD @@ -19,12 +19,14 @@ py_library( name = "cluster_state", srcs = ["cluster_state.py"], srcs_version = "PY3", + deps = ["@pypi//tensorflow"], ) py_library( name = "spin_system", srcs = ["spin_system.py"], srcs_version = "PY3", + deps = ["@pypi//tensorflow"], ) py_test( diff --git a/tensorflow_quantum/python/BUILD b/tensorflow_quantum/python/BUILD index d69396775..d474bd85a 100644 --- a/tensorflow_quantum/python/BUILD +++ b/tensorflow_quantum/python/BUILD @@ -22,6 +22,7 @@ py_library( name = "quantum_context", srcs = ["quantum_context.py"], srcs_version = "PY3", + deps = ["@pypi//tensorflow"], ) py_test( @@ -38,6 +39,7 @@ py_library( deps = [ "//tensorflow_quantum/core/proto:program_py_proto", "//tensorflow_quantum/core/serialize:serializer", + "@pypi//tensorflow", ], ) diff --git a/tensorflow_quantum/python/differentiators/BUILD b/tensorflow_quantum/python/differentiators/BUILD index 9e5f28aab..576101b94 100644 --- a/tensorflow_quantum/python/differentiators/BUILD +++ b/tensorflow_quantum/python/differentiators/BUILD @@ -42,6 +42,7 @@ py_library( name = "differentiator", srcs = ["differentiator.py"], srcs_version = "PY3", + deps = ["@pypi//tensorflow"], ) py_library( diff --git a/third_party/tf/auditwheel b/third_party/tf/auditwheel old mode 100644 new mode 100755 index 30f511c86..2056b897b --- a/third_party/tf/auditwheel +++ b/third_party/tf/auditwheel @@ -1,9 +1,21 @@ -TF_SHARED_LIBRARY_NAME=$(grep -r TF_SHARED_LIBRARY_NAME .bazelrc | awk -F= '{print$2}') +#!/usr/bin/env bash -POLICY_JSON=$(find / -name manylinux-policy.json) +set -e -sed -i "s/libresolv.so.2\"/libresolv.so.2\", $TF_SHARED_LIBRARY_NAME/g" $POLICY_JSON +LIB_NAME=$(grep -r TF_SHARED_LIBRARY_NAME .tf_configure.bazelrc | \ + awk -F= '{print$3}') -cat $POLICY_JSON +# Find the policy file inside the Docker container environment. 
+PKG_ROOT=$(pipx runpip auditwheel show auditwheel | \ + grep "^Location:" | \ + sed 's/^Location: //') + +POLICY_FILE="${PKG_ROOT}/auditwheel/policy/manylinux-policy.json" +echo "Found policy file at ${POLICY_FILE}" + +# Splice in the name of the TensorFlow shared library file. +sed -i "s|libresolv.so.2\"|libresolv.so.2\", \"${LIB_NAME}\"|g" "${POLICY_FILE}" + +cat "${POLICY_FILE}" auditwheel $@ diff --git a/third_party/tf/tf.patch b/third_party/tf/tf.patch index 4ce7dc753..e32a38ad3 100644 --- a/third_party/tf/tf.patch +++ b/third_party/tf/tf.patch @@ -1,74 +1,75 @@ -diff --git tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -index a2bdd6a7eed..ec25c23d8d4 100644 ---- tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -+++ tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl -@@ -2,7 +2,7 @@ +# Patch used for tf 2.15, for tf 2.16> it is not needed anymore. +# diff --git tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# index a2bdd6a7eed..ec25c23d8d4 100644 +# --- tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# +++ tensorflow/tools/toolchains/cpus/aarch64/aarch64_compiler_configure.bzl +# @@ -2,7 +2,7 @@ - load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64.bzl", "remote_aarch64_configure") - load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure") --load("//third_party/py:python_configure.bzl", "remote_python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "remote_python_configure") +# load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64.bzl", "remote_aarch64_configure") +# load("//third_party/remote_config:remote_platform_configure.bzl", "remote_platform_configure") +# -load("//third_party/py:python_configure.bzl", "remote_python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "remote_python_configure") - def ml2014_tf_aarch64_configs(name_container_map, env): - for name, container in name_container_map.items(): -diff --git tensorflow/tools/toolchains/remote_config/rbe_config.bzl tensorflow/tools/toolchains/remote_config/rbe_config.bzl -index 9f71a414bf7..57f70752323 100644 ---- tensorflow/tools/toolchains/remote_config/rbe_config.bzl -+++ tensorflow/tools/toolchains/remote_config/rbe_config.bzl -@@ -1,6 +1,6 @@ - """Macro that creates external repositories for remote config.""" +# def ml2014_tf_aarch64_configs(name_container_map, env): +# for name, container in name_container_map.items(): +# diff --git tensorflow/tools/toolchains/remote_config/rbe_config.bzl tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# index 9f71a414bf7..57f70752323 100644 +# --- tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# +++ tensorflow/tools/toolchains/remote_config/rbe_config.bzl +# @@ -1,6 +1,6 @@ +# """Macro that creates external repositories for remote config.""" --load("//third_party/py:python_configure.bzl", "local_python_configure", "remote_python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "local_python_configure", "remote_python_configure") - load("//third_party/gpus:cuda_configure.bzl", "remote_cuda_configure") - load("//third_party/nccl:nccl_configure.bzl", "remote_nccl_configure") - load("//third_party/gpus:rocm_configure.bzl", "remote_rocm_configure") -diff --git tensorflow/workspace2.bzl 
tensorflow/workspace2.bzl -index 7e9faa558a4..5b18cb0969a 100644 ---- tensorflow/workspace2.bzl -+++ tensorflow/workspace2.bzl -@@ -8,7 +8,7 @@ load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") - load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") - load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") - load("//third_party/git:git_configure.bzl", "git_configure") --load("//third_party/py:python_configure.bzl", "python_configure") -+load("//third_party/py/non_hermetic:python_configure.bzl", "python_configure") - load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") - load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl", "aarch64_compiler_configure") - load("//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl", "arm_compiler_configure") -diff --git third_party/py/non_hermetic/python_configure.bzl third_party/py/non_hermetic/python_configure.bzl -index 300cbfb6c71..09d98505dd9 100644 ---- third_party/py/non_hermetic/python_configure.bzl -+++ third_party/py/non_hermetic/python_configure.bzl -@@ -206,7 +206,7 @@ def _create_local_python_repository(repository_ctx): - # Resolve all labels before doing any real work. Resolving causes the - # function to be restarted with all previous state being lost. This - # can easily lead to a O(n^2) runtime in the number of labels. -- build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) -+ build_tpl = repository_ctx.path(Label("//third_party/py/non_hermetic:BUILD.tpl")) +# -load("//third_party/py:python_configure.bzl", "local_python_configure", "remote_python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "local_python_configure", "remote_python_configure") +# load("//third_party/gpus:cuda_configure.bzl", "remote_cuda_configure") +# load("//third_party/nccl:nccl_configure.bzl", "remote_nccl_configure") +# load("//third_party/gpus:rocm_configure.bzl", "remote_rocm_configure") +# diff --git tensorflow/workspace2.bzl tensorflow/workspace2.bzl +# index 7e9faa558a4..5b18cb0969a 100644 +# --- tensorflow/workspace2.bzl +# +++ tensorflow/workspace2.bzl +# @@ -8,7 +8,7 @@ load("//third_party/gpus:rocm_configure.bzl", "rocm_configure") +# load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure") +# load("//third_party/nccl:nccl_configure.bzl", "nccl_configure") +# load("//third_party/git:git_configure.bzl", "git_configure") +# -load("//third_party/py:python_configure.bzl", "python_configure") +# +load("//third_party/py/non_hermetic:python_configure.bzl", "python_configure") +# load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure") +# load("//tensorflow/tools/toolchains:cpus/aarch64/aarch64_compiler_configure.bzl", "aarch64_compiler_configure") +# load("//tensorflow/tools/toolchains:cpus/arm/arm_compiler_configure.bzl", "arm_compiler_configure") +# diff --git third_party/py/non_hermetic/python_configure.bzl third_party/py/non_hermetic/python_configure.bzl +# index 300cbfb6c71..09d98505dd9 100644 +# --- third_party/py/non_hermetic/python_configure.bzl +# +++ third_party/py/non_hermetic/python_configure.bzl +# @@ -206,7 +206,7 @@ def _create_local_python_repository(repository_ctx): +# # Resolve all labels before doing any real work. Resolving causes the +# # function to be restarted with all previous state being lost. This +# # can easily lead to a O(n^2) runtime in the number of labels. 
+# - build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl")) +# + build_tpl = repository_ctx.path(Label("//third_party/py/non_hermetic:BUILD.tpl")) - python_bin = get_python_bin(repository_ctx) - _check_python_bin(repository_ctx, python_bin) -diff --git third_party/py/numpy/BUILD third_party/py/numpy/BUILD -index 97c7907fc38..c80cc5287bc 100644 ---- third_party/py/numpy/BUILD -+++ third_party/py/numpy/BUILD -@@ -2,14 +2,15 @@ licenses(["restricted"]) +# python_bin = get_python_bin(repository_ctx) +# _check_python_bin(repository_ctx, python_bin) +# diff --git third_party/py/numpy/BUILD third_party/py/numpy/BUILD +# index 97c7907fc38..c80cc5287bc 100644 +# --- third_party/py/numpy/BUILD +# +++ third_party/py/numpy/BUILD +# @@ -2,14 +2,15 @@ licenses(["restricted"]) - package(default_visibility = ["//visibility:public"]) +# package(default_visibility = ["//visibility:public"]) --alias( -+py_library( - name = "numpy", -- actual = "@pypi_numpy//:pkg", -+ srcs = ["tf_numpy_dummy.py"], -+ srcs_version = "PY3", - ) +# -alias( +# +py_library( +# name = "numpy", +# - actual = "@pypi_numpy//:pkg", +# + srcs = ["tf_numpy_dummy.py"], +# + srcs_version = "PY3", +# ) - alias( - name = "headers", -- actual = "@pypi_numpy//:numpy_headers", -+ actual = "@local_config_python//:numpy_headers", - ) +# alias( +# name = "headers", +# - actual = "@pypi_numpy//:numpy_headers", +# + actual = "@local_config_python//:numpy_headers", +# ) - genrule( \ No newline at end of file +# genrule( \ No newline at end of file