From 39b1e678f9b26cbd551ca8927f30b8798b58573c Mon Sep 17 00:00:00 2001 From: Weiguo Ma Date: Thu, 14 Aug 2025 16:59:35 +0800 Subject: [PATCH 1/2] PR Draft for Qudit inferface --- check_all.sh | 0 result.txt | 1366 +++++++++++++++++ tensorcircuit/abstractcircuit.py | 255 +-- tensorcircuit/applications/layers.py | 10 +- tensorcircuit/applications/utils.py | 8 +- tensorcircuit/backends/abstract_backend.py | 56 + tensorcircuit/backends/cupy_backend.py | 2 +- tensorcircuit/backends/jax_backend.py | 8 +- tensorcircuit/backends/numpy_backend.py | 8 +- tensorcircuit/backends/pytorch_backend.py | 6 + tensorcircuit/backends/tensorflow_backend.py | 8 +- tensorcircuit/basecircuit.py | 263 +++- tensorcircuit/channels.py | 15 +- tensorcircuit/circuit.py | 150 +- tensorcircuit/cons.py | 10 +- tensorcircuit/densitymatrix.py | 22 +- tensorcircuit/gates/__init__.py | 358 +++++ .../{gates.py => gates/qubit_impl.py} | 352 ++--- tensorcircuit/gates/qudit_impl.py | 485 ++++++ tensorcircuit/mpscircuit.py | 177 ++- tensorcircuit/noisemodel.py | 7 + tensorcircuit/quantum.py | 438 ++++-- tensorcircuit/results/counts.py | 113 +- tensorcircuit/results/qem/qem_methods.py | 2 +- tensorcircuit/simplify.py | 4 +- tensorcircuit/stabilizercircuit.py | 11 +- tensorcircuit/templates/blocks.py | 6 +- tensorcircuit/templates/measurements.py | 4 +- tensorcircuit/translation.py | 14 +- test.ipynb | 122 ++ 30 files changed, 3561 insertions(+), 719 deletions(-) mode change 100644 => 100755 check_all.sh create mode 100644 result.txt create mode 100644 tensorcircuit/gates/__init__.py rename tensorcircuit/{gates.py => gates/qubit_impl.py} (79%) create mode 100644 tensorcircuit/gates/qudit_impl.py create mode 100644 test.ipynb diff --git a/check_all.sh b/check_all.sh old mode 100644 new mode 100755 diff --git a/result.txt b/result.txt new file mode 100644 index 00000000..bba50eff --- /dev/null +++ b/result.txt @@ -0,0 +1,1366 @@ +/opt/miniconda3/envs/testTC/lib/python3.12/site-packages/pytest_benchmark/logger.py:39: PytestBenchmarkWarning: Benchmarks are automatically disabled because xdist plugin is active.Benchmarks cannot be performed reliably in a parallelized environment. + warner(PytestBenchmarkWarning(text)) +============================= test session starts ============================== +platform darwin -- Python 3.12.9, pytest-7.4.4, pluggy-1.6.0 +benchmark: 5.0.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000) +rootdir: /Users/weiguoma/PyProgram/tensorcircuit-ng +configfile: pyproject.toml +plugins: benchmark-5.0.1, xdist-3.8.0, cov-6.2.1, lazy-fixture-0.6.3 +created: 14/14 workers +14 workers [772 items] + +...s.....ssss......s.........ss..s..s........s...s...............s..s.s. [ 9%] +s..s.......s.sss.....................F......F...........s.s.s.......s..s [ 18%] +.................s..................sss..s......s...s..sssssssssssssssss [ 27%] +sssssssssssssssss............................F.....sss.................. [ 37%] +.........s.....s..sssss................................................. [ 46%] +...................................................s.s.................. [ 55%] +s...............................s.F...F.....F..........FFFFFF..F........ [ 65%] +.........F...F.....................F................xs.................. [ 74%] +..ssssssssssss....sss................sss...................F.F..s....... [ 83%] +................................F....................................... 
[ 93%] +...............................................s...s [100%] +=================================== FAILURES =================================== +__________________________ test_circuit_to_json[tfb] ___________________________ +[gw4] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_to_json(backend): + c = tc.Circuit(3) + c.h(0) + c.CNOT(1, 2) + c.rxx(0, 2, theta=0.3) + c.crx(0, 1, theta=-0.8) + c.r(1, theta=tc.backend.ones([]), alpha=0.2) + c.toffoli(0, 2, 1) + c.ccnot(0, 1, 2) + c.multicontrol(1, 2, 0, ctrl=[0, 1], unitary=tc.gates._x_matrix) +> s = c.to_json() + +tests/test_circuit.py:1497: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tensorcircuit/abstractcircuit.py:1047: in to_json + return json.dumps(tcqasm) +/opt/miniconda3/envs/testTC/lib/python3.12/json/__init__.py:231: in dumps + return _default_encoder.encode(obj) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:200: in encode + chunks = self.iterencode(o, _one_shot=True) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:258: in iterencode + return _iterencode(o, 0) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +o = np.float32(1.5707964) + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + # Let the base class default method raise the TypeError + return super().default(o) + + """ +> raise TypeError(f'Object of type {o.__class__.__name__} ' + f'is not JSON serializable') +E TypeError: Object of type float32 is not JSON serializable + +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:180: TypeError +------------------------------ Captured log call ------------------------------- +WARNING tensorcircuit.translation:translation.py:654 experimental feature subject to fast protocol and implementation change, try on your own risk +__________________________ test_circuit_to_json[jaxb] __________________________ +[gw4] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_to_json(backend): + c = tc.Circuit(3) + c.h(0) + c.CNOT(1, 2) + c.rxx(0, 2, theta=0.3) + c.crx(0, 1, theta=-0.8) + c.r(1, theta=tc.backend.ones([]), alpha=0.2) + c.toffoli(0, 2, 1) + c.ccnot(0, 1, 2) + c.multicontrol(1, 2, 0, ctrl=[0, 1], unitary=tc.gates._x_matrix) +> s = c.to_json() + +tests/test_circuit.py:1497: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tensorcircuit/abstractcircuit.py:1047: in to_json + return json.dumps(tcqasm) +/opt/miniconda3/envs/testTC/lib/python3.12/json/__init__.py:231: in dumps + return _default_encoder.encode(obj) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:200: in encode + chunks = self.iterencode(o, _one_shot=True) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:258: in iterencode + return _iterencode(o, 0) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +o = np.float32(1.5707964) + + def default(self, o): + """Implement this 
method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + # Let the base class default method raise the TypeError + return super().default(o) + + """ +> raise TypeError(f'Object of type {o.__class__.__name__} ' + f'is not JSON serializable') +E TypeError: Object of type float32 is not JSON serializable + +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:180: TypeError +------------------------------ Captured log call ------------------------------- +WARNING tensorcircuit.translation:translation.py:654 experimental feature subject to fast protocol and implementation change, try on your own risk +__________________________ test_mult_qubit_kraus[tfb] __________________________ +[gw9] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("jaxb"), lf("tfb")]) + def test_mult_qubit_kraus(backend): + xx = np.array( + [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]], dtype=np.complex64 + ) / np.sqrt(2) + yz = np.array( + [[0, 0, -1.0j, 0], [0, 0, 0, 1.0j], [1.0j, 0, 0, 0], [0, -1.0j, 0, 0]], + dtype=np.complex64, + ) / np.sqrt(2) + + def forward(theta): + c = tc.DMCircuit_reference(3) + c.H(0) + c.rx(1, theta=theta) + c.apply_general_kraus( + [ + tc.gates.Gate(xx.reshape([2, 2, 2, 2])), + tc.gates.Gate(yz.reshape([2, 2, 2, 2])), + ], + [(0, 1), (0, 1)], + ) + c.H(1) + return tc.backend.real(tc.backend.sum(c.densitymatrix())) + + theta = tc.num_to_tensor(0.2) + vg = tc.backend.value_and_grad(forward) +> _, g1 = vg(theta) + +tests/test_dmcircuit.py:156: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tensorcircuit/backends/tensorflow_backend.py:851: in wrapper + y = f(*args, **kws) +tests/test_dmcircuit.py:144: in forward + c.apply_general_kraus( +tensorcircuit/densitymatrix.py:221: in apply_general_kraus + dd = dmc.densitymatrix() +tensorcircuit/densitymatrix.py:257: in densitymatrix + nodes, d_edges = self._copy_dm_tensor(conj=False, reuse=reuse) +tensorcircuit/basecircuit.py:282: in _copy_state_tensor + t = contractor(nodes, output_edge_order=d_edges) +tensorcircuit/cons.py:755: in custom + return _base(nodes, alg, output_edge_order, ignore_edge_order) +tensorcircuit/cons.py:681: in _base + new_node = tn.contract_between(nodes[a], nodes[b], allow_outer_product=True) +/opt/miniconda3/envs/testTC/lib/python3.12/site-packages/tensornetwork/network_components.py:2232: in contract_between + new_tensor = backend.tensordot(node1.tensor, node2.tensor, [axes1, axes2]) +tensorcircuit/backends/tensorflow_backend.py:78: in _tensordot_tf + return tf.tensordot(a, b, axes) +/opt/miniconda3/envs/testTC/lib/python3.12/site-packages/tensorflow/python/util/traceback_utils.py:153: in error_handler + raise e.with_traceback(filtered_tb) from None +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +e = _NotOkStatusException(), name = None + + def raise_from_not_ok_status(e, name) -> NoReturn: + e.message += (" name: " + str(name if name is not None else "")) +> raise core._status_to_exception(e) from None # pylint: disable=protected-access +E tensorflow.python.framework.errors_impl.InvalidArgumentError: cannot compute MatMul as input #1(zero-based) was expected to be a complex64 
tensor but is a complex128 tensor [Op:MatMul] name: + +/opt/miniconda3/envs/testTC/lib/python3.12/site-packages/tensorflow/python/framework/ops.py:6006: InvalidArgumentError +___________________________ test_circuits[tfb-highp] ___________________________ +[gw4] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None, dtype = None + + @pytest.mark.parametrize( + "backend, dtype", [(lf("tfb"), lf("highp")), (lf("jaxb"), lf("highp"))] + ) + def test_circuits(backend, dtype): + circuits = get_test_circuits() + do_test_canonical(circuits) + do_test_wavefunction(circuits) + do_test_truncation(circuits, 0.902663090851, 0.910305380327) + do_test_amplitude(circuits) +> do_test_expectation(circuits) + +tests/test_mpscircuit.py:304: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tests/test_mpscircuit.py:170: in do_test_expectation + exp_mps = mps_exact.expectation(*gates) +tensorcircuit/mpscircuit.py:972: in expectation + mps = self.copy() +tensorcircuit/mpscircuit.py:821: in copy + result = self.copy_without_tensor() +tensorcircuit/mpscircuit.py:810: in copy_without_tensor + copied_value = copy(info[key]) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = + + def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + if issubclass(cls, type): + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier is not None: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor is not None: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: +> rv = reductor(4) +E TypeError: cannot pickle 'module' object + +/opt/miniconda3/envs/testTC/lib/python3.12/copy.py:87: TypeError +----------------------------- Captured stdout call ----------------------------- +0.9026630908518938 0.9103053803271294 +______________________ test_simple_circuits_ad[tfb-highp] ______________________ +[gw2] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None, dtype = None + + @pytest.mark.parametrize( + "backend, dtype", [(lf("tfb"), lf("highp")), (lf("jaxb"), lf("highp"))] + ) + def test_simple_circuits_ad(backend, dtype): + def expec(params): + mps = tc.MPSCircuit(N, split=split) + mps.rx(0, theta=params[0]) + mps.cx(0, 1) + mps.cx(1, 2) + mps.ry(2, theta=params[1]) + mps.rzz(1, 3, theta=params[2]) + x = [0, 2] + z = [1] + exp = mps.expectation_ps(x=x, z=z) + return tc.backend.real(exp) + + params = tc.backend.ones((3,), dtype=tc.dtypestr) + expec_vg = tc.backend.value_and_grad(expec) + expec_vg_jit = tc.backend.jit(expec_vg) +> exp = expec(params) + +tests/test_mpscircuit.py:362: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tests/test_mpscircuit.py:356: in expec + exp = mps.expectation_ps(x=x, z=z) +tensorcircuit/abstractcircuit.py:1392: in expectation_ps + return self.expectation( +tensorcircuit/mpscircuit.py:972: in expectation + mps = self.copy() +tensorcircuit/mpscircuit.py:821: in copy + result = self.copy_without_tensor() +tensorcircuit/mpscircuit.py:810: in copy_without_tensor + copied_value = copy(info[key]) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = + + def copy(x): + """Shallow copy operation on arbitrary Python objects. 
+ + See the module's __doc__ string for more info. + """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + if issubclass(cls, type): + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier is not None: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor is not None: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: +> rv = reductor(4) +E TypeError: cannot pickle 'module' object + +/opt/miniconda3/envs/testTC/lib/python3.12/copy.py:87: TypeError +_________________________ test_distrubuted_contractor __________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +jaxb = None + + def test_distrubuted_contractor(jaxb): + def nodes_fn(params): + c = tc.Circuit(4) + c.rx(range(4), theta=params["x"]) + c.cnot([0, 1, 2], [1, 2, 3]) + c.ry(range(4), theta=params["y"]) + return c.expectation_before([tc.gates.z(), [-1]], reuse=False) + + params = {"x": np.ones([4]), "y": 0.3 * np.ones([4])} +> dc = experimental.DistributedContractor( + nodes_fn, + params, + { + "slicing_reconf_opts": {"target_size": 2**3}, + "max_repeats": 8, + "minimize": "write", + "parallel": False, + }, + ) + +tests/test_miscs.py:264: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +nodes_fn = .nodes_fn at 0x34f1a0720> +params = {'x': array([1., 1., 1., 1.]), 'y': array([0.3, 0.3, 0.3, 0.3])} +cotengra_options = {'max_repeats': 8, 'minimize': 'write', 'parallel': False, 'slicing_reconf_opts': {'target_size': 8}} +devices = None + + def __init__( + self, + nodes_fn: Callable[[Tensor], List[Gate]], + params: Tensor, + cotengra_options: Optional[Dict[str, Any]] = None, + devices: Optional[List[Any]] = None, + ) -> None: + global jaxlib + global ctg + + logger.info("Initializing DistributedContractor...") +> import cotengra as ctg +E ModuleNotFoundError: No module named 'cotengra' + +tensorcircuit/experimental.py:531: ModuleNotFoundError +_________________________ test_benchmark_circuits[tfb] _________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_benchmark_circuits(backend): + # QAOA + graph = [(2, 0), (0, 3), (1, 2)] + weight = [1] * len(graph) + params = np.array([[1, 1]]) + + _ = benchmark_circuits.QAOA_circuit(graph, weight, params) + + # mirror circuit + # return circuit and ideal counts {"01000":1} +> _, _ = benchmark_circuits.mirror_circuit( + depth=5, two_qubit_gate_prob=1, connectivity_graph=nx.complete_graph(3), seed=20 + ) + +tests/test_qem.py:31: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +depth = 5, two_qubit_gate_prob = 1 +connectivity_graph = +seed = 20, two_qubit_gate_name = 'CNOT' + + def mirror_circuit( + depth: int, + two_qubit_gate_prob: float, + connectivity_graph: nx.Graph, + seed: int, + two_qubit_gate_name: str = "CNOT", + ) -> Tuple[Any, Dict[str, float]]: + # Measuring the Capabilities of Quantum Computers https://arxiv.org/pdf/2008.11294.pdf +> cirq, bitstring_list = generate_mirror_circuit( + nlayers=depth, + two_qubit_gate_prob=two_qubit_gate_prob, + connectivity_graph=connectivity_graph, + two_qubit_gate_name=two_qubit_gate_name, + seed=seed, + ) +E NameError: name 'generate_mirror_circuit' is not defined + +tensorcircuit/results/qem/benchmark_circuits.py:67: NameError 
+________________________ test_benchmark_circuits[jaxb] _________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_benchmark_circuits(backend): + # QAOA + graph = [(2, 0), (0, 3), (1, 2)] + weight = [1] * len(graph) + params = np.array([[1, 1]]) + + _ = benchmark_circuits.QAOA_circuit(graph, weight, params) + + # mirror circuit + # return circuit and ideal counts {"01000":1} +> _, _ = benchmark_circuits.mirror_circuit( + depth=5, two_qubit_gate_prob=1, connectivity_graph=nx.complete_graph(3), seed=20 + ) + +tests/test_qem.py:31: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +depth = 5, two_qubit_gate_prob = 1 +connectivity_graph = +seed = 20, two_qubit_gate_name = 'CNOT' + + def mirror_circuit( + depth: int, + two_qubit_gate_prob: float, + connectivity_graph: nx.Graph, + seed: int, + two_qubit_gate_name: str = "CNOT", + ) -> Tuple[Any, Dict[str, float]]: + # Measuring the Capabilities of Quantum Computers https://arxiv.org/pdf/2008.11294.pdf +> cirq, bitstring_list = generate_mirror_circuit( + nlayers=depth, + two_qubit_gate_prob=two_qubit_gate_prob, + connectivity_graph=connectivity_graph, + two_qubit_gate_name=two_qubit_gate_name, + seed=seed, + ) +E NameError: name 'generate_mirror_circuit' is not defined + +tensorcircuit/results/qem/benchmark_circuits.py:67: NameError +________________________________ test_zne[tfb] _________________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_zne(backend): + c = tc.Circuit(2) + for _ in range(3): + c.rx(range(2), theta=0.4) + + error1 = tc.channels.generaldepolarizingchannel(0.01, 1) + noise_conf = NoiseConf() + noise_conf.add_noise("rx", error1) + + def execute(circuit): + value = circuit.expectation_ps(z=[0], noise_conf=noise_conf, nmc=10000) + return value + + random_state = np.random.RandomState(0) + noise_scaling_function = partial( +> zne_option.scaling.fold_gates_at_random, + # fidelities = {"single": 1.0}, + random_state=random_state, + ) +E AttributeError: 'NoneType' object has no attribute 'scaling' + +tests/test_qem.py:62: AttributeError +________________________________ test_zne[jaxb] ________________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_zne(backend): + c = tc.Circuit(2) + for _ in range(3): + c.rx(range(2), theta=0.4) + + error1 = tc.channels.generaldepolarizingchannel(0.01, 1) + noise_conf = NoiseConf() + noise_conf.add_noise("rx", error1) + + def execute(circuit): + value = circuit.expectation_ps(z=[0], noise_conf=noise_conf, nmc=10000) + return value + + random_state = np.random.RandomState(0) + noise_scaling_function = partial( +> zne_option.scaling.fold_gates_at_random, + # fidelities = {"single": 1.0}, + random_state=random_state, + ) +E AttributeError: 'NoneType' object has no attribute 'scaling' + +tests/test_qem.py:62: AttributeError +_________________________________ test_dd[tfb] _________________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_dd(backend): + c = tc.Circuit(2) + for _ in range(3): + c.rx(range(2), theta=0.4) + + def 
execute(circuit): + value = circuit.expectation_ps(z=[0]) + return value + + def execute2(circuit): + key = tc.backend.get_random_state(42) + count = circuit.sample( + batch=1000, allow_state=True, format_="count_dict_bin", random_generator=key + ) + return count + +> _ = apply_dd( + circuit=c, + executor=execute, + rule=["X", "X"], + rule_args={"spacing": -1}, + full_output=True, + ignore_idle_qubit=True, + fulldd=False, + ) + +tests/test_qem.py:102: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +circuit = +executor = .execute at 0x34ef9f2e0>, rule = ['X', 'X'] +rule_args = {'spacing': -1}, num_trials = 1, full_output = True +ignore_idle_qubit = True, fulldd = False, iscount = False + + def apply_dd( + circuit: Any, + executor: Callable[[Any], Any], + rule: Union[Callable[[int], Any], List[str]], + rule_args: Optional[Dict[str, Any]] = None, + num_trials: int = 1, + full_output: bool = False, + ignore_idle_qubit: bool = True, + fulldd: bool = False, + iscount: bool = False, + ) -> Union[ + float, Tuple[float, List[Any]], Dict[str, float], Tuple[Dict[str, float], List[Any]] + ]: + """ + Apply dynamic decoupling (DD) and return the mitigated results. + + + :param circuit: The aim circuit. + :type circuit: Any + :param executor: A executor that executes a circuit and return results. + :type executor: Callable[[Any], Any] + :param rule: The rule to construct DD sequence, can use default rule "dd_option.rules.xx" + or custom rule "['X','X']" + :type rule: Union[Callable[[int], Any], List[str]] + :param rule_args:An optional dictionary of keyword arguments for ``rule``, defaults to {}. + :type rule_args: Dict[str, Any], optional + :param num_trials: The number of independent experiments to average over, defaults to 1 + :type num_trials: int, optional + :param full_output: If ``False`` only the mitigated expectation value is + returned. If ``True`` a dictionary containing all DD data is + returned too, defaults to False + :type full_output: bool, optional + :param ig_idle_qubit: ignore the DD sequences that added to unused qubits, defaults to True + :type ig_idle_qubit: bool, optional + :param fulldd: dd sequence full fill the idle circuits, defaults to False + :type fulldd: bool, optional + :param iscount: whether the output is bit string, defaults to False + :type iscount: bool, optional + :return: mitigated expectation value or mitigated expectation value and DD circuit information + :rtype: Union[float, Tuple[float, Dict[str, Any]]] + """ + if rule_args is None: + rule_args = {} + + def dd_rule(slack_length: int, spacing: int = -1) -> Any: + """ + Set DD rule. + + :param slack_length: Length of idle window to fill. Automatically calculated for a circuit. 
+ :type slack_length: int + :param spacing: How many identity spacing gates to apply between dynamical + decoupling gates, defaults to -1 + :type spacing: int, optional + """ + dd_sequence = dd_option.rules.general_rule( + slack_length=slack_length, + spacing=spacing, + gates=gates, + ) + return dd_sequence + + if isinstance(rule, list): +> import cirq +E ModuleNotFoundError: No module named 'cirq' + +tensorcircuit/results/qem/qem_methods.py:207: ModuleNotFoundError +________________________________ test_dd[jaxb] _________________________________ +[gw0] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("tfb"), lf("jaxb")]) + def test_dd(backend): + c = tc.Circuit(2) + for _ in range(3): + c.rx(range(2), theta=0.4) + + def execute(circuit): + value = circuit.expectation_ps(z=[0]) + return value + + def execute2(circuit): + key = tc.backend.get_random_state(42) + count = circuit.sample( + batch=1000, allow_state=True, format_="count_dict_bin", random_generator=key + ) + return count + +> _ = apply_dd( + circuit=c, + executor=execute, + rule=["X", "X"], + rule_args={"spacing": -1}, + full_output=True, + ignore_idle_qubit=True, + fulldd=False, + ) + +tests/test_qem.py:102: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +circuit = +executor = .execute at 0x34f595440>, rule = ['X', 'X'] +rule_args = {'spacing': -1}, num_trials = 1, full_output = True +ignore_idle_qubit = True, fulldd = False, iscount = False + + def apply_dd( + circuit: Any, + executor: Callable[[Any], Any], + rule: Union[Callable[[int], Any], List[str]], + rule_args: Optional[Dict[str, Any]] = None, + num_trials: int = 1, + full_output: bool = False, + ignore_idle_qubit: bool = True, + fulldd: bool = False, + iscount: bool = False, + ) -> Union[ + float, Tuple[float, List[Any]], Dict[str, float], Tuple[Dict[str, float], List[Any]] + ]: + """ + Apply dynamic decoupling (DD) and return the mitigated results. + + + :param circuit: The aim circuit. + :type circuit: Any + :param executor: A executor that executes a circuit and return results. + :type executor: Callable[[Any], Any] + :param rule: The rule to construct DD sequence, can use default rule "dd_option.rules.xx" + or custom rule "['X','X']" + :type rule: Union[Callable[[int], Any], List[str]] + :param rule_args:An optional dictionary of keyword arguments for ``rule``, defaults to {}. + :type rule_args: Dict[str, Any], optional + :param num_trials: The number of independent experiments to average over, defaults to 1 + :type num_trials: int, optional + :param full_output: If ``False`` only the mitigated expectation value is + returned. If ``True`` a dictionary containing all DD data is + returned too, defaults to False + :type full_output: bool, optional + :param ig_idle_qubit: ignore the DD sequences that added to unused qubits, defaults to True + :type ig_idle_qubit: bool, optional + :param fulldd: dd sequence full fill the idle circuits, defaults to False + :type fulldd: bool, optional + :param iscount: whether the output is bit string, defaults to False + :type iscount: bool, optional + :return: mitigated expectation value or mitigated expectation value and DD circuit information + :rtype: Union[float, Tuple[float, Dict[str, Any]]] + """ + if rule_args is None: + rule_args = {} + + def dd_rule(slack_length: int, spacing: int = -1) -> Any: + """ + Set DD rule. + + :param slack_length: Length of idle window to fill. Automatically calculated for a circuit. 
+ :type slack_length: int + :param spacing: How many identity spacing gates to apply between dynamical + decoupling gates, defaults to -1 + :type spacing: int, optional + """ + dd_sequence = dd_option.rules.general_rule( + slack_length=slack_length, + spacing=spacing, + gates=gates, + ) + return dd_sequence + + if isinstance(rule, list): +> import cirq +E ModuleNotFoundError: No module named 'cirq' + +tensorcircuit/results/qem/qem_methods.py:207: ModuleNotFoundError +_____________________ test_simple_circuits_ad[jaxb-highp] ______________________ +[gw2] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None, dtype = None + + @pytest.mark.parametrize( + "backend, dtype", [(lf("tfb"), lf("highp")), (lf("jaxb"), lf("highp"))] + ) + def test_simple_circuits_ad(backend, dtype): + def expec(params): + mps = tc.MPSCircuit(N, split=split) + mps.rx(0, theta=params[0]) + mps.cx(0, 1) + mps.cx(1, 2) + mps.ry(2, theta=params[1]) + mps.rzz(1, 3, theta=params[2]) + x = [0, 2] + z = [1] + exp = mps.expectation_ps(x=x, z=z) + return tc.backend.real(exp) + + params = tc.backend.ones((3,), dtype=tc.dtypestr) + expec_vg = tc.backend.value_and_grad(expec) + expec_vg_jit = tc.backend.jit(expec_vg) +> exp = expec(params) + +tests/test_mpscircuit.py:362: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tests/test_mpscircuit.py:356: in expec + exp = mps.expectation_ps(x=x, z=z) +tensorcircuit/abstractcircuit.py:1392: in expectation_ps + return self.expectation( +tensorcircuit/mpscircuit.py:972: in expectation + mps = self.copy() +tensorcircuit/mpscircuit.py:821: in copy + result = self.copy_without_tensor() +tensorcircuit/mpscircuit.py:810: in copy_without_tensor + copied_value = copy(info[key]) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = + + def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. 
+ """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + if issubclass(cls, type): + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier is not None: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor is not None: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: +> rv = reductor(4) +E TypeError: cannot pickle 'module' object + +/opt/miniconda3/envs/testTC/lib/python3.12/copy.py:87: TypeError +_________________________ test_circuit_inverse_2[npb] __________________________ +[gw6] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_inverse_2(backend): + inputs = np.random.uniform(size=[8]) + inputs /= np.linalg.norm(inputs) + c = tc.Circuit(3, inputs=inputs) + c.iswap(0, 1) + c.iswap(1, 0, theta=0.6) + c.rxx(1, 2, theta=-0.2) + c.cu(0, 1, lbd=2.0, theta=-0.7) + c.r(2, alpha=0.3) + c.sd(2) + c.cx(1, 2) + c.unitary(0, unitary=tc.gates._x_matrix) + c1 = c.inverse() + c.append(c1) + print(c.draw()) +> np.testing.assert_allclose(c.state(), inputs, atol=1e-5) + +tests/test_circuit.py:1381: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +args = (.compare at 0x3472151c0>, array([ 0.53586936-0.05293488j, -0.45244357+0.04615141j, +...lex64), array([0.54845657, 0.47309895, 0.28714183, 0.32934674, 0.09612444, + 0.01416939, 0.23025785, 0.47116208])) +kwds = {'equal_nan': True, 'err_msg': '', 'header': 'Not equal to tolerance rtol=1e-07, atol=1e-05', 'strict': False, ...} + + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): +> return func(*args, **kwds) +E AssertionError: +E Not equal to tolerance rtol=1e-07, atol=1e-05 +E +E Mismatched elements: 8 / 8 (100%) +E Max absolute difference among violations: 0.92669246 +E Max relative difference among violations: 7.00521395 +E ACTUAL: array([ 0.535869-0.052935j, -0.452444+0.046151j, 0.336438-0.07604j , +E -0.34967 +0.088152j, 0.149454-0.075728j, -0.077933+0.037009j, +E 0.187209-0.002277j, -0.428239+0.01545j ], dtype=complex64) +E DESIRED: array([0.548457, 0.473099, 0.287142, 0.329347, 0.096124, 0.014169, +E 0.230258, 0.471162]) + +/opt/miniconda3/envs/testTC/lib/python3.12/contextlib.py:81: AssertionError +----------------------------- Captured stdout call ----------------------------- + ┌───────────────┐┌───────────────────┐ » +q_0: ┤0 ├┤1 ├──────────────────────■────────» + │ (XX+YY)(π,π) ││ (XX+YY)(1.885,π) │┌────────────┐┌───────┴───────┐» +q_1: ┤1 ├┤0 ├┤0 ├┤ U(-0.7,0,2,0) ├» + └───────────────┘└───────────────────┘│ Rxx(-0.2) │└─────┬───┬─────┘» +q_2: ──────────────────────────────────────┤1 ├──────┤ r ├──────» + └────────────┘ └───┘ » +« ┌─────┐┌─────┐ » +«q_0: ┤ any ├┤ any ├─────────────■──────────────────────────» +« └─────┘└─────┘ ┌───────┴───────┐ ┌───────────┐» +«q_1: ──────────■─────■──┤ U(0.7,-2,0,0) ├─────┤0 ├» +« ┌─────┐ ┌─┴─┐ ┌─┴─┐└────┬─────┬────┘┌───┐│ Rxx(0.2) │» +«q_2: ┤ Sdg ├─┤ X ├─┤ X ├─────┤ Sdg ├─────┤ r ├┤1 ├» +« └─────┘ └───┘ └───┘ └─────┘ └───┘└───────────┘» +« ┌────────────────────┐┌────────────────┐ +«q_0: ┤1 ├┤0 ├ +« │ (XX+YY)(-1.885,π) ││ (XX+YY)(-π,π) │ +«q_1: ┤0 ├┤1 ├ +« └────────────────────┘└────────────────┘ +«q_2: ──────────────────────────────────────── +« +_________________________ test_circuit_inverse_2[tfb] __________________________ +[gw6] darwin -- Python 3.12.9 
/opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_inverse_2(backend): + inputs = np.random.uniform(size=[8]) + inputs /= np.linalg.norm(inputs) + c = tc.Circuit(3, inputs=inputs) + c.iswap(0, 1) + c.iswap(1, 0, theta=0.6) + c.rxx(1, 2, theta=-0.2) + c.cu(0, 1, lbd=2.0, theta=-0.7) + c.r(2, alpha=0.3) + c.sd(2) + c.cx(1, 2) + c.unitary(0, unitary=tc.gates._x_matrix) + c1 = c.inverse() + c.append(c1) + print(c.draw()) +> np.testing.assert_allclose(c.state(), inputs, atol=1e-5) + +tests/test_circuit.py:1381: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +args = (.compare at 0x346d642c0>, array([ 0.2817256 -0.01330288j, -0.01777708+0.04259015j, +...lex64), array([0.33836018, 0.06513215, 0.26498456, 0.082767 , 0.39440637, + 0.42723191, 0.4829877 , 0.48253745])) +kwds = {'equal_nan': True, 'err_msg': '', 'header': 'Not equal to tolerance rtol=1e-07, atol=1e-05', 'strict': False, ...} + + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): +> return func(*args, **kwds) +E AssertionError: +E Not equal to tolerance rtol=1e-07, atol=1e-05 +E +E Mismatched elements: 8 / 8 (100%) +E Max absolute difference among violations: 0.92668351 +E Max relative difference among violations: 2.74141589 +E ACTUAL: array([ 0.281726-0.013303j, -0.017777+0.04259j , 0.316051-0.010468j, +E -0.137518+0.054384j, 0.39415 -0.077557j, -0.458228+0.077629j, +E 0.463695-0.068668j, -0.441975+0.063392j], dtype=complex64) +E DESIRED: array([0.33836 , 0.065132, 0.264985, 0.082767, 0.394406, 0.427232, +E 0.482988, 0.482537]) + +/opt/miniconda3/envs/testTC/lib/python3.12/contextlib.py:81: AssertionError +----------------------------- Captured stdout call ----------------------------- + ┌───────────────┐┌───────────────────┐ » +q_0: ┤0 ├┤1 ├──────────────────────■────────» + │ (XX+YY)(π,π) ││ (XX+YY)(1.885,π) │┌────────────┐┌───────┴───────┐» +q_1: ┤1 ├┤0 ├┤0 ├┤ U(-0.7,0,2,0) ├» + └───────────────┘└───────────────────┘│ Rxx(-0.2) │└─────┬───┬─────┘» +q_2: ──────────────────────────────────────┤1 ├──────┤ r ├──────» + └────────────┘ └───┘ » +« ┌─────┐┌─────┐ » +«q_0: ┤ any ├┤ any ├─────────────■──────────────────────────» +« └─────┘└─────┘ ┌───────┴───────┐ ┌───────────┐» +«q_1: ──────────■─────■──┤ U(0.7,-2,0,0) ├─────┤0 ├» +« ┌─────┐ ┌─┴─┐ ┌─┴─┐└────┬─────┬────┘┌───┐│ Rxx(0.2) │» +«q_2: ┤ Sdg ├─┤ X ├─┤ X ├─────┤ Sdg ├─────┤ r ├┤1 ├» +« └─────┘ └───┘ └───┘ └─────┘ └───┘└───────────┘» +« ┌────────────────────┐┌────────────────┐ +«q_0: ┤1 ├┤0 ├ +« │ (XX+YY)(-1.885,π) ││ (XX+YY)(-π,π) │ +«q_1: ┤0 ├┤1 ├ +« └────────────────────┘└────────────────┘ +«q_2: ──────────────────────────────────────── +« +_________________________ test_circuit_inverse_2[jaxb] _________________________ +[gw6] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_inverse_2(backend): + inputs = np.random.uniform(size=[8]) + inputs /= np.linalg.norm(inputs) + c = tc.Circuit(3, inputs=inputs) + c.iswap(0, 1) + c.iswap(1, 0, theta=0.6) + c.rxx(1, 2, theta=-0.2) + c.cu(0, 1, lbd=2.0, theta=-0.7) + c.r(2, alpha=0.3) + c.sd(2) + c.cx(1, 2) + c.unitary(0, unitary=tc.gates._x_matrix) + c1 = c.inverse() + c.append(c1) + print(c.draw()) +> np.testing.assert_allclose(c.state(), inputs, atol=1e-5) + +tests/test_circuit.py:1381: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ + +args = (.compare at 0x346d658a0>, array([-0.05571775-0.04894246j, -0.37061656+0.05728193j, +...lex64), array([0.00174632, 0.42543002, 0.35639291, 0.30450696, 0.39677372, + 0.49179465, 0.44586219, 0.03437152])) +kwds = {'equal_nan': True, 'err_msg': '', 'header': 'Not equal to tolerance rtol=1e-07, atol=1e-05', 'strict': False, ...} + + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): +> return func(*args, **kwds) +E AssertionError: +E Not equal to tolerance rtol=1e-07, atol=1e-05 +E +E Mismatched elements: 8 / 8 (100%) +E Max absolute difference among violations: 0.97662276 +E Max relative difference among violations: 43.22322355 +E ACTUAL: array([-0.055718-0.048942j, -0.370617+0.057282j, 0.353303-0.068378j, +E -0.350503+0.000281j, 0.438544-0.005524j, -0.482195+0.071662j, +E 0.401416-0.079045j, 0.007931+0.063772j], dtype=complex64) +E DESIRED: array([0.001746, 0.42543 , 0.356393, 0.304507, 0.396774, 0.491795, +E 0.445862, 0.034372]) + +/opt/miniconda3/envs/testTC/lib/python3.12/contextlib.py:81: AssertionError +----------------------------- Captured stdout call ----------------------------- + ┌───────────────┐┌───────────────────┐ » +q_0: ┤0 ├┤1 ├──────────────────────■────────» + │ (XX+YY)(π,π) ││ (XX+YY)(1.885,π) │┌────────────┐┌───────┴───────┐» +q_1: ┤1 ├┤0 ├┤0 ├┤ U(-0.7,0,2,0) ├» + └───────────────┘└───────────────────┘│ Rxx(-0.2) │└─────┬───┬─────┘» +q_2: ──────────────────────────────────────┤1 ├──────┤ r ├──────» + └────────────┘ └───┘ » +« ┌─────┐┌─────┐ » +«q_0: ┤ any ├┤ any ├─────────────■──────────────────────────» +« └─────┘└─────┘ ┌───────┴───────┐ ┌───────────┐» +«q_1: ──────────■─────■──┤ U(0.7,-2,0,0) ├─────┤0 ├» +« ┌─────┐ ┌─┴─┐ ┌─┴─┐└────┬─────┬────┘┌───┐│ Rxx(0.2) │» +«q_2: ┤ Sdg ├─┤ X ├─┤ X ├─────┤ Sdg ├─────┤ r ├┤1 ├» +« └─────┘ └───┘ └───┘ └─────┘ └───┘└───────────┘» +« ┌────────────────────┐┌────────────────┐ +«q_0: ┤1 ├┤0 ├ +« │ (XX+YY)(-1.885,π) ││ (XX+YY)(-π,π) │ +«q_1: ┤0 ├┤1 ├ +« └────────────────────┘└────────────────┘ +«q_2: ──────────────────────────────────────── +« +__________________________ test_circuits[jaxb-highp] ___________________________ +[gw4] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None, dtype = None + + @pytest.mark.parametrize( + "backend, dtype", [(lf("tfb"), lf("highp")), (lf("jaxb"), lf("highp"))] + ) + def test_circuits(backend, dtype): + circuits = get_test_circuits() + do_test_canonical(circuits) + do_test_wavefunction(circuits) + do_test_truncation(circuits, 0.902663090851, 0.910305380327) + do_test_amplitude(circuits) +> do_test_expectation(circuits) + +tests/test_mpscircuit.py:304: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tests/test_mpscircuit.py:170: in do_test_expectation + exp_mps = mps_exact.expectation(*gates) +tensorcircuit/mpscircuit.py:972: in expectation + mps = self.copy() +tensorcircuit/mpscircuit.py:821: in copy + result = self.copy_without_tensor() +tensorcircuit/mpscircuit.py:810: in copy_without_tensor + copied_value = copy(info[key]) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = + + def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. 
+ """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + if issubclass(cls, type): + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier is not None: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor is not None: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: +> rv = reductor(4) +E TypeError: cannot pickle 'module' object + +/opt/miniconda3/envs/testTC/lib/python3.12/copy.py:87: TypeError +----------------------------- Captured stdout call ----------------------------- +0.9026630908518822 0.9103053803271302 +_________________________ test_circuits_jit[tfb-highp] _________________________ +[gw4] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None, dtype = None + + @pytest.mark.parametrize("backend, dtype", [(lf("tfb"), lf("highp"))]) + def test_circuits_jit(backend, dtype): + def expec(params): + mps = tc.MPSCircuit(N, split=split) + simulate(mps, check=False, params=params) + x = [0, 2] + y = [5, 3, 1] + z = [6, 4] + exp = mps.expectation_ps(x=x, y=y, z=z) + return tc.backend.real(exp) + + params = tc.backend.ones((3,), dtype=tc.dtypestr) + expec_vg = tc.backend.value_and_grad(expec) + expec_vg_jit = tc.backend.jit(expec_vg) +> exp = expec(params) + +tests/test_mpscircuit.py:327: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tests/test_mpscircuit.py:321: in expec + exp = mps.expectation_ps(x=x, y=y, z=z) +tensorcircuit/abstractcircuit.py:1392: in expectation_ps + return self.expectation( +tensorcircuit/mpscircuit.py:972: in expectation + mps = self.copy() +tensorcircuit/mpscircuit.py:821: in copy + result = self.copy_without_tensor() +tensorcircuit/mpscircuit.py:810: in copy_without_tensor + copied_value = copy(info[key]) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +x = + + def copy(x): + """Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. 
+ """ + + cls = type(x) + + copier = _copy_dispatch.get(cls) + if copier: + return copier(x) + + if issubclass(cls, type): + # treat it as a regular class: + return _copy_immutable(x) + + copier = getattr(cls, "__copy__", None) + if copier is not None: + return copier(x) + + reductor = dispatch_table.get(cls) + if reductor is not None: + rv = reductor(x) + else: + reductor = getattr(x, "__reduce_ex__", None) + if reductor is not None: +> rv = reductor(4) +E TypeError: cannot pickle 'module' object + +/opt/miniconda3/envs/testTC/lib/python3.12/copy.py:87: TypeError +__________________________ test_circuit_to_json[npb] ___________________________ +[gw7] darwin -- Python 3.12.9 /opt/miniconda3/envs/testTC/bin/python3.12 + +backend = None + + @pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) + def test_circuit_to_json(backend): + c = tc.Circuit(3) + c.h(0) + c.CNOT(1, 2) + c.rxx(0, 2, theta=0.3) + c.crx(0, 1, theta=-0.8) + c.r(1, theta=tc.backend.ones([]), alpha=0.2) + c.toffoli(0, 2, 1) + c.ccnot(0, 1, 2) + c.multicontrol(1, 2, 0, ctrl=[0, 1], unitary=tc.gates._x_matrix) +> s = c.to_json() + +tests/test_circuit.py:1497: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +tensorcircuit/abstractcircuit.py:1047: in to_json + return json.dumps(tcqasm) +/opt/miniconda3/envs/testTC/lib/python3.12/json/__init__.py:231: in dumps + return _default_encoder.encode(obj) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:200: in encode + chunks = self.iterencode(o, _one_shot=True) +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:258: in iterencode + return _iterencode(o, 0) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +o = np.float32(1.5707964) + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). 
+ + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + # Let the base class default method raise the TypeError + return super().default(o) + + """ +> raise TypeError(f'Object of type {o.__class__.__name__} ' + f'is not JSON serializable') +E TypeError: Object of type float32 is not JSON serializable + +/opt/miniconda3/envs/testTC/lib/python3.12/json/encoder.py:180: TypeError +------------------------------ Captured log call ------------------------------- +WARNING tensorcircuit.translation:translation.py:654 experimental feature subject to fast protocol and implementation change, try on your own risk +=============================== warnings summary =============================== +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:212: RuntimeWarning: divide by zero encountered in matmul + um0um1 = um0 @ um1.T.conjugate() + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:212: RuntimeWarning: overflow encountered in matmul + um0um1 = um0 @ um1.T.conjugate() + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:212: RuntimeWarning: invalid value encountered in matmul + um0um1 = um0 @ um1.T.conjugate() + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:220: RuntimeWarning: divide by zero encountered in matmul + wmat = dmat @ vmat.T.conjugate() @ um1 + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:220: RuntimeWarning: overflow encountered in matmul + wmat = dmat @ vmat.T.conjugate() @ um1 + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] +tests/test_circuit.py::test_qir2qiskit[jaxb] + /opt/miniconda3/envs/testTC/lib/python3.12/site-packages/qiskit/synthesis/unitary/qsd.py:220: RuntimeWarning: invalid value encountered in matmul + wmat = dmat @ vmat.T.conjugate() @ um1 + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1102: RuntimeWarning: divide by zero encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1102: RuntimeWarning: overflow encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_circuit.py::test_qir2qiskit[npb] +tests/test_circuit.py::test_qir2qiskit[tfb] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1102: RuntimeWarning: 
invalid value encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_circuit.py::test_qiskit2tc + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1176: RuntimeWarning: divide by zero encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_circuit.py::test_qiskit2tc + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1176: RuntimeWarning: overflow encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_circuit.py::test_qiskit2tc + /Users/weiguoma/PyProgram/tensorcircuit-ng/tests/test_circuit.py:1176: RuntimeWarning: invalid value encountered in matmul + np.testing.assert_allclose(p_mat @ tc_unitary @ p_mat, qis_unitary, atol=1e-5) + +tests/test_lattice.py::TestCustomizeLattice::test_show_method_actually_draws_2d_labels +tests/test_lattice.py::TestLongRangeNeighborFinding::test_show_method_with_custom_bond_kwargs + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/templates/lattice.py:454: UserWarning: FigureCanvasAgg is non-interactive, and thus cannot be shown + plt.show() + +tests/test_dmcircuit.py::test_dm_sexpps_jittable_vamppable_tf +tests/test_channels.py::test_readout[tfb] + :12: SyntaxWarning: invalid escape sequence '\i' + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:408: RuntimeWarning: divide by zero encountered in matmul + backend.transpose(u) @ backend.reshape(psi0, [-1, 1]) + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:408: RuntimeWarning: overflow encountered in matmul + backend.transpose(u) @ backend.reshape(psi0, [-1, 1]) + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:408: RuntimeWarning: invalid value encountered in matmul + backend.transpose(u) @ backend.reshape(psi0, [-1, 1]) + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:417: RuntimeWarning: divide by zero encountered in matmul + psi_exact = backend.conj(u) @ backend.reshape(ebetah_utpsi0, [-1, 1]) + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:417: RuntimeWarning: overflow encountered in matmul + psi_exact = backend.conj(u) @ backend.reshape(ebetah_utpsi0, [-1, 1]) + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-True] +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:417: RuntimeWarning: invalid value encountered in matmul + psi_exact = backend.conj(u) @ backend.reshape(ebetah_utpsi0, [-1, 1]) + +tests/test_timeevol.py: 30 warnings + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:781: RuntimeWarning: divide by zero encountered in matmul + r = h @ q + +tests/test_timeevol.py: 30 warnings + 
/Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:781: RuntimeWarning: overflow encountered in matmul + r = h @ q + +tests/test_timeevol.py: 30 warnings + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:781: RuntimeWarning: invalid value encountered in matmul + r = h @ q + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:646: RuntimeWarning: divide by zero encountered in matmul + return ((hamiltonian @ psi) - b * psi) / a + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:646: RuntimeWarning: overflow encountered in matmul + return ((hamiltonian @ psi) - b * psi) / a + +tests/test_timeevol.py::test_chebyshev_evol_basic[npb-False] + /Users/weiguoma/PyProgram/tensorcircuit-ng/tensorcircuit/timeevol.py:646: RuntimeWarning: invalid value encountered in matmul + return ((hamiltonian @ psi) - b * psi) / a + +-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html +=========================== short test summary info ============================ +FAILED tests/test_circuit.py::test_circuit_to_json[tfb] - TypeError: Object o... +FAILED tests/test_circuit.py::test_circuit_to_json[jaxb] - TypeError: Object ... +FAILED tests/test_dmcircuit.py::test_mult_qubit_kraus[tfb] - tensorflow.pytho... +FAILED tests/test_mpscircuit.py::test_circuits[tfb-highp] - TypeError: cannot... +FAILED tests/test_mpscircuit.py::test_simple_circuits_ad[tfb-highp] - TypeErr... +FAILED tests/test_miscs.py::test_distrubuted_contractor - ModuleNotFoundError... +FAILED tests/test_qem.py::test_benchmark_circuits[tfb] - NameError: name 'gen... +FAILED tests/test_qem.py::test_benchmark_circuits[jaxb] - NameError: name 'ge... +FAILED tests/test_qem.py::test_zne[tfb] - AttributeError: 'NoneType' object h... +FAILED tests/test_qem.py::test_zne[jaxb] - AttributeError: 'NoneType' object ... +FAILED tests/test_qem.py::test_dd[tfb] - ModuleNotFoundError: No module named... +FAILED tests/test_qem.py::test_dd[jaxb] - ModuleNotFoundError: No module name... +FAILED tests/test_mpscircuit.py::test_simple_circuits_ad[jaxb-highp] - TypeEr... +FAILED tests/test_circuit.py::test_circuit_inverse_2[npb] - AssertionError: +FAILED tests/test_circuit.py::test_circuit_inverse_2[tfb] - AssertionError: +FAILED tests/test_circuit.py::test_circuit_inverse_2[jaxb] - AssertionError: +FAILED tests/test_mpscircuit.py::test_circuits[jaxb-highp] - TypeError: canno... +FAILED tests/test_mpscircuit.py::test_circuits_jit[tfb-highp] - TypeError: ca... +FAILED tests/test_circuit.py::test_circuit_to_json[npb] - TypeError: Object o... +==== 19 failed, 649 passed, 106 skipped, 1 xfailed, 136 warnings in 59.27s ===== diff --git a/tensorcircuit/abstractcircuit.py b/tensorcircuit/abstractcircuit.py index 3f3d5951..85a4c60c 100644 --- a/tensorcircuit/abstractcircuit.py +++ b/tensorcircuit/abstractcircuit.py @@ -1,15 +1,21 @@ """ Methods for abstract circuits independent of nodes, edges and contractions + +Note: + - Supports both qubit (d=2) and qudit (d>=2) systems. + - For string-encoded samples/counts when d<=36, digits use base-d characters 0–9A–Z (A=10, …, Z=35). 
""" # pylint: disable=invalid-name +import types +import inspect import json import logging from copy import deepcopy from functools import reduce from operator import add -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, NoReturn import numpy as np import tensornetwork as tn @@ -25,49 +31,15 @@ Gate = gates.Gate Tensor = Any -sgates = ( - ["i", "x", "y", "z", "h", "t", "s", "td", "sd", "wroot"] - + ["cnot", "cz", "swap", "cy", "ox", "oy", "oz"] - + ["toffoli", "fredkin"] -) -vgates = [ - "r", - "cr", - "u", - "cu", - "rx", - "ry", - "rz", - "phase", - "rxx", - "ryy", - "rzz", - "cphase", - "crx", - "cry", - "crz", - "orx", - "ory", - "orz", - "iswap", - "any", - "exp", - "exp1", -] -mpogates = ["multicontrol", "mpo"] -gate_aliases = [ - ["cnot", "cx"], - ["fredkin", "cswap"], - ["toffoli", "ccnot"], - ["toffoli", "ccx"], - ["any", "unitary"], - ["sd", "sdg"], - ["td", "tdg"], -] +sgates = gates.sgates +vgates = gates.vgates +mpogates = gates.mpogates +gate_aliases = gates.gate_aliases class AbstractCircuit: _nqubits: int + _d: int _qir: List[Dict[str, Any]] _extra_qir: List[Dict[str, Any]] inputs: Tensor @@ -79,6 +51,31 @@ class AbstractCircuit: mpogates = mpogates gate_aliases = gate_aliases + def _validate_dim(self, dim: Optional[int]) -> None: + self._d = 2 if dim is None else dim + # Require integer d>=2; current string-encoded IO supports d<=36 (0–9A–Z digits). + if not isinstance(self._d, int) or self._d < 2: + raise ValueError(f"Invalid dimension d={self._d}; must be an integer >= 2.") + if self._d > 36: + raise NotImplementedError( + "The Qudit interface is only supported for dimension < 36 now." + ) + + def _not_implemented_for_qudit(self) -> None: + if self._d != 2: + raise NotImplementedError( + f"The method ‘{inspect.stack()[1].function}’ is not supported for circuits with qudit systems (d != 2)." + ) + + def _get_gmod(self) -> types.ModuleType: + gmod = getattr(self, "_gmod", None) + if gmod is None: + from . import gates + + gmod = gates.get_gate_module(int(getattr(self, "_d", 2))) + self._gmod = gmod + return self._gmod + def apply_general_gate( self, gate: Union[Gate, QuOperator], @@ -111,8 +108,16 @@ def apply(self: "AbstractCircuit", *index: int, **vars: Any) -> None: if "split" in vars: split = vars["split"] del vars["split"] + gmod = self._get_gmod() + try: + gatef_dyn = getattr(gmod, name) + except AttributeError: + raise NotImplementedError( + f"Gate `{name}` is not supported for d={self._d}." + ) + gate_dict = { - "gatef": gatef, + "gatef": gatef_dyn, "index": index, "name": localname, "split": split, @@ -120,7 +125,7 @@ def apply(self: "AbstractCircuit", *index: int, **vars: Any) -> None: "parameters": vars, } # self._qir.append(gate_dict) - gate = gatef(**vars) + gate = gatef_dyn(**vars) self.apply_general_gate( gate, *index, @@ -175,8 +180,16 @@ def apply( localname = defaultname # split = None - gate = gatef() - gate_dict = {"gatef": gatef} + gmod = self._get_gmod() + try: + gatef_dyn = getattr(gmod, defaultname) + except AttributeError: + raise NotImplementedError( + f"Gate `{defaultname}` is not supported for d={self._d}." 
+ ) + + gate = gatef_dyn() + gate_dict = {"gatef": gatef_dyn} self.apply_general_gate( gate, @@ -198,48 +211,75 @@ def apply_list(self: "AbstractCircuit", *index: int, **kws: Any) -> None: return apply_list + @staticmethod + def _named_stub(name: str) -> Callable[..., Gate]: + """Helper""" + + def _stub(*args: Any, **kwargs: Any) -> NoReturn: + raise RuntimeError(f"stub for gate `{name}` should not be called") + + _stub.__name__ = name + return _stub + @classmethod def _meta_apply(cls) -> None: """ The registration of gate methods on circuit class using reflection mechanism """ + from . import gates + + gmod = gates.get_gate_module(2) + for g in sgates: setattr( - cls, g, cls.apply_general_gate_delayed(gatef=getattr(gates, g), name=g) + cls, g, cls.apply_general_gate_delayed(gatef=cls._named_stub(g), name=g) ) setattr( cls, g.upper(), - cls.apply_general_gate_delayed(gatef=getattr(gates, g), name=g), + cls.apply_general_gate_delayed(gatef=cls._named_stub(g), name=g), ) - matrix = gates.matrix_for_gate(getattr(gates, g)()) - matrix = gates.bmatrix(matrix) - doc = """ - Apply **%s** gate on the circuit. - See :py:meth:`tensorcircuit.gates.%s_gate`. - - - :param index: Qubit number that the gate applies on. - The matrix for the gate is - - .. math:: - - %s + try: + matrix = gates.matrix_for_gate(getattr(gmod, g)()) + matrix = gates.bmatrix(matrix) + doc = """ + Apply **%s** gate on the circuit. + See :py:meth:`tensorcircuit.gates.%s_gate`. + + .. note:: + Gate availability and exact matrix depend on the circuit local dimension ``d``. + The matrix below is an example for ``d=2`` (qubit case). For ``d>2`` (qudit), + the gate has the same name but generally a different matrix. + + :param index: Subsystem (site) index that the gate applies on + (qubit when ``d=2``, qudit when ``d>2``). The example matrix for ``d=2`` is + + .. math:: + + %s + + :type index: int. + """ % ( + g.upper(), + g, + matrix, + ) + except AttributeError: + doc = """ + Apply **%s** gate on the circuit. + See :py:meth:`tensorcircuit.gates.%s_gate`. + + .. note:: + Gate availability and exact matrix depend on the circuit local dimension ``d`` + (qubit when ``d=2``, qudit when ``d>2``). + + :param index: Subsystem (site) index that the gate applies on. + :type index: int. + """ % ( + g.upper(), + g, + ) - :type index: int. - """ % ( - g.upper(), - g, - matrix, - ) - # docs = """ - # Apply **%s** gate on the circuit. - - # :param index: Qubit number that the gate applies on. - # :type index: int. - # """ % ( - # g.upper() - # ) getattr(cls, g).__doc__ = doc getattr(cls, g.upper()).__doc__ = doc @@ -248,22 +288,25 @@ def _meta_apply(cls) -> None: cls, g, cls.apply_general_variable_gate_delayed( - gatef=getattr(gates, g), name=g + gatef=cls._named_stub(g), name=g ), ) setattr( cls, g.upper(), cls.apply_general_variable_gate_delayed( - gatef=getattr(gates, g), name=g + gatef=cls._named_stub(g), name=g ), ) doc = """ Apply **%s** gate with parameters on the circuit. See :py:meth:`tensorcircuit.gates.%s_gate`. - - - :param index: Qubit number that the gate applies on. + + .. note:: + Gate availability and exact matrix/parameterization depend on the + circuit local dimension ``d`` (qubit when ``d=2``, qudit when ``d>2``). + + :param index: Subsystem (site) index that the gate applies on. :type index: int. :param vars: Parameters for the gate. :type vars: float. 
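A usage sketch of the gate methods registered here (illustrative only; whether a given
gate name is available depends on the circuit's local dimension ``d``):

    >>> c = tc.Circuit(2)            # d defaults to 2 (qubit)
    >>> c.rx(0, theta=0.3)           # parameterized gate from vgates
    >>> c.CNOT(0, 1)                 # upper-case alias registered alongside
    >>> cq = tc.Circuit(2, dim=3)    # qutrit circuit
    >>> cq.x(0)                      # dispatches to the d=3 implementation of `x`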
@@ -279,21 +322,25 @@ def _meta_apply(cls) -> None: cls, g, cls.apply_general_variable_gate_delayed( - gatef=getattr(gates, g), name=g, mpo=True + gatef=cls._named_stub(g), name=g, mpo=True ), ) setattr( cls, g.upper(), cls.apply_general_variable_gate_delayed( - gatef=getattr(gates, g), name=g, mpo=True + gatef=cls._named_stub(g), name=g, mpo=True ), ) doc = """ Apply %s gate in MPO format on the circuit. See :py:meth:`tensorcircuit.gates.%s_gate`. - - :param index: Qubit number that the gate applies on. + + .. note:: + Gate availability and the MPO decomposition depend on the circuit local + dimension ``d`` (qubit when ``d=2``, qudit when ``d>2``). + + :param index: Subsystem index that the gate applies on. :type index: int. :param vars: Parameters for the gate. :type vars: float. @@ -312,6 +359,7 @@ def _meta_apply(cls) -> None: def to_qir(self) -> List[Dict[str, Any]]: """ Return the quantum intermediate representation of the circuit. + (Gate availability and matrices depend on local dimension ``d``.) :Example: @@ -427,6 +475,11 @@ def inverse( :return: the inversed circuit :rtype: Circuit """ + raise NotImplementedError( + "Due to the issue with the registered gate collection, " + "the inverse method is currently unavailable." + ) + self._not_implemented_for_qudit() if circuit_params is None: circuit_params = {} if "nqubits" not in circuit_params: @@ -521,11 +574,10 @@ def initial_mapping( circuit_params: Optional[Dict[str, Any]] = None, ) -> "AbstractCircuit": """ - generate a new circuit with the qubit mapping given by ``logical_physical_mapping`` + generate a new circuit with the site mapping given by ``logical_physical_mapping`` - :param logical_physical_mapping: how to map logical qubits to the physical qubits on the new circuit - :type logical_physical_mapping: Dict[int, int] - :param n: number of qubit of the new circuit, can be different from the original one, defaults to None + :param logical_physical_mapping: how to map logical sites to the physical sites on the new circuit + :param n: number of sites of the new circuit, can be different from the original one, defaults to None :type n: Optional[int], optional :param circuit_params: _description_, defaults to None :type circuit_params: Optional[Dict[str, Any]], optional @@ -564,10 +616,9 @@ def initial_mapping( def get_positional_logical_mapping(self) -> Dict[int, int]: """ Get positional logical mapping dict based on measure instruction. - This function is useful when we only measure part of the qubits in the circuit, - to process the count result from partial measurement, we must be aware of the mapping, - i.e. for each position in the count bitstring, what is the corresponding qubits (logical) - defined on the circuit + This function is useful when only part of the sites are measured. To process the counts from partial + measurement, we must know the mapping — i.e. for each position in the base-d string + (d<=36 uses 0–9A–Z), what is the corresponding logical site defined on the circuit :return: ``positional_logical_mapping`` :rtype: Dict[int, int] @@ -756,6 +807,7 @@ def to_qiskit( :type enable_inputs: bool, defaults to False :return: A qiskit object of this circuit. 
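        :Example: (illustrative; qubit circuits only, a circuit with d > 2 raises NotImplementedError)

        >>> c = tc.Circuit(2)
        >>> c.h(0)
        >>> c.cnot(0, 1)
        >>> qc = c.to_qiskit()  # qiskit object with the same gate sequence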
""" + self._not_implemented_for_qudit() from .translation import perm_matrix, qir2qiskit qir = self.to_qir() @@ -783,6 +835,7 @@ def to_openqasm(self, **kws: Any) -> str: :return: circuit representation in openqasm format :rtype: str """ + self._not_implemented_for_qudit() qc = self.to_qiskit(enable_instruction=True) try: qasm_str = qc.qasm(**kws) # type: ignore @@ -799,6 +852,7 @@ def to_openqasm_file(self, file: str, **kws: Any) -> None: :param file: the file path to save the circuit :type file: str """ + self._not_implemented_for_qudit() with open(file, "w") as f: f.write(self.to_openqasm(**kws)) @@ -860,6 +914,7 @@ def draw(self, **kws: Any) -> Any: q_2: ┤ X ├───── └───┘ """ + self._not_implemented_for_qudit() return self.to_qiskit(enable_instruction=True).draw(**kws) @classmethod @@ -942,6 +997,7 @@ def to_json(self, file: Optional[str] = None, simplified: bool = False) -> Any: :return: None if dumps to file otherwise the json str :rtype: Any """ + self._not_implemented_for_qudit() from .translation import qir2json tcqasm = qir2json(self.to_qir(), simplified=simplified) @@ -954,6 +1010,7 @@ def to_json(self, file: Optional[str] = None, simplified: bool = False) -> Any: def from_qsim_file( cls, file: str, circuit_params: Optional[Dict[str, Any]] = None ) -> "AbstractCircuit": + """.. note:: qsim import here assumes qubit gates (d=2).""" with open(file, "r") as f: lines = f.readlines() if circuit_params is None: @@ -985,6 +1042,7 @@ def _convert_ints_and_floats(x: str) -> Union[str, int, float]: for line in qsim_str[1:] if line ] + # Note: qsim parsing here is qubit-only (d=2); multi-level (qudit) gates are not handled. # https://github.com/quantumlib/qsim/blob/master/docs/input_format.md # https://github.com/jcmgray/quimb/blob/master/quimb/tensor/circuit.py#L241 for gate in qsim_gates: @@ -1082,7 +1140,7 @@ def select_gate(self, which: Tensor, kraus: Sequence[Gate], *index: int) -> None :type which: Tensor :param kraus: A list of gate in the form of ``tc.gate`` or Tensor :type kraus: Sequence[Gate] - :param index: the qubit lines the gate applied on + :param index: the site indices the gate is applied on :type index: int """ kraus = [k.tensor if isinstance(k, tn.Node) else k for k in kraus] @@ -1116,11 +1174,12 @@ def cond_measurement(self, index: int) -> Tensor: matrix after this method is kept in mixed state without knowing the measuremet resuslts + .. note:: + This helper is qubit-only (d=2). For qudit (d>2), use explicit projectors/Kraus operators. - - :param index: the qubit for the z-basis measurement + :param index: the site index for the Z-basis measurement :type index: int - :return: 0 or 1 for z measurement on up and down freedom + :return: 0 or 1 for Z-basis measurement outcome :rtype: Tensor """ return self.general_kraus( # type: ignore @@ -1170,7 +1229,7 @@ def append( :param c: The other circuit to be appended :type c: BaseCircuit - :param indices: the qubit indices to which ``c`` is appended on. + :param indices: the site indices to which ``c`` is appended. Defaults to None, which means plain concatenation. :type indices: Optional[List[int]], optional :return: The composed circuit @@ -1195,6 +1254,10 @@ def copy(self) -> "AbstractCircuit": c = type(self).from_qir(qir, self.circuit_param) return c + @property + def gates(self) -> types.ModuleType: + return self._get_gmod() + def expectation( self, *ops: Tuple[tn.Node, List[int]], @@ -1266,6 +1329,8 @@ def expectation_ps( :return: Expectation value :rtype: Tensor """ + # This channel API is currently qubit-only (d=2). 
For d>2, a NotImplementedError will be raised. + self._not_implemented_for_qudit() obs = [] if ps is not None: from .quantum import ps2xyz diff --git a/tensorcircuit/applications/layers.py b/tensorcircuit/applications/layers.py index 64c36bca..bece3d15 100644 --- a/tensorcircuit/applications/layers.py +++ b/tensorcircuit/applications/layers.py @@ -13,7 +13,8 @@ from ..circuit import Circuit from ..densitymatrix import DMCircuit -from ..gates import num_to_tensor, array_to_tensor, _swap_matrix +from .. import gates +from ..gates import num_to_tensor, array_to_tensor from ..channels import depolarizingchannel from ..abstractcircuit import sgates @@ -25,6 +26,7 @@ logger.warning(e) logger.warning("Therefore some functionality in %s may not work" % __name__) +_swap_matrix = gates._swap_matrix # type: ignore thismodule = sys.modules[__name__] @@ -298,7 +300,7 @@ def anyswaplayer_bitflip_mc( generate_gate_layer(gate) generate_any_gate_layer(gate) -for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]): +for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]): # type: ignore gates = gates[0] + gates[1] # type: ignore generate_double_gate(gates) # type: ignore generate_double_gate_layer(gates) # type: ignore @@ -308,7 +310,7 @@ def anyswaplayer_bitflip_mc( generate_any_double_gate_layer_bitflip_mc(gates) # type: ignore -for gates in itertools.product( +for gates in itertools.product( # type: ignore *[["rx", "ry", "rz", "xx", "yy", "zz"] for _ in range(2)] ): generate_double_layer_block(gates) # type: ignore @@ -504,7 +506,7 @@ def f( if gate != "H": generate_cirq_any_gate_layer(gate) - for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]): + for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]): # type: ignore gates = gates[0] + gates[1] # type: ignore generate_cirq_double_gate(gates) # type: ignore generate_cirq_double_gate_layer(gates) # type: ignore diff --git a/tensorcircuit/applications/utils.py b/tensorcircuit/applications/utils.py index 3ca54ff2..7d49f5e2 100644 --- a/tensorcircuit/applications/utils.py +++ b/tensorcircuit/applications/utils.py @@ -217,7 +217,7 @@ def train_qml_vag( c.exp( # type: ignore i, (i + 1) % 10, - unitary=array_to_tensor(G._swap_matrix), + unitary=array_to_tensor(G._swap_matrix), # type: ignore theta=cnnp[3 * epoch + 2, i], ) for i in range(1, nqubits, 2): @@ -225,7 +225,7 @@ def train_qml_vag( c.exp( # type: ignore i, (i + 1) % 10, - unitary=array_to_tensor(G._swap_matrix), + unitary=array_to_tensor(G._swap_matrix), # type: ignore theta=cnnp[3 * epoch + 2, i], ) for i in range(nqubits): @@ -287,7 +287,7 @@ def validate_qml_vag( c.exp( # type: ignore i, (i + 1) % 10, - unitary=array_to_tensor(G._swap_matrix), + unitary=array_to_tensor(G._swap_matrix), # type: ignore theta=cnnp[3 * epoch + 2, i], ) for i in range(1, nqubits, 2): @@ -295,7 +295,7 @@ def validate_qml_vag( c.exp( # type: ignore i, (i + 1) % 10, - unitary=array_to_tensor(G._swap_matrix), + unitary=array_to_tensor(G._swap_matrix), # type: ignore theta=cnnp[3 * epoch + 2, i], ) for i in range(nqubits): diff --git a/tensorcircuit/backends/abstract_backend.py b/tensorcircuit/backends/abstract_backend.py index adbad83f..d976df3e 100644 --- a/tensorcircuit/backends/abstract_backend.py +++ b/tensorcircuit/backends/abstract_backend.py @@ -9,6 +9,7 @@ from operator import mul from typing import Any, Callable, List, Optional, Sequence, Tuple, Union +import math import numpy as np from ..utils import return_partial @@ -405,6 +406,31 @@ def reshape2(self: 
Any, a: Tensor) -> Tensor: a = self.reshape(a, [2 for _ in range(nleg)]) return a + def reshaped(self: Any, a: Tensor, d: int) -> Tensor: + """ + Reshape a tensor to the [d, d, ...] shape. + + :param a: Input tensor + :type a: Tensor + :param d: edge length for each dimension + :type d: int + :return: the reshaped tensor + :rtype: Tensor + """ + if not isinstance(d, int) or d <= 0: + raise ValueError("d must be a positive integer.") + + size = self.sizen(a) + if size == 0: + return self.reshape(a, (0,)) + + nleg_float = math.log(size, d) + nleg = int(round(nleg_float)) + if d**nleg != size: + raise ValueError(f"cannot reshape: size {size} is not a power of d={d}") + + return self.reshape(a, (d,) * nleg) + def reshapem(self: Any, a: Tensor) -> Tensor: """ Reshape a tensor to the [l, l] shape. @@ -748,6 +774,36 @@ def mod(self: Any, x: Tensor, y: Tensor) -> Tensor: "Backend '{}' has not implemented `mod`.".format(self.name) ) + def floor(self: Any, x: Tensor) -> Tensor: + """ + Compute + + :param x: input values + :type x: Tensor + :return: results + :rtype: Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented `floor`.".format(self.name) + ) + + def clip(self: Any, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor: + """ + Clip values of x into [a_min, a_max], preserving dtype & device. + + :param a: input values + :type a: Tensor + :param a_min: minimum value + :type a_min: Tensor + :param a_max: maximum value + :type a_max: Tensor + :return: results + :rtype: Tensor + """ + raise NotImplementedError( + "Backend '{}' has not implemented `clip`.".format(self.name) + ) + def reverse(self: Any, a: Tensor) -> Tensor: """ return ``a[::-1]``, only 1D tensor is guaranteed for consistent behavior diff --git a/tensorcircuit/backends/cupy_backend.py b/tensorcircuit/backends/cupy_backend.py index 4525cf08..23d72554 100644 --- a/tensorcircuit/backends/cupy_backend.py +++ b/tensorcircuit/backends/cupy_backend.py @@ -11,7 +11,7 @@ import numpy as np try: - from numpy import ComplexWarning + from numpy import ComplexWarning # type: ignore except ImportError: # np2.0 compatibility from numpy.exceptions import ComplexWarning # type: ignore diff --git a/tensorcircuit/backends/jax_backend.py b/tensorcircuit/backends/jax_backend.py index a9d17b96..cb78ecf7 100644 --- a/tensorcircuit/backends/jax_backend.py +++ b/tensorcircuit/backends/jax_backend.py @@ -12,7 +12,7 @@ import numpy as np try: - from numpy import ComplexWarning + from numpy import ComplexWarning # type: ignore except ImportError: # np2.0 compatibility from numpy.exceptions import ComplexWarning # type: ignore @@ -342,6 +342,12 @@ def arange(self, start: int, stop: Optional[int] = None, step: int = 1) -> Tenso def mod(self, x: Tensor, y: Tensor) -> Tensor: return jnp.mod(x, y) + def floor(self, x: Tensor) -> Tensor: + return jnp.floor(x) + + def clip(self, x: Tensor, lower: Tensor, upper: Tensor) -> Tensor: + return jnp.clip(x, lower, upper) + def right_shift(self, x: Tensor, y: Tensor) -> Tensor: return jnp.right_shift(x, y) diff --git a/tensorcircuit/backends/numpy_backend.py b/tensorcircuit/backends/numpy_backend.py index 633a8467..18dc4e5a 100644 --- a/tensorcircuit/backends/numpy_backend.py +++ b/tensorcircuit/backends/numpy_backend.py @@ -11,7 +11,7 @@ import numpy as np try: - from numpy import ComplexWarning + from numpy import ComplexWarning # type: ignore except ImportError: # np2.0 compatibility from numpy.exceptions import ComplexWarning # type: ignore @@ -234,6 +234,12 @@ def arange(self, start: int, stop: 
Optional[int] = None, step: int = 1) -> Tenso def mod(self, x: Tensor, y: Tensor) -> Tensor: return np.mod(x, y) + def floor(self, x: Tensor) -> Tensor: + return np.floor(x) + + def clip(self, x: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor: + return np.clip(x, a_min, a_max) + def right_shift(self, x: Tensor, y: Tensor) -> Tensor: return np.right_shift(x, y) diff --git a/tensorcircuit/backends/pytorch_backend.py b/tensorcircuit/backends/pytorch_backend.py index 83176308..4b238066 100644 --- a/tensorcircuit/backends/pytorch_backend.py +++ b/tensorcircuit/backends/pytorch_backend.py @@ -409,6 +409,12 @@ def arange(self, start: int, stop: Optional[int] = None, step: int = 1) -> Tenso def mod(self, x: Tensor, y: Tensor) -> Tensor: return torchlib.fmod(x, y) + def floor(self, x: Tensor) -> Tensor: + return torchlib.floor(x) + + def clip(self, x: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor: + return torchlib.clamp(x, a_min, a_max) + def right_shift(self, x: Tensor, y: Tensor) -> Tensor: return torchlib.bitwise_right_shift(x, y) diff --git a/tensorcircuit/backends/tensorflow_backend.py b/tensorcircuit/backends/tensorflow_backend.py index ab79ae69..6a21db57 100644 --- a/tensorcircuit/backends/tensorflow_backend.py +++ b/tensorcircuit/backends/tensorflow_backend.py @@ -115,7 +115,7 @@ def _random_choice_tf( else: if not (isinstance(p, tf.Tensor) or isinstance(p, tf.Variable)): p = tf.constant(p) - dtype = p.dtype + dtype = p.dtype # type: ignore shape1 = reduce(mul, shape) p_cuml = tf.cumsum(p) r = p_cuml[-1] * (1 - g.uniform([shape1], dtype=dtype)) @@ -544,6 +544,9 @@ def unique_with_counts(self, a: Tensor, **kws: Any) -> Tuple[Tensor, Tensor]: def stack(self, a: Sequence[Tensor], axis: int = 0) -> Tensor: return tf.stack(a, axis=axis) + def clip(self, a: Tensor, a_min: Tensor, a_max: Tensor) -> Tensor: + return tf.clip_by_value(a, a_min, a_max) + def concat(self, a: Sequence[Tensor], axis: int = 0) -> Tensor: return tf.concat(a, axis=axis) @@ -612,6 +615,9 @@ def arange(self, start: int, stop: Optional[int] = None, step: int = 1) -> Tenso def mod(self, x: Tensor, y: Tensor) -> Tensor: return tf.math.mod(x, y) + def floor(self, x: Tensor) -> Tensor: + return tf.math.floor(x) + def right_shift(self, x: Tensor, y: Tensor) -> Tensor: return tf.bitwise.right_shift(x, y) diff --git a/tensorcircuit/basecircuit.py b/tensorcircuit/basecircuit.py index e39f0c6b..c7566d67 100644 --- a/tensorcircuit/basecircuit.py +++ b/tensorcircuit/basecircuit.py @@ -1,17 +1,23 @@ """ Quantum circuit: common methods for all circuit classes as MixIn + +Note: + - Supports qubit (d = 2) and qudit (d >= 2) systems. + - For string-encoded samples/counts when d <= 36, digits use base-d characters 0–9A–Z (A = 10, …, Z = 35). """ # pylint: disable=invalid-name -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from functools import partial +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union -import numpy as np import graphviz +import numpy as np import tensornetwork as tn from . 
import gates +from .abstractcircuit import AbstractCircuit +from .cons import npdtype, backend, dtypestr, contractor, rdtypestr, _ALPHABET from .quantum import ( QuOperator, QuVector, @@ -21,17 +27,39 @@ sample_int2bin, sample_bin2int, sample2all, + _infer_num_sites, ) -from .abstractcircuit import AbstractCircuit -from .cons import npdtype, backend, dtypestr, contractor, rdtypestr from .simplify import _split_two_qubit_gate from .utils import arg_alias - Gate = gates.Gate Tensor = Any +def _decode_basis_label(label: str, d: int, n: int) -> List[int]: + if d > 36: + raise NotImplementedError( + f"String basis label supports d<=36 (0–9A–Z). Got d={d}. " + "Use an integer array/tensor of length n instead." + ) + s = label.upper() + if len(s) != n: + raise ValueError(f"Basis label length mismatch: expect {n}, got {len(s)}") + digits = [] + for ch in s: + if ch not in _ALPHABET: + raise ValueError( + f"Invalid character '{ch}' in basis label (allowed 0–9A–Z)." + ) + v = _ALPHABET.index(ch) + if v >= d: + raise ValueError( + f"Digit '{ch}' (= {v}) out of range for base-d with d={d}." + ) + digits.append(v) + return digits + + class BaseCircuit(AbstractCircuit): _nodes: List[tn.Node] _front: List[tn.Edge] @@ -42,6 +70,7 @@ class BaseCircuit(AbstractCircuit): @staticmethod def all_zero_nodes(n: int, d: int = 2, prefix: str = "qb-") -> List[tn.Node]: + prefix = "qd-" if d > 2 else prefix l = [0.0 for _ in range(d)] l[0] = 1.0 nodes = [ @@ -289,7 +318,7 @@ def expectation_before( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) + op = backend.reshaped(op, d=self._d) op = backend.cast(op, dtype=dtypestr) op = gates.Gate(op) else: @@ -355,12 +384,12 @@ def to_qir(self) -> List[Dict[str, Any]]: def perfect_sampling(self, status: Optional[Tensor] = None) -> Tuple[str, float]: """ - Sampling bistrings from the circuit output based on quantum amplitudes. + Sampling base-d strings (0–9A–Z when d <= 36) from the circuit output based on quantum amplitudes. Reference: arXiv:1201.3974. :param status: external randomness, with shape [nqubits], defaults to None :type status: Optional[Tensor] - :return: Sampled bit string and the corresponding theoretical probability. + :return: Sampled base-d string and the corresponding theoretical probability. :rtype: Tuple[str, float] """ return self.measure_jit(*range(self._nqubits), with_prob=True, status=status) @@ -369,10 +398,10 @@ def measure_jit( self, *index: int, with_prob: bool = False, status: Optional[Tensor] = None ) -> Tuple[Tensor, Tensor]: """ - Take measurement to the given quantum lines. + Take measurement on the given site indices (computational basis). This method is jittable is and about 100 times faster than unjit version! - :param index: Measure on which quantum line. + :param index: Measure on which site (wire) index. :type index: int :param with_prob: If true, theoretical probability is also returned. :type with_prob: bool, optional @@ -383,9 +412,11 @@ def measure_jit( """ # finally jit compatible ! and much faster than unjit version ! 
(100x) sample: List[Tensor] = [] - p = 1.0 - p = backend.convert_to_tensor(p) - p = backend.cast(p, dtype=rdtypestr) + one_r = backend.cast(backend.convert_to_tensor(1.0), rdtypestr) + zero_r = backend.cast(backend.convert_to_tensor(0.0), rdtypestr) + tiny_r = backend.cast(backend.convert_to_tensor(1e-12), rdtypestr) + phi = backend.cast(backend.convert_to_tensor(0.6180339887498948), rdtypestr) + p = one_r for k, j in enumerate(index): if self.is_dm is False: nodes1, edge1 = self._copy() @@ -400,40 +431,87 @@ def measure_jit( if i != j: e ^ edge2[i] for i in range(k): - m = (1 - sample[i]) * gates.array_to_tensor(np.array([1, 0])) + sample[ - i - ] * gates.array_to_tensor(np.array([0, 1])) - newnodes.append(Gate(m)) - newnodes[-1].id = id(newnodes[-1]) - newnodes[-1].is_dagger = False - newnodes[-1].flag = "measurement" - newnodes[-1].get_edge(0) ^ edge1[index[i]] - newnodes.append(Gate(m)) - newnodes[-1].id = id(newnodes[-1]) - newnodes[-1].is_dagger = True - newnodes[-1].flag = "measurement" - newnodes[-1].get_edge(0) ^ edge2[index[i]] + if self._d == 2: + m = (1 - sample[i]) * gates.array_to_tensor( + np.array([1, 0]) + ) + sample[i] * gates.array_to_tensor(np.array([0, 1])) + g1 = Gate(m) + g1.id = id(g1) + g1.is_dagger = False + g1.flag = "measurement" + newnodes.append(g1) + g1.get_edge(0) ^ edge1[index[i]] + g2 = Gate(m) + g2.id = id(g2) + g2.is_dagger = True + g2.flag = "measurement" + newnodes.append(g2) + g2.get_edge(0) ^ edge2[index[i]] + else: + vec = backend.one_hot(backend.cast(sample[i], "int32"), self._d) + v = backend.cast(vec, dtypestr) + m = backend.tensordot(v, v, axes=0) + g = Gate(m) + g.id = id(g) + g.is_dagger = False + g.flag = "measurement" + newnodes.append(g) + g.get_edge(0) ^ edge1[index[i]] + g.get_edge(1) ^ edge2[index[i]] rho = ( 1 / backend.cast(p, dtypestr) * contractor(newnodes, output_edge_order=[edge1[j], edge2[j]]).tensor ) - pu = backend.real(rho[0, 0]) - if status is None: - r = backend.implicit_randu()[0] + if self._d == 2: + pu = backend.real(rho[0, 0]) + if status is None: + r = backend.implicit_randu()[0] + r = backend.real(backend.cast(r, rdtypestr)) + r = r + phi * backend.cast( + backend.convert_to_tensor(k + 1), rdtypestr + ) + r = r - backend.floor(r) + else: + r = backend.real(backend.cast(status[k], rdtypestr)) + eps = backend.cast( + backend.convert_to_tensor(0.31415926 * 1e-12), rdtypestr + ) + sign = backend.sign(r - pu + eps) / 2 + 0.5 + sign = backend.convert_to_tensor(sign) + sign = backend.cast(sign, rdtypestr) + sign_complex = backend.cast(sign, dtypestr) + sample.append(sign_complex) + p = p * (pu * (-1) ** sign + sign) else: - r = status[k] - r = backend.real(backend.cast(r, dtypestr)) - eps = 0.31415926 * 1e-12 - sign = backend.sign(r - pu + eps) / 2 + 0.5 # in case status is exactly 0.5 - sign = backend.convert_to_tensor(sign) - sign = backend.cast(sign, dtype=rdtypestr) - sign_complex = backend.cast(sign, dtypestr) - sample.append(sign_complex) - p = p * (pu * (-1) ** sign + sign) - - sample = backend.stack(sample) - sample = backend.real(sample) + pu = backend.real(backend.diagonal(rho)) + pu = backend.clip(pu, zero_r, one_r) + d = backend.shape_tuple(pu)[-1] + pu = pu + tiny_r * ( + backend.ones((d,), dtype=rdtypestr) + / backend.cast(backend.convert_to_tensor(float(d)), rdtypestr) + ) + pu = pu / backend.sum(pu) + cdf = backend.cumsum(pu) + if status is None: + r = backend.implicit_randu()[0] + r = backend.real(backend.cast(r, rdtypestr)) + r = r + phi * backend.cast( + backend.convert_to_tensor(k + 1), rdtypestr + ) + r = 
r - backend.floor(r) + r = backend.clip(r, zero_r, one_r - tiny_r) + else: + r = backend.real(backend.cast(status[k], rdtypestr)) + k_out = backend.searchsorted(cdf, r, side="right") + k_out = backend.clip( + k_out, + backend.cast(backend.convert_to_tensor(0), "int32"), + backend.cast(backend.convert_to_tensor(d - 1), "int32"), + ) + sample.append(backend.cast(k_out, rdtypestr)) + p = p * backend.cast(pu[k_out], rdtypestr) + sample = backend.real(backend.stack(sample)) if with_prob: return sample, p else: @@ -443,35 +521,56 @@ def measure_jit( def amplitude_before(self, l: Union[str, Tensor]) -> List[Gate]: r""" - Returns the tensornetwor nodes for the amplitude of the circuit given the bitstring l. - For state simulator, it computes :math:`\langle l\vert \psi\rangle`, - for density matrix simulator, it computes :math:`Tr(\rho \vert l\rangle \langle 1\vert)` + Returns the tensornetwor nodes for the amplitude of the circuit given a computational-basis label ``l``. + For a state simulator, it computes :math:`\langle l \vert \psi\rangle`; + for a density-matrix simulator, it computes :math:`\mathrm{Tr}(\rho \vert l\rangle\langle l\vert)`. Note how these two are different up to a square operation. - :param l: The bitstring of 0 and 1s. + :Example: + + >>> c = tc.Circuit(2) + >>> c.X(0) + >>> c.amplitude("10") # d=2, per-qubit digits + array(1.+0.j, dtype=complex64) + >>> c.CNOT(0, 1) + >>> c.amplitude("11") + array(1.+0.j, dtype=complex64) + + For qudits (d>2, d<=36): + >>> c = tc.Circuit(3, dim=12) + >>> c.amplitude("0A2") # base-12 string, A stands for 10 + + :param l: Basis label. + - If a string: it must be a base-d string of length ``nqubits``, using 0–9A–Z (A=10,…,Z=35) when ``d<=36``. + - If a tensor/array/list: it should contain per-site integers in ``[0, d-1]`` with length ``nqubits``. :type l: Union[str, Tensor] :return: The tensornetwork nodes for the amplitude of the circuit. 
:rtype: List[Gate] """ + + def _basis_nod(_k: int) -> Tensor: + _vec = np.zeros((self._d,), dtype=npdtype) + _vec[_k] = 1.0 + return _vec + no, d_edges = self._copy() ms = [] if self.is_dm: msconj = [] if isinstance(l, str): - for s in l: - if s == "1": - endn = np.array([0, 1], dtype=npdtype) - elif s == "0": - endn = np.array([1, 0], dtype=npdtype) - ms.append(tn.Node(endn)) + symbols = _decode_basis_label(l, d=self._d, n=self._nqubits) + for k in symbols: + n = _basis_nod(k) + ms.append(tn.Node(n)) if self.is_dm: - msconj.append(tn.Node(endn)) - else: # l is Tensor + msconj.append(tn.Node(n)) + else: l = backend.cast(l, dtype=dtypestr) for i in range(self._nqubits): - endn = l[i] * gates.array_to_tensor(np.array([0, 1])) + ( - 1 - l[i] - ) * gates.array_to_tensor(np.array([1, 0])) + endn = backend.cast( + backend.one_hot(backend.cast(l[i], "int32"), self._d), + dtype=dtypestr, + ) ms.append(tn.Node(endn)) if self.is_dm: msconj.append(tn.Node(endn)) @@ -522,17 +621,18 @@ def amplitude(self, l: Union[str, Tensor]) -> Tensor: def probability(self) -> Tensor: """ - get the 2^n length probability vector over computational basis + get the d^n length probability vector over computational basis - :return: probability vector + :return: probability vector of shape [d**n] :rtype: Tensor """ s = self.state() # type: ignore if self.is_dm is False: - p = backend.abs(s) ** 2 - + amp = backend.reshape(s, [-1]) + p = backend.real(backend.abs(amp) ** 2) else: - p = backend.abs(backend.diagonal(s)) + diag = backend.diagonal(s) + p = backend.real(backend.reshape(diag, [-1])) return p @partial(arg_alias, alias_dict={"format": ["format_"]}) @@ -546,7 +646,7 @@ def sample( status: Optional[Tensor] = None, jittable: bool = True, ) -> Any: - """ + r""" batched sampling from state or circuit tensor network directly :param batch: number of samples, defaults to None @@ -569,6 +669,7 @@ def sample( "count_tuple": # (np.array([0]), np.array([2])) "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0} + for cases d\in [11, 36], use 0–9A–Z digits (e.g., 'A' -> 10, …, 'Z' -> 35); "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0} @@ -620,7 +721,7 @@ def perfect_sampling(key: Any) -> Any: return r r = backend.stack([ri[0] for ri in r]) # type: ignore r = backend.cast(r, "int32") - ch = sample_bin2int(r, self._nqubits) + ch = sample_bin2int(r, self._nqubits, d=self._d) else: # allow_state if batch is None: nbatch = 1 @@ -645,7 +746,7 @@ def perfect_sampling(key: Any) -> Any: # 2, # ) if format is None: # for backward compatibility - confg = sample_int2bin(ch, self._nqubits) + confg = sample_int2bin(ch, self._nqubits, d=self._d) prob = backend.gather1d(p, ch) r = list(zip(confg, prob)) # type: ignore if batch is None: @@ -653,7 +754,9 @@ def perfect_sampling(key: Any) -> Any: return r if self._nqubits > 35: jittable = False - return sample2all(sample=ch, n=self._nqubits, format=format, jittable=jittable) + return sample2all( + sample=ch, n=self._nqubits, format=format, jittable=jittable, d=self._d + ) def sample_expectation_ps( self, @@ -725,6 +828,7 @@ def sample_expectation_ps( :return: [description] :rtype: Tensor """ + self._not_implemented_for_qudit() from .noisemodel import sample_expectation_ps_noisfy if noise_conf is None: @@ -817,6 +921,7 @@ def readouterror_bs( """ # if isinstance(readout_error, tuple): # readout_error = list[readout_error] # type: ignore + self._not_implemented_for_qudit() try: nqubit = int(readout_error.shape[0]) # type: ignore except AttributeError: @@ -853,9 +958,9 @@ def replace_inputs(self, 
inputs: Tensor) -> None: """ inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = _infer_num_sites(N, self._d) assert n == self._nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) if self.inputs is not None: self._nodes[0].tensor = inputs if self.is_dm: @@ -884,11 +989,12 @@ def cond_measurement(self, index: int, status: Optional[float] = None) -> Tensor matrix after this method is kept in mixed state without knowing the measuremet resuslts + .. note:: + This helper is qubit-only (d=2). For qudit (d>2), use explicit projectors/Kraus operators. - - :param index: the qubit for the z-basis measurement + :param index: the site index for the Z-basis measurement :type index: int - :return: 0 or 1 for z measurement on up and down freedom + :return: 0 or 1 for Z-basis measurement outcome :rtype: Tensor """ return self.general_kraus( # type: ignore @@ -967,8 +1073,8 @@ def get_quvector(self) -> QuVector: def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor: """ - remaining wavefunction or density matrix on qubits in left, with other qubits - fixed in 0 or 1 indicated by traceout + remaining wavefunction or density matrix on sites in ``left``, with other sites + fixed to given digits (0..d-1) as indicated by ``traceout`` :param traceout: can be jitted :type traceout: Tensor @@ -977,15 +1083,19 @@ def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor :return: _description_ :rtype: Tensor """ - end0, end1 = gates.array_to_tensor(np.array([1.0, 0]), np.array([0, 1.0])) + + def _basis_gate(k_tensor: Any) -> Gate: + vec = backend.one_hot(backend.cast(k_tensor, "int32"), self._d) + vec = backend.cast(vec, dtypestr) + return Gate(vec) + traceout = backend.cast(traceout, dtypestr) nodes, front = self._copy() L = self._nqubits edges = [] for i in range(len(traceout)): if i not in left: - b = traceout[i] - n = gates.Gate((1 - b) * end0 + b * end1) + n = _basis_gate(traceout[i]) nodes.append(n) front[i] ^ n[0] else: @@ -994,8 +1104,7 @@ def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor if self.is_dm: for i in range(len(traceout)): if i not in left: - b = traceout[i] - n = gates.Gate((1 - b) * end0 + b * end1) + n = _basis_gate(traceout[i]) nodes.append(n) front[i + L] ^ n[0] else: diff --git a/tensorcircuit/channels.py b/tensorcircuit/channels.py index 6b2fd959..5eb573c8 100644 --- a/tensorcircuit/channels.py +++ b/tensorcircuit/channels.py @@ -3,18 +3,17 @@ """ import sys -from typing import Any, Sequence, Union, Optional, Dict from functools import partial -import numpy as np +from typing import Any, Sequence, Union, Optional, Dict +import numpy as np from . import cons +from . import gates from . import interfaces from .cons import backend, dtypestr -from . 
import gates from .gates import array_to_tensor - thismodule = sys.modules[__name__] Gate = gates.Gate @@ -484,6 +483,8 @@ def _collect_channels() -> Sequence[str]: channels = _collect_channels() + + # channels = ["depolarizing", "amplitudedamping", "reset", "phasedamping"] @@ -694,7 +695,7 @@ def choi_to_kraus( output_dim = _safe_sqrt(dim[1]) # Get eigen-decomposition of Choi-matrix - e, v = backend.eigh(choi) # value of e is from minimal to maxmal + e, v = backend.eigh(choi) # value of e is from minimal to maxmal e = backend.real(e) v = backend.transpose(v) @@ -790,7 +791,7 @@ def super_to_kraus(superop: Matrix) -> Matrix: argnums=[0], gate_to_tensor=True, ) -def is_hermitian_matrix(mat: Matrix, rtol: float = 1e-8, atol: float = 1e-5): +def is_hermitian_matrix(mat: Matrix, rtol: float = 1e-8, atol: float = 1e-5) -> bool: """ Test if an array is a Hermitian matrix @@ -922,7 +923,7 @@ def evol_superop(density_matrix: Matrix, superop: Matrix) -> Matrix: ) def check_rep_transformation( kraus: Sequence[Gate], density_matrix: Matrix, verbose: bool = False -): +) -> None: """ Check the convertation between those representations. diff --git a/tensorcircuit/circuit.py b/tensorcircuit/circuit.py index b18bdf49..43c8056c 100644 --- a/tensorcircuit/circuit.py +++ b/tensorcircuit/circuit.py @@ -1,32 +1,35 @@ """ -Quantum circuit: the state simulator +Quantum circuit: the state simulator. +Supports qubit (d=2) and qudit (3 <= d <= 36) systems. + For string-encoded samples/counts, digits use 0–9A–Z where A=10, …, Z=35. """ # pylint: disable=invalid-name -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple from functools import reduce from operator import add +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple import numpy as np import tensornetwork as tn -from . import gates from . import channels -from .cons import backend, contractor, dtypestr, npdtype +from . import gates +from .basecircuit import BaseCircuit +from .cons import backend, contractor, dtypestr, npdtype, _ALPHABET +from .gates import Gate from .quantum import QuOperator, identity from .simplify import _full_light_cone_cancel -from .basecircuit import BaseCircuit -Gate = gates.Gate Tensor = Any class Circuit(BaseCircuit): - """ + r""" ``Circuit`` class. - Simple usage demo below. + Simple usage demos below. + Qubit quick example: .. code-block:: python c = tc.Circuit(3) @@ -35,6 +38,14 @@ class Circuit(BaseCircuit): c.RX(2, theta=tc.num_to_tensor(1.)) c.expectation([tc.gates.z(), (2, )]) # 0.54 + Qudit quick example (d=12): + .. code-block:: python + + c = tc.Circuit(2, d=12) + c.H(0) + c.X(0, 1) + s, prob = c.measure(0, with_prob=True) + # For d <= 36, string samples use base-d characters 0–9A–Z (A=10, ...). """ is_dm = False @@ -42,17 +53,20 @@ class Circuit(BaseCircuit): def __init__( self, nqubits: int, + dim: Optional[int] = None, inputs: Optional[Tensor] = None, mps_inputs: Optional[QuOperator] = None, split: Optional[Dict[str, Any]] = None, ) -> None: - """ + r""" Circuit object based on state simulator. :param nqubits: The number of qubits in the circuit. :type nqubits: int + :param dim: The local Hilbert space dimension per site. Qudit is supported for 2 <= d <= 36. + :type dim: If None, the dimension of the circuit will be `2`, which is a qubit system. :param inputs: If not None, the initial state of the circuit is taken as ``inputs`` - instead of :math:`\\vert 0\\rangle^n` qubits, defaults to None. + instead of :math:`\vert 0 \rangle^n` qubits, defaults to None. 
:type inputs: Optional[Tensor], optional :param mps_inputs: QuVector for a MPS like initial wavefunction. :type mps_inputs: Optional[QuOperator] @@ -60,6 +74,7 @@ def __init__( ``max_singular_values`` and ``max_truncation_err``. :type split: Optional[Dict[str, Any]] """ + self._validate_dim(dim=dim) self.inputs = inputs self.mps_inputs = mps_inputs self.split = split @@ -67,21 +82,22 @@ def __init__( self.circuit_param = { "nqubits": nqubits, + "dim": dim, "inputs": inputs, "mps_inputs": mps_inputs, "split": split, } if (inputs is None) and (mps_inputs is None): - nodes = self.all_zero_nodes(nqubits) + nodes = self.all_zero_nodes(nqubits, d=self._d) self._front = [n.get_edge(0) for n in nodes] elif inputs is not None: # provide input function inputs = backend.convert_to_tensor(inputs) inputs = backend.cast(inputs, dtype=dtypestr) inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = int(np.log(N) / np.log(self._d)) assert n == nqubits or n == 2 * nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) inputs = Gate(inputs) nodes = [inputs] self._front = [inputs.get_edge(i) for i in range(n)] @@ -178,27 +194,14 @@ def mid_measurement(self, index: int, keep: int = 0) -> Tensor: :param index: The index of qubit that the Z direction postselection applied on. :type index: int - :param keep: 0 for spin up, 1 for spin down, defaults to be 0. + :param keep: the post-selected digit in {0, ..., d-1}, defaults to be 0. :type keep: int, optional """ # normalization not guaranteed - # assert keep in [0, 1] - if keep < 0.5: - gate = np.array( - [ - [1.0], - [0.0], - ], - dtype=npdtype, - ) - else: - gate = np.array( - [ - [0.0], - [1.0], - ], - dtype=npdtype, - ) + gate = np.array( + [[0.0] if _idx != keep else [1.0] for _idx in range(self._d)], + dtype=npdtype, + ) mg1 = tn.Node(gate) mg2 = tn.Node(gate) @@ -250,6 +253,8 @@ def depolarizing2( :return: Returns 0.0. The function modifies the circuit in place. :rtype: float """ + # This channel API is currently qubit-only (d=2). For d>2, a NotImplementedError will be raised. + self._not_implemented_for_qudit() if status is None: status = backend.implicit_randu()[0] g = backend.cond( @@ -420,6 +425,8 @@ def index2gate(r: Tensor, kraus: Sequence[Tensor]) -> Tensor: r = backend.cast(r, dtype=dtypestr) return reduce(add, [r[i] * kraus[i] for i in range(l)]) + # This channel API is currently qubit-only (d=2). For d>2, a NotImplementedError will be raised. 
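        # Rough workaround sketch for d > 2 (illustrative, not part of this API): build the
        # d x d operator yourself and apply it as an ordinary gate, e.g.
        #     u = some_unitary          # hypothetical (d, d) unitary for the chosen site
        #     c.any(0, unitary=u)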
+ self._not_implemented_for_qudit() return self._unitary_kraus_template( kraus, *index, @@ -479,7 +486,7 @@ def step_function(x: Tensor) -> Tensor: if get_gate_from_index is None: raise ValueError("no `get_gate_from_index` implementation is provided") g = get_gate_from_index(r, kraus) - g = backend.reshape(g, [2 for _ in range(sites * 2)]) + g = backend.reshape(g, [self._d for _ in range(sites * 2)]) self.any(*index, unitary=g, name=name) # type: ignore return r @@ -503,8 +510,8 @@ def calculate_kraus_p(i: Tensor) -> Tensor: # self._copy seems slower than self._copy_state, but anyway the building time is unacceptable lnewnodes, lnewfront = self._copy(conj=True) kraus_i = backend.switch(i, kraus_tensor_f) - k = gates.Gate(kraus_i) - kc = gates.Gate(backend.conj(kraus_i)) + k = Gate(kraus_i) + kc = Gate(backend.conj(kraus_i)) # begin connect for ind, j in enumerate(index): newfront[j] ^ k[ind + sites] @@ -587,9 +594,9 @@ def calculate_kraus_p(i: int) -> Tensor: # i: Tensor as int of shape [] # kraus_i = backend.switch(i, kraus_tensor_f) kraus_i = kraus_tensor[i] - dm = gates.Gate(ntensor) - k = gates.Gate(kraus_i) - kc = gates.Gate(backend.conj(kraus_i)) + dm = Gate(ntensor) + k = Gate(kraus_i) + kc = Gate(backend.conj(kraus_i)) # begin connect for ind in range(sites): dm[ind] ^ k[ind + sites] @@ -637,6 +644,8 @@ def general_kraus( when the random number will be generated automatically :type status: Optional[float], optional """ + # This channel API is currently qubit-only (d=2). For d>2, a NotImplementedError will be raised. + self._not_implemented_for_qudit() return self._general_kraus_2( kraus, *index, status=status, with_prob=with_prob, name=name ) @@ -654,6 +663,8 @@ def apply( name: Optional[str] = None, **vars: float, ) -> None: + # This channel API is currently qubit-only (d=2). For d>2, a NotImplementedError will be raised. + self._not_implemented_for_qudit() kraus = krausf(**vars) if not is_unitary: self.apply_general_kraus(kraus, *index, status=status, name=name) @@ -679,8 +690,10 @@ def _meta_apply_channels(cls) -> None: doc = """ Apply %s quantum channel on the circuit. See :py:meth:`tensorcircuit.channels.%schannel` + These channel shorthands are qubit-only (d=2). + For qudit d>2, prefer explicit Kraus operators compatible with d. NotImplemented for qudit now. - :param index: Qubit number that the gate applies on. + :param index: Site index that the gate applies on. :type index: int. :param status: uniform external random number between 0 and 1 :type status: Tensor @@ -737,8 +750,8 @@ def get_quoperator(self) -> QuOperator: :return: ``QuOperator`` object for the circuit unitary (open indices for the input state) :rtype: QuOperator """ - mps = identity([2 for _ in range(self._nqubits)]) - c = Circuit(self._nqubits) + mps = identity([self._d for _ in range(self._nqubits)]) + c = Circuit(self._nqubits, self._d) ns, es = self._copy() c._nodes = ns c._front = es @@ -758,8 +771,8 @@ def matrix(self) -> Tensor: :return: The circuit unitary matrix :rtype: Tensor """ - mps = identity([2 for _ in range(self._nqubits)]) - c = Circuit(self._nqubits) + mps = identity([self._d for _ in range(self._nqubits)]) + c = Circuit(self._nqubits, self._d) ns, es = self._copy() c._nodes = ns c._front = es @@ -772,6 +785,9 @@ def measure_reference( """ Take measurement on the given quantum lines by ``index``. + Return format: + - For d <= 36, the sample is a base-d string using 0–9A–Z (A=10,…). 
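        For instance, on a d = 12 circuit a two-site outcome might read "A3",
        i.e. the digits (10, 3).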
+ :Example: >>> c = tc.Circuit(3) @@ -800,10 +816,9 @@ def measure_reference( if i != j: e ^ edge2[i] for i in range(len(sample)): - if sample[i] == "0": - m = np.array([1, 0], dtype=npdtype) - else: - m = np.array([0, 1], dtype=npdtype) + m = np.array([0 for _ in range(self._d)], dtype=npdtype) + m[int(sample[i])] = 1 + nodes1.append(tn.Node(m)) nodes1[-1].get_edge(0) ^ edge1[index[i]] nodes2.append(tn.Node(m)) @@ -814,15 +829,13 @@ def measure_reference( / p * contractor(nodes1, output_edge_order=[edge1[j], edge2[j]]).tensor ) - pu = rho[0, 0] - r = backend.random_uniform([]) - r = backend.real(backend.cast(r, dtypestr)) - if r < backend.real(pu): - sample += "0" - p = p * pu - else: - sample += "1" - p = p * (1 - pu) + probs = backend.real(backend.diagonal(rho)) + probs /= backend.sum(probs) + outcome = np.random.choice(self._d, p=probs) + + sample += _ALPHABET[outcome] + p *= float(probs[outcome]) + if with_prob: return sample, p else: @@ -842,6 +855,10 @@ def expectation( ) -> Tensor: """ Compute the expectation of corresponding operators. + For qudit (d > 2), + ensure that operator tensor shapes are consistent with d (each site contributes two axes of size d). + + Noise shorthand (via noise_conf) is qubit-only; for d>2, use explicit operators. :Example: @@ -883,8 +900,6 @@ def expectation( :return: Tensor with one element :rtype: Tensor """ - from .noisemodel import expectation_noisfy - if noise_conf is None: # if not reuse: # nodes1, edge1 = self._copy() @@ -899,6 +914,10 @@ def expectation( nodes1 = _full_light_cone_cancel(nodes1) return contractor(nodes1).tensor else: + from .noisemodel import expectation_noisfy + + # This channel API is currently qubit-only (d = 2). For d > 2, a NotImplementedError will be raised. + self._not_implemented_for_qudit() return expectation_noisfy( self, *ops, @@ -916,12 +935,14 @@ def expectation( def expectation( *ops: Tuple[tn.Node, List[int]], ket: Tensor, + d: Optional[int] = None, bra: Optional[Tensor] = None, conj: bool = True, normalization: bool = False, ) -> Tensor: """ Compute :math:`\\langle bra\\vert ops \\vert ket\\rangle`. + For qudit systems (d>2), ops must be reshaped with per-site axes of length d. Example 1 (:math:`bra` is same as :math:`ket`) @@ -966,6 +987,8 @@ def expectation( :type ket: Tensor :param bra: :math:`bra`, defaults to None, which is the same as ``ket``. :type bra: Optional[Tensor], optional + :param d: dimension of the circuit (defaults to 2) + :type d: int, optional :param conj: :math:`bra` changes to the adjoint matrix of :math:`bra`, defaults to True. :type conj: bool, optional :param normalization: Normalize the :math:`ket` and :math:`bra`, defaults to False. @@ -974,6 +997,7 @@ def expectation( :return: The result of :math:`\\langle bra\\vert ops \\vert ket\\rangle`. 
:rtype: Tensor """ + d = 2 if d is None else d if bra is None: bra = ket if isinstance(ket, QuOperator): @@ -987,8 +1011,8 @@ def expectation( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) - op = gates.Gate(op) + op = backend.reshaped(op, d) + op = Gate(op) # need to be changed if isinstance(index, int): index = [index] noe = len(index) @@ -1011,8 +1035,8 @@ def expectation( if conj is True: bra = backend.conj(bra) ket = backend.reshape(ket, [-1]) - ket = backend.reshape2(ket) - bra = backend.reshape2(bra) + ket = backend.reshaped(ket, d) + bra = backend.reshaped(bra, d) n = len(backend.shape_tuple(ket)) ket = Gate(ket) bra = Gate(bra) @@ -1024,8 +1048,8 @@ def expectation( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) - op = gates.Gate(op) + op = backend.reshaped(op, d) + op = Gate(op) # need to be changed if isinstance(index, int): index = [index] noe = len(index) diff --git a/tensorcircuit/cons.py b/tensorcircuit/cons.py index c7db79ac..95fc7637 100644 --- a/tensorcircuit/cons.py +++ b/tensorcircuit/cons.py @@ -17,8 +17,8 @@ import tensornetwork as tn from tensornetwork.backend_contextmanager import get_default_backend -from .backends.numpy_backend import NumpyBackend from .backends import get_backend +from .backends.numpy_backend import NumpyBackend from .simplify import _multi_remove logger = logging.getLogger(__name__) @@ -63,6 +63,9 @@ def sorted_edges(edges: Iterator[tn.Edge]) -> List[tn.Edge]: npdtype = np.complex64 backend: NumpyBackend = get_backend("numpy") contractor = tn.contractors.auto +_ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + # these above lines are just for mypy, it is not very good at evaluating runtime object @@ -204,9 +207,9 @@ def set_dtype(dtype: Optional[str] = None, set_global: bool = True) -> Tuple[str setattr(sys.modules[module], "rdtypestr", rdtype) setattr(sys.modules[module], "npdtype", npdtype) - from .gates import meta_gate + from .gates import set_gates_for - meta_gate() + set_gates_for() return dtype, rdtype @@ -543,7 +546,6 @@ def _get_path_cache_friendly( get_tn_info = partial(_get_path_cache_friendly, algorithm=_identity) - # some contractor setup usages """ import cotengra as ctg diff --git a/tensorcircuit/densitymatrix.py b/tensorcircuit/densitymatrix.py index f819e058..0d4ff5d6 100644 --- a/tensorcircuit/densitymatrix.py +++ b/tensorcircuit/densitymatrix.py @@ -17,7 +17,7 @@ from .circuit import Circuit from .cons import backend, contractor, dtypestr from .basecircuit import BaseCircuit -from .quantum import QuOperator +from .quantum import QuOperator, _infer_num_sites Gate = gates.Gate Tensor = Any @@ -29,6 +29,7 @@ class DMCircuit(BaseCircuit): def __init__( self, nqubits: int, + dim: Optional[int] = None, empty: bool = False, inputs: Optional[Tensor] = None, mps_inputs: Optional[QuOperator] = None, @@ -55,6 +56,7 @@ def __init__( ``max_singular_values`` and ``max_truncation_err``. 
:type split: Optional[Dict[str, Any]] """ + self._validate_dim(dim=dim) if not empty: if ( (inputs is None) @@ -73,9 +75,9 @@ def __init__( inputs = backend.cast(inputs, dtype=dtypestr) inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = _infer_num_sites(N, self._d) assert n == nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) inputs_gate = Gate(inputs) self._nodes = [inputs_gate] self.coloring_nodes(self._nodes) @@ -94,7 +96,9 @@ def __init__( elif dminputs is not None: dminputs = backend.convert_to_tensor(dminputs) dminputs = backend.cast(dminputs, dtype=dtypestr) - dminputs = backend.reshape(dminputs, [2 for _ in range(2 * nqubits)]) + dminputs = backend.reshape( + dminputs, [self._d for _ in range(2 * nqubits)] + ) dminputs_gate = Gate(dminputs) nodes = [dminputs_gate] self._front = [dminputs_gate.get_edge(i) for i in range(2 * nqubits)] @@ -217,7 +221,7 @@ def apply_general_kraus( dd = dmc.densitymatrix() circuits.append(dd) tensor = reduce(add, circuits) - tensor = backend.reshape(tensor, [2 for _ in range(2 * self._nqubits)]) + tensor = backend.reshape(tensor, [self._d for _ in range(2 * self._nqubits)]) self._nodes = [Gate(tensor)] dangling = [e for e in self._nodes[0]] self._front = dangling @@ -255,7 +259,9 @@ def densitymatrix(self, check: bool = False, reuse: bool = True) -> Tensor: t = contractor(nodes, output_edge_order=d_edges) else: t = nodes[0] - dm = backend.reshape(t.tensor, shape=[2**self._nqubits, 2**self._nqubits]) + dm = backend.reshape( + t.tensor, shape=[self._d**self._nqubits, self._d**self._nqubits] + ) if check: self.check_density_matrix(dm) return dm @@ -274,7 +280,7 @@ def wavefunction(self) -> Tensor: dm = self.densitymatrix() e, v = backend.eigh(dm) np.testing.assert_allclose( - e[:-1], backend.zeros([2**self._nqubits - 1]), atol=1e-5 + e[:-1], backend.zeros([self._d**self._nqubits - 1]), atol=1e-5 ) return v[:, -1] @@ -375,7 +381,7 @@ def apply_general_kraus( # index = [index[0] for _ in range(len(kraus))] super_op = kraus_to_super_gate(kraus) nlegs = 4 * len(index) - super_op = backend.reshape(super_op, [2 for _ in range(nlegs)]) + super_op = backend.reshape(super_op, [self._d for _ in range(nlegs)]) super_op = Gate(super_op) o2i = int(nlegs / 2) r2l = int(nlegs / 4) diff --git a/tensorcircuit/gates/__init__.py b/tensorcircuit/gates/__init__.py new file mode 100644 index 00000000..3cf91edf --- /dev/null +++ b/tensorcircuit/gates/__init__.py @@ -0,0 +1,358 @@ +import os +import sys +import types +from copy import deepcopy +from functools import lru_cache +from importlib import import_module, util +from typing import Any, Optional, List, Union, Dict, Tuple, TypedDict + +import numpy as np + +try: + from numpy import ComplexWarning # type: ignore +except ImportError: # np2.0 compatibility + from numpy.exceptions import ComplexWarning # type: ignore + +import tensornetwork as tn + +from tensorcircuit.cons import backend, dtypestr, npdtype + + +__all__ = [ + "get_gate_module", + "matrix_for_gate", + "bmatrix", + "Gate", + "array_to_tensor", + "num_to_tensor", +] + + +class GateDict(TypedDict): + sgates: List[str] + vgates: List[str] + mpogates: List[str] + gate_aliases: List[List[str]] + + +_qubit_gates: GateDict = { + "sgates": ["i", "x", "y", "z", "h", "t", "s", "td", "sd", "wroot"] + + ["cnot", "cz", "swap", "cy", "ox", "oy", "oz"] + + ["toffoli", "fredkin"], + "vgates": [ + "r", + "cr", + "u", + "cu", + "rx", + "ry", + "rz", + 
"phase", + "rxx", + "ryy", + "rzz", + "cphase", + "crx", + "cry", + "crz", + "orx", + "ory", + "orz", + "iswap", + "any", + "exp", + "exp1", + ], + "mpogates": ["multicontrol", "mpo"], + "gate_aliases": [ + ["cnot", "cx"], + ["fredkin", "cswap"], + ["toffoli", "ccnot"], + ["toffoli", "ccx"], + ["any", "unitary"], + ["sd", "sdg"], + ["td", "tdg"], + ], +} + +_qudit_gates: GateDict = { + "sgates": ["i", "x", "z", "h", "s"], + "vgates": ["u8", "cphase", "csum", "any"], + "mpogates": [], # ["mpo"], + "gate_aliases": [ + ["any", "unitary"], + ], +} + +_FILE = os.path.join(os.path.dirname(__file__), "qudit_impl.py") + +Tensor = Any +Array = Any +Operator = Any # QuOperator + + +def merge_gate_dicts( + d1: GateDict, d2: GateDict +) -> Tuple[List[str], List[str], List[str], List[List[str]]]: + sgates = sorted(set(d1["sgates"]) | set(d2["sgates"])) + vgates = sorted(set(d1["vgates"]) | set(d2["vgates"])) + mpogates = sorted(set(d1["mpogates"]) | set(d2["mpogates"])) + + seen: set[tuple[str, ...]] = set() + gate_aliases: List[List[str]] = [] + for pair in d1["gate_aliases"] + d2["gate_aliases"]: + tup = tuple(pair) + if tup not in seen: + seen.add(tup) + gate_aliases.append(list(tup)) + + return sgates, vgates, mpogates, gate_aliases + + +sgates, vgates, mpogates, gate_aliases = merge_gate_dicts(_qubit_gates, _qudit_gates) + + +_EXPORTED_NAMES: set[str] = set() + + +def _populate_namespace(mod: types.ModuleType) -> None: + """ + Export non-dunder attributes from the gate implementation module to the current module namespace, + for example, allowing users to directly access `tc.gates._zz_matrix`. + It will clean up old exports before switching dimensions to avoid residual pollution. + """ + global _EXPORTED_NAMES + for name in _EXPORTED_NAMES: + globals().pop(name, None) + _EXPORTED_NAMES = set() + + for name in dir(mod): + if name.startswith("__"): + continue + globals()[name] = getattr(mod, name) + _EXPORTED_NAMES.add(name) + + +@lru_cache(maxsize=None) +def _load_qubit() -> types.ModuleType: + return import_module(".qubit_impl", package=__name__) + + +@lru_cache(maxsize=None) +def _load_qudit(dim: int) -> types.ModuleType: + mod_name = f"tensorcircuit.gates.qudit_impl_d{dim}" + spec = util.spec_from_file_location(mod_name, _FILE) + if spec is None: + raise ImportError(f"Cannot build ModuleSpec for {mod_name} from {_FILE}") + + mod = util.module_from_spec(spec) + loader = spec.loader + if loader is None: + raise ImportError(f"ModuleSpec has no loader for {mod_name} from {_FILE}") + + sys.modules[mod_name] = mod + loader.exec_module(mod) + mod.meta_gate(dim), mod.meta_vgate(dim) + return mod + + +def set_gates_for(dim: Optional[int] = None, export: bool = True) -> types.ModuleType: + """ + Select and load the specified dimension gates implementation module. + When export=True (default), + it will export the symbols to the tensorcircuit.gates namespace (all namespaces will be polluted); + when export=False, + it will only return the module object without modifying the global namespace (suitable for multiple d coexistence). 
+ """ + dim = 2 if dim is None else dim + if not isinstance(dim, int) or dim < 2: + raise ValueError("Dimension must be an integer >=2.") + + mod = _load_qubit() if dim == 2 else _load_qudit(dim) + if export: + _populate_namespace(mod) + + return mod + + +def get_gate_module(d: int) -> types.ModuleType: + return set_gates_for(d, export=False) + + +class Gate(tn.Node): # type: ignore + """ + Wrapper of tn.Node, quantum gate + """ + + def __repr__(self) -> str: + """Formatted output of Gate + + :Example: + + >>> tc.gates.ry(0.5) + >>> # OR + >>> print(repr(tc.gates.ry(0.5))) + Gate( + name: '__unnamed_node__', + tensor: + , + edges: [ + Edge(Dangling Edge)[0], + Edge(Dangling Edge)[1] + ]) + """ + sp = " " * 4 + edges = self.get_all_edges() + edges_text = [edge.__repr__().replace("\n", "").strip() for edge in edges] + edges_out = f"[" + f"\n{sp * 2}" + f",\n{sp * 2}".join(edges_text) + f"\n{sp}]" + tensor_out = f"\n{sp * 2}" + self.tensor.__repr__().replace("\n", f"\n{sp * 2}") + return ( + f"{self.__class__.__name__}(\n" + f"{sp}name: {self.name!r},\n" + f"{sp}tensor:{tensor_out},\n" + f"{sp}edges: {edges_out})" + ) + + def copy(self, conjugate: bool = False) -> "Gate": + result = super().copy(conjugate=conjugate) + result.__class__ = Gate + return result # type: ignore + + +def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> Any: + r""" + Convert the inputs to Tensor with specified dtype. + + :Example: + + >>> from tensorcircuit.gates import num_to_tensor + >>> # OR + >>> from tensorcircuit.gates import array_to_tensor + >>> + >>> x, y, z = 0, 0.1, np.array([1]) + >>> + >>> tc.set_backend('numpy') + numpy_backend + >>> num_to_tensor(x, y, z) + [array(0.+0.j, dtype=complex64), array(0.1+0.j, dtype=complex64), array([1.+0.j], dtype=complex64)] + >>> + >>> tc.set_backend('tensorflow') + tensorflow_backend + >>> num_to_tensor(x, y, z) + [, + , + ] + >>> + >>> tc.set_backend('pytorch') + pytorch_backend + >>> num_to_tensor(x, y, z) + [tensor(0.+0.j), tensor(0.1000+0.j), tensor([1.+0.j])] + >>> + >>> tc.set_backend('jax') + jax_backend + >>> num_to_tensor(x, y, z) + [DeviceArray(0.+0.j, dtype=complex64), + DeviceArray(0.1+0.j, dtype=complex64), + DeviceArray([1.+0.j], dtype=complex64)] + + :param num: inputs + :type num: Union[float, Tensor] + :param dtype: dtype of the output Tensors + :type dtype: str, optional + :return: List of Tensors + :rtype: List[Tensor] + """ + # TODO(@YHPeter): fix __doc__ for same function with different names + + l = [] + if dtype is None: + dtype = dtypestr + for n in num: + if not backend.is_tensor(n): + l.append(backend.cast(backend.convert_to_tensor(n), dtype=dtype)) + else: + l.append(backend.cast(n, dtype=dtype)) + if len(l) == 1: + return l[0] + return l + + +array_to_tensor = num_to_tensor + + +def gate_wrapper(m: Tensor, n: Optional[str] = None) -> Gate: + if n is None: + n = "unknowngate" + m = m.astype(npdtype) + return Gate(deepcopy(m), name=n) + + +def matrix_for_gate(gate: Gate, tol: float = 1e-6) -> Tensor: + r""" + Convert Gate to numpy array. + + :Example: + + >>> gate = tc.gates.r_gate() + >>> tc.gates.matrix_for_gate(gate) + array([[1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j]], dtype=complex64) + + :param gate: input Gate + :type gate: Gate + :return: Corresponding Tensor + :rtype: Tensor + """ + + t = gate.tensor + t = backend.reshapem(t) + t = backend.numpy(t) + t.real[abs(t.real) < tol] = 0.0 + t.imag[abs(t.imag) < tol] = 0.0 + return t + + +def bmatrix(a: Array) -> str: + r""" + Returns a :math:`\LaTeX` bmatrix. 
+ + :Example: + + >>> gate = tc.gates.r_gate() + >>> array = tc.gates.matrix_for_gate(gate) + >>> array + array([[1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j]], dtype=complex64) + >>> print(tc.gates.bmatrix(array)) + \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} + + Formatted Display: + + .. math:: + \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} + + :param a: 2D numpy array + :type a: np.array + :raises ValueError: ValueError("bmatrix can at most display two dimensions") + :return: :math:`\LaTeX`-formatted string for bmatrix of the array a + :rtype: str + """ + # Adopted from https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix/17131750 + + if len(a.shape) > 2: + raise ValueError("bmatrix can at most display two dimensions") + lines = str(a).replace("[", "").replace("]", "").splitlines() + rv = [r"\begin{bmatrix}"] + rv += [" " + " & ".join(l.split()) + r"\\" for l in lines] + rv[-1] = rv[-1][:-2] + rv += [r" \end{bmatrix}"] + return "".join(rv) + + +# --- Default behavior: Register and export qubit gates when importing tensorcircuit.gates --- +try: + _DEFAULT_GATE_MODULE: types.ModuleType = set_gates_for(2, export=True) +except Exception: + pass diff --git a/tensorcircuit/gates.py b/tensorcircuit/gates/qubit_impl.py similarity index 79% rename from tensorcircuit/gates.py rename to tensorcircuit/gates/qubit_impl.py index 459666ab..066a0678 100644 --- a/tensorcircuit/gates.py +++ b/tensorcircuit/gates/qubit_impl.py @@ -6,21 +6,17 @@ import warnings from copy import deepcopy from functools import reduce, partial -from typing import Any, Callable, Optional, Sequence, List, Union, Tuple from operator import mul +from typing import Any, Callable, Optional, Sequence, List, Union, Tuple import numpy as np - -try: - from numpy import ComplexWarning -except ImportError: # np2.0 compatibility - from numpy.exceptions import ComplexWarning # type: ignore - import tensornetwork as tn from scipy.stats import unitary_group -from .cons import backend, dtypestr, npdtype, runtime_backend -from .utils import arg_alias +from ..cons import backend, dtypestr, npdtype, runtime_backend +from ..utils import arg_alias +from . import ComplexWarning +from . 
import Gate, num_to_tensor, array_to_tensor thismodule = sys.modules[__name__] @@ -34,6 +30,12 @@ plus_state = 1.0 / np.sqrt(2) * (zero_state + one_state) minus_state = 1.0 / np.sqrt(2) * (zero_state - one_state) +# Common elements as np.ndarray objects +_i00 = np.array([[1.0, 0.0], [0.0, 0.0]]) +_i01 = np.array([[0.0, 1.0], [0.0, 0.0]]) +_i10 = np.array([[0.0, 0.0], [1.0, 0.0]]) +_i11 = np.array([[0.0, 0.0], [0.0, 1.0]]) + # Common single qubit gates as np.ndarray objects _h_matrix = 1 / np.sqrt(2) * np.array([[1.0, 1.0], [1.0, -1.0]]) _i_matrix = np.array([[1.0, 0.0], [0.0, 1.0]]) @@ -48,7 +50,6 @@ * np.array([[1, -1 / np.sqrt(2) * (1 + 1.0j)], [1 / np.sqrt(2) * (1 - 1.0j), 1]]) ) - _ii_matrix = np.kron(_i_matrix, _i_matrix) _xx_matrix = np.kron(_x_matrix, _x_matrix) _yy_matrix = np.kron(_y_matrix, _y_matrix) @@ -68,7 +69,6 @@ _zx_matrix = np.kron(_z_matrix, _x_matrix) _zy_matrix = np.kron(_z_matrix, _y_matrix) - _cnot_matrix = np.array( [ [1.0, 0.0, 0.0, 0.0], @@ -105,7 +105,6 @@ ] ) - _toffoli_matrix = np.array( [ [1.0, 0, 0, 0, 0, 0, 0, 0], @@ -141,121 +140,79 @@ def __rmul__(self: tn.Node, lvalue: Union[float, complex]) -> "Gate": tn.Node.__rmul__ = __rmul__ -class Gate(tn.Node): # type: ignore - """ - Wrapper of tn.Node, quantum gate - """ - - def __repr__(self) -> str: - """Formatted output of Gate - - :Example: - - >>> tc.gates.ry(0.5) - >>> # OR - >>> print(repr(tc.gates.ry(0.5))) - Gate( - name: '__unnamed_node__', - tensor: - , - edges: [ - Edge(Dangling Edge)[0], - Edge(Dangling Edge)[1] - ]) - """ - sp = " " * 4 - edges = self.get_all_edges() - edges_text = [edge.__repr__().replace("\n", "").strip() for edge in edges] - edges_out = f"[" + f"\n{sp*2}" + f",\n{sp*2}".join(edges_text) + f"\n{sp}]" - tensor_out = f"\n{sp*2}" + self.tensor.__repr__().replace("\n", f"\n{sp*2}") - return ( - f"{self.__class__.__name__}(\n" - f"{sp}name: {self.name!r},\n" - f"{sp}tensor:{tensor_out},\n" - f"{sp}edges: {edges_out})" - ) - - def copy(self, conjugate: bool = False) -> "Gate": - result = super().copy(conjugate=conjugate) - result.__class__ = Gate - return result # type: ignore - - -def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> Any: - r""" - Convert the inputs to Tensor with specified dtype. 
- - :Example: - - >>> from tensorcircuit.gates import num_to_tensor - >>> # OR - >>> from tensorcircuit.gates import array_to_tensor - >>> - >>> x, y, z = 0, 0.1, np.array([1]) - >>> - >>> tc.set_backend('numpy') - numpy_backend - >>> num_to_tensor(x, y, z) - [array(0.+0.j, dtype=complex64), array(0.1+0.j, dtype=complex64), array([1.+0.j], dtype=complex64)] - >>> - >>> tc.set_backend('tensorflow') - tensorflow_backend - >>> num_to_tensor(x, y, z) - [, - , - ] - >>> - >>> tc.set_backend('pytorch') - pytorch_backend - >>> num_to_tensor(x, y, z) - [tensor(0.+0.j), tensor(0.1000+0.j), tensor([1.+0.j])] - >>> - >>> tc.set_backend('jax') - jax_backend - >>> num_to_tensor(x, y, z) - [DeviceArray(0.+0.j, dtype=complex64), - DeviceArray(0.1+0.j, dtype=complex64), - DeviceArray([1.+0.j], dtype=complex64)] - - :param num: inputs - :type num: Union[float, Tensor] - :param dtype: dtype of the output Tensors - :type dtype: str, optional - :return: List of Tensors - :rtype: List[Tensor] - """ - # TODO(@YHPeter): fix __doc__ for same function with different names - - l = [] - if not dtype: - dtype = dtypestr - for n in num: - if not backend.is_tensor(n): - l.append(backend.cast(backend.convert_to_tensor(n), dtype=dtype)) - else: - l.append(backend.cast(n, dtype=dtype)) - if len(l) == 1: - return l[0] - return l - - -array_to_tensor = num_to_tensor - - -def gate_wrapper(m: Tensor, n: Optional[str] = None) -> Gate: - if not n: - n = "unknowngate" - m = m.astype(npdtype) - return Gate(deepcopy(m), name=n) +# def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> Any: +# r""" +# Convert the inputs to Tensor with specified dtype. +# +# :Example: +# +# >>> from tensorcircuit.gates import num_to_tensor +# >>> # OR +# >>> from tensorcircuit.gates import array_to_tensor +# >>> +# >>> x, y, z = 0, 0.1, np.array([1]) +# >>> +# >>> tc.set_backend('numpy') +# numpy_backend +# >>> num_to_tensor(x, y, z) +# [array(0.+0.j, dtype=complex64), array(0.1+0.j, dtype=complex64), array([1.+0.j], dtype=complex64)] +# >>> +# >>> tc.set_backend('tensorflow') +# tensorflow_backend +# >>> num_to_tensor(x, y, z) +# [, +# , +# ] +# >>> +# >>> tc.set_backend('pytorch') +# pytorch_backend +# >>> num_to_tensor(x, y, z) +# [tensor(0.+0.j), tensor(0.1000+0.j), tensor([1.+0.j])] +# >>> +# >>> tc.set_backend('jax') +# jax_backend +# >>> num_to_tensor(x, y, z) +# [DeviceArray(0.+0.j, dtype=complex64), +# DeviceArray(0.1+0.j, dtype=complex64), +# DeviceArray([1.+0.j], dtype=complex64)] +# +# :param num: inputs +# :type num: Union[float, Tensor] +# :param dtype: dtype of the output Tensors +# :type dtype: str, optional +# :return: List of Tensors +# :rtype: List[Tensor] +# """ +# # TODO(@YHPeter): fix __doc__ for same function with different names +# +# l = [] +# if dtype is None: +# dtype = dtypestr +# for n in num: +# if not backend.is_tensor(n): +# l.append(backend.cast(backend.convert_to_tensor(n), dtype=dtype)) +# else: +# l.append(backend.cast(n, dtype=dtype)) +# if len(l) == 1: +# return l[0] +# return l +# +# +# array_to_tensor = num_to_tensor + + +# def gate_wrapper(m: Tensor, n: Optional[str] = None) -> Gate: +# if n is None: +# n = "unknowngate" +# m = m.astype(npdtype) +# return Gate(deepcopy(m), name=n) class GateF: def __init__( self, m: Tensor, n: Optional[str] = None, ctrl: Optional[List[int]] = None ): - if not n: + if n is None: n = "unknowngate" self.m = m self.n = n @@ -310,7 +267,7 @@ def f(*args: Any, **kws: Any) -> Any: return Gate(cu, name="c" + self.n) - if not self.ctrl: + if self.ctrl is None: 
ctrl = [1] else: ctrl = [1] + self.ctrl @@ -330,7 +287,7 @@ def f(*args: Any, **kws: Any) -> Any: # TODO(@refraction-ray): ctrl convention to be finally determined return Gate(ocu, name="o" + self.n) - if not self.ctrl: + if self.ctrl is None: ctrl = [0] else: ctrl = [0] + self.ctrl @@ -349,7 +306,7 @@ def __init__( n: Optional[str] = None, ctrl: Optional[List[int]] = None, ): - if not n: + if n is None: n = "unknowngate" self.f = f self.n = n @@ -404,66 +361,66 @@ def meta_gate() -> None: pauli_gates = [i(), x(), y(), z()] # type: ignore -def matrix_for_gate(gate: Gate, tol: float = 1e-6) -> Tensor: - r""" - Convert Gate to numpy array. - - :Example: - - >>> gate = tc.gates.r_gate() - >>> tc.gates.matrix_for_gate(gate) - array([[1.+0.j, 0.+0.j], - [0.+0.j, 1.+0.j]], dtype=complex64) - - :param gate: input Gate - :type gate: Gate - :return: Corresponding Tensor - :rtype: Tensor - """ - - t = gate.tensor - t = backend.reshapem(t) - t = backend.numpy(t) - t.real[abs(t.real) < tol] = 0.0 - t.imag[abs(t.imag) < tol] = 0.0 - return t - - -def bmatrix(a: Array) -> str: - r""" - Returns a :math:`\LaTeX` bmatrix. - - :Example: - - >>> gate = tc.gates.r_gate() - >>> array = tc.gates.matrix_for_gate(gate) - >>> array - array([[1.+0.j, 0.+0.j], - [0.+0.j, 1.+0.j]], dtype=complex64) - >>> print(tc.gates.bmatrix(array)) - \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} - - Formatted Display: - - .. math:: - \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} - - :param a: 2D numpy array - :type a: np.array - :raises ValueError: ValueError("bmatrix can at most display two dimensions") - :return: :math:`\LaTeX`-formatted string for bmatrix of the array a - :rtype: str - """ - # Adopted from https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix/17131750 - - if len(a.shape) > 2: - raise ValueError("bmatrix can at most display two dimensions") - lines = str(a).replace("[", "").replace("]", "").splitlines() - rv = [r"\begin{bmatrix}"] - rv += [" " + " & ".join(l.split()) + r"\\" for l in lines] - rv[-1] = rv[-1][:-2] - rv += [r" \end{bmatrix}"] - return "".join(rv) +# def matrix_for_gate(gate: Gate, tol: float = 1e-6) -> Tensor: +# r""" +# Convert Gate to numpy array. +# +# :Example: +# +# >>> gate = tc.gates.r_gate() +# >>> tc.gates.matrix_for_gate(gate) +# array([[1.+0.j, 0.+0.j], +# [0.+0.j, 1.+0.j]], dtype=complex64) +# +# :param gate: input Gate +# :type gate: Gate +# :return: Corresponding Tensor +# :rtype: Tensor +# """ +# +# t = gate.tensor +# t = backend.reshapem(t) +# t = backend.numpy(t) +# t.real[abs(t.real) < tol] = 0.0 +# t.imag[abs(t.imag) < tol] = 0.0 +# return t + + +# def bmatrix(a: Array) -> str: +# r""" +# Returns a :math:`\LaTeX` bmatrix. +# +# :Example: +# +# >>> gate = tc.gates.r_gate() +# >>> array = tc.gates.matrix_for_gate(gate) +# >>> array +# array([[1.+0.j, 0.+0.j], +# [0.+0.j, 1.+0.j]], dtype=complex64) +# >>> print(tc.gates.bmatrix(array)) +# \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} +# +# Formatted Display: +# +# .. 
math:: +# \begin{bmatrix} 1.+0.j & 0.+0.j\\ 0.+0.j & 1.+0.j \end{bmatrix} +# +# :param a: 2D numpy array +# :type a: np.array +# :raises ValueError: ValueError("bmatrix can at most display two dimensions") +# :return: :math:`\LaTeX`-formatted string for bmatrix of the array a +# :rtype: str +# """ +# # Adopted from https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix/17131750 +# +# if len(a.shape) > 2: +# raise ValueError("bmatrix can at most display two dimensions") +# lines = str(a).replace("[", "").replace("]", "").splitlines() +# rv = [r"\begin{bmatrix}"] +# rv += [" " + " & ".join(l.split()) + r"\\" for l in lines] +# rv[-1] = rv[-1][:-2] +# rv += [r" \end{bmatrix}"] +# return "".join(rv) def phase_gate(theta: float = 0) -> Gate: @@ -483,7 +440,7 @@ def phase_gate(theta: float = 0) -> Gate: :rtype: Gate """ theta = array_to_tensor(theta) - i00, i11 = array_to_tensor(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]])) + i00, i11 = array_to_tensor(_i00, _i11) unitary = i00 + backend.exp(1.0j * theta) * i11 return Gate(unitary) @@ -512,7 +469,7 @@ def get_u_parameter(m: Tensor) -> Tuple[float, float, float]: return theta, phi, lbd -def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: +def u_gate(theta: float = 0.0, phi: float = 0.0, lbd: float = 0.0) -> Gate: r""" IBMQ U gate following the converntion of OpenQASM3.0. See `OpenQASM doc `_ @@ -533,12 +490,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: :rtype: Gate """ theta, phi, lbd = array_to_tensor(theta, phi, lbd) - i00, i01, i10, i11 = array_to_tensor( - np.array([[1, 0], [0, 0]]), - np.array([[0, 1], [0, 0]]), - np.array([[0, 0], [1, 0]]), - np.array([[0, 0], [0, 1]]), - ) + i00, i01, i10, i11 = array_to_tensor(_i00, _i01, _i10, _i11) unitary = ( backend.cos(theta / 2) * i00 - backend.exp(1.0j * lbd) * backend.sin(theta / 2) * i01 @@ -548,7 +500,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: return Gate(unitary) -def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def r_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" General single qubit rotation gate @@ -582,7 +534,7 @@ def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: # r = r_gate -def rx_gate(theta: float = 0) -> Gate: +def rx_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`x` axis. @@ -603,7 +555,7 @@ def rx_gate(theta: float = 0) -> Gate: # rx = rx_gate -def ry_gate(theta: float = 0) -> Gate: +def ry_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`y` axis. @@ -624,7 +576,7 @@ def ry_gate(theta: float = 0) -> Gate: # ry = ry_gate -def rz_gate(theta: float = 0) -> Gate: +def rz_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`z` axis. @@ -645,7 +597,7 @@ def rz_gate(theta: float = 0) -> Gate: # rz = rz_gate -def rgate_theoretical(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def rgate_theoretical(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" Rotation gate implemented by matrix exponential. The output is the same as `rgate`. @@ -723,7 +675,7 @@ def iswap_gate(theta: float = 1.0) -> Gate: # iswap = iswap_gate -def cr_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def cr_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" Controlled rotation gate. When the control qubit is 1, `rgate` is applied to the target qubit. 
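A quick sanity check of the projector-constant refactor above (an illustrative sketch, assuming the default numpy backend and complex64 dtype; ``phase_gate`` is rebuilt from the shared ``_i00``/``_i11`` projectors):

import numpy as np
import tensorcircuit as tc

theta = 0.3
# after the refactor, phase_gate should still equal diag(1, e^{i*theta})
expected = np.diag([1.0, np.exp(1j * theta)])
np.testing.assert_allclose(tc.gates.phase_gate(theta).tensor, expected, atol=1e-6)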
@@ -825,6 +777,8 @@ def exponential_gate(unitary: Tensor, theta: float, name: str = "none") -> Gate: exp_gate = exponential_gate + + # exp = exponential_gate @@ -933,7 +887,7 @@ def multicontrol_gate(unitary: Tensor, ctrl: Union[int, Sequence[int]] = 1) -> O for i in range(1, len(nodes) - 1): nodes[i][3] ^ nodes[i + 1][0] - from .quantum import QuOperator + from ..quantum import QuOperator l = int((len(nodes[-1].edges) - 1) / 2) gate = QuOperator( diff --git a/tensorcircuit/gates/qudit_impl.py b/tensorcircuit/gates/qudit_impl.py new file mode 100644 index 00000000..163edc78 --- /dev/null +++ b/tensorcircuit/gates/qudit_impl.py @@ -0,0 +1,485 @@ +""" +Declarations of single-qubit and two-qubit gates and their corresponding matrix. +""" + +import sys +from typing import Any, Callable, Optional, Union + +import numpy as np +from sympy import mod_inverse, Mod + +import tensornetwork as tn + +from ..cons import backend, dtypestr, npdtype +from . import Gate, array_to_tensor + + +thismodule = sys.modules[__name__] + +Tensor = Any +Array = Any +Operator = Any # QuOperator + + +def _is_prime(n: int) -> bool: + if n < 2: + return False + if n in (2, 3, 5, 7): + return True + if n % 2 == 0 or n % 3 == 0: + return False + + r = int(n**0.5) + 1 + for i in range(5, r, 6): + if n % i == 0 or n % (i + 2) == 0: + return False + return True + + +def _i_matrix_func(d: int) -> Tensor: + matrix = np.zeros((d, d), dtype=npdtype) + for i in range(d): + matrix[i, i] = 1.0 + return matrix + + +def _x_matrix_func(d: int) -> Tensor: + r""" + X_d\ket{j} = \ket{(j + 1) mod d} + """ + matrix = np.zeros((d, d), dtype=npdtype) + for j in range(d): + matrix[(j + 1) % d, j] = 1.0 + return matrix + + +def _z_matrix_func(d: int, omega: float) -> Tensor: + r""" + Z_d\ket{j} = \omega^{j}\ket{j} + """ + matrix = np.zeros((d, d), dtype=npdtype) + for j in range(d): + matrix[j, j] = omega**j + return matrix + + +def _h_matrix_func(d: int, omega: float) -> Tensor: + r""" + H_d\ket{j} = \frac{1}{\sqrt{d}}\sum_{k=0}^{d-1}\omega^{jk}\ket{k} + """ + matrix = np.zeros((d, d), dtype=npdtype) + for j in range(d): + for k in range(d): + matrix[j, k] = omega ** (j * k) / np.sqrt(d) + return matrix.T + + +def _s_matrix_func(d: int, omega: float) -> Tensor: + r""" + S_d\ket{j} = \omega^{j(j + p_d) / 2}\ket{j} + """ + _pd = 0 if d % 2 == 0 else 1 + matrix = np.zeros((d, d), dtype=complex) + for j in range(d): + phase_exp = (j * (j + _pd)) / 2 + matrix[j, j] = omega**phase_exp + return matrix + + +class GateMatrices: + def __init__(self, d: int): + assert d > 2 + self.d = d + self.omega = np.exp(2j * np.pi / self.d) + + # Common single qubit gates as np.ndarray objects + self._i_matrix = _i_matrix_func(self.d) + self._x_matrix = _x_matrix_func(self.d) + self._z_matrix = _z_matrix_func(self.d, self.omega) + self._h_matrix = _h_matrix_func(self.d, self.omega) + + self._ii_matrix = np.kron(self._i_matrix, self._i_matrix) + self._xx_matrix = np.kron(self._x_matrix, self._x_matrix) + self._zz_matrix = np.kron(self._z_matrix, self._z_matrix) + + self._ix_matrix = np.kron(self._i_matrix, self._x_matrix) + self._iz_matrix = np.kron(self._i_matrix, self._z_matrix) + self._xi_matrix = np.kron(self._x_matrix, self._i_matrix) + self._zi_matrix = np.kron(self._z_matrix, self._i_matrix) + + self._xz_matrix = np.kron(self._x_matrix, self._z_matrix) + self._zx_matrix = np.kron(self._z_matrix, self._x_matrix) + + +def meta_gate(dim: int) -> None: + gm = GateMatrices(dim) + for attr in dir(gm): + if not (attr.startswith("_") and attr.endswith("_matrix")): + 
continue + gate_name = attr[1:-7] + m = getattr(gm, attr) + + if getattr(m, "ndim", None) == 2: + n0, n1 = m.shape + if n0 != n1: + raise ValueError( + f"{gate_name}: gate matrix must be square, got {m.shape}" + ) + + tmp, k = n0, 0 + while tmp % dim == 0 and tmp > 1: + tmp //= dim + k += 1 + if tmp != 1: + raise ValueError(f"{gate_name}: size {n0} is not a power of dim={dim}") + if k >= 1: + m = np.reshape(m, newshape=[dim] * (2 * k)) + else: + if getattr(m, "ndim", 0) % 2 != 0: + raise ValueError( + f"{gate_name}: tensor order must be even, got {m.ndim}" + ) + if any(s != dim for s in m.shape): + raise ValueError( + f"{gate_name}: all tensor dims must equal dim={dim}, got {m.shape}" + ) + + gf = GateF(m, gate_name) + setattr(thismodule, gate_name + "gate", gf) + setattr(thismodule, gate_name + "_gate", gf) + setattr(thismodule, gate_name, gf) + + +def _u8_matrix_func( + d: int, gamma: float = 2.0, z: float = 1.0, eps: float = 0.0 +) -> Tensor: + if not _is_prime(d): + raise ValueError( + f"Dimension d={d} is not prime, U8 gate requires a prime dimension." + ) + if gamma == 0.0: + raise ValueError("gamma must be non-zero") + + vks = [0] * d + if d == 3: + vks = [0, 1, 8] + else: + try: + inv_12 = mod_inverse(12, d) + except ValueError: + raise ValueError( + f"Inverse of 12 mod {d} does not exist. Choose a prime d that does not divide 12." + ) + + for i in range(1, d): + a = inv_12 * i * (gamma + i * (6 * z + (2 * i - 3) * gamma)) + eps * i + vks[i] = Mod(a, d) + + # print(vks) + sum_vks = Mod(sum(vks), d) + if sum_vks != 0: + raise ValueError( + f"Sum of v_k's is not 0 mod {d}. Got {sum_vks}. Check parameters." + ) + + omega = np.exp(2j * np.pi / d) + matrix = np.zeros((d, d), dtype=npdtype) + for j in range(d): + matrix[j, j] = omega ** vks[j] + return matrix + + +def _cphase_matrix_func(d: int, cv: Optional[int] = None) -> Tensor: + r""" + Qudit Controlled-z gate + \ket{r}\ket{s} \rightarrow \omega^{rs}\ket{r}\ket{s} = \ket{r}Z^r\ket{s} + + This gate is also called SUMZ gate, where Z represents Z_d gate. + ┌─ ─┐ + │ I_d 0 0 ... 0 │ + │ 0 Z_d 0 ... 0 │ + SUMZ_d = │ 0 0 Z_d^2 ... 0 │ + │ . . . . . │ + │ 0 0 0 ... Z_d^{d-1} │ + └ ─┘ + """ + size = d**2 + omega = np.exp(2j * np.pi / d) + z_matrix = _z_matrix_func(d=d, omega=omega) + + if cv is None: + z_pows = [np.eye(d, dtype=npdtype)] + for _ in range(1, d): + z_pows.append(z_pows[-1] @ z_matrix) + + matrix = np.zeros((size, size), dtype=npdtype) + for a in range(d): + rs = a * d + matrix[rs : rs + d, rs : rs + d] = z_pows[a] + return matrix + + if not (0 <= cv < d): + raise ValueError(f"cv must be in [0, {d - 1}], got {cv}") + + matrix = np.eye(size, dtype=npdtype) + rs = cv * d + matrix[rs : rs + d, rs : rs + d] = z_matrix + + return matrix + + +def _csum_matrix_func(d: int, cv: Optional[int] = None) -> Tensor: + r""" + Qudit Controlled-NOT gate + \ket{r}\ket{s} \rightarrow \ket{r}\ket{r+s} = \ket{r}X^r\ket{s} = \ket{r}\ket{(r+s) mod d} + + This gate is also called SUMX gate, where X represents X_d gate. + ┌─ ─┐ + │ I_d 0 0 ... 0 │ + │ 0 X_d 0 ... 0 │ + SUMX_d = │ 0 0 X_d^2 ... 0 │ + │ . . . . . │ + │ 0 0 0 ... 
X_d^{d-1} │ + └ ─┘ + """ + size = d**2 + x_matrix = _x_matrix_func(d=d) + + if cv is None: + x_pows = [np.eye(d, dtype=npdtype)] + for _ in range(1, d): + x_pows.append(x_pows[-1] @ x_matrix) + + matrix = np.zeros((size, size), dtype=npdtype) + for a in range(d): + rs = a * d + matrix[rs : rs + d, rs : rs + d] = x_pows[a] + return matrix + + if not (0 <= cv < d): + raise ValueError(f"cv must be in [0, {d - 1}], got {cv}") + matrix = np.eye(size, dtype=npdtype) + rs = cv * d + matrix[rs : rs + d, rs : rs + d] = x_matrix + + return matrix + + +class VGateMatrices: + def __init__(self, d: int): + assert d > 2 + self.d = d + + def u8_gate(self, gamma: float = 2.0, z: float = 1.0, eps: float = 0.0) -> Gate: + m = _u8_matrix_func(self.d, gamma, z, eps) + t = array_to_tensor(m) + t = backend.cast(t, dtypestr) + t = backend.reshaped(t, self.d) + return Gate(t, name="u8") + + def cphase_gate(self, cv: Optional[int] = None) -> Gate: + r""" + Qudit controlled-Z gate. + + Args: + cv (Optional[int]): Control value. + - None: Apply Z_d^r for any control state |r\rangle. + - int (0 <= cv < d): Apply Z_d only when the control qudit is in state |cv\rangle. + """ + m = _cphase_matrix_func(self.d, cv) + t = array_to_tensor(m) + t = backend.cast(t, dtypestr) + t = backend.reshaped(t, self.d) + return Gate(t, name="cz") + + def csum_gate(self, cv: Optional[int] = None) -> Gate: + """ + Qudit controlled-X (SUM) gate. + + Args: + cv (Optional[int]): Control value. + - None: Apply X_d^r for any control state |r\rangle. + - int (0 <= cv < d): Apply X_d only when the control qudit is in state |cv\rangle. + """ + m = _csum_matrix_func(self.d, cv) + t = array_to_tensor(m) + t = backend.cast(t, dtypestr) + t = backend.reshaped(t, self.d) + return Gate(t, name="cnot") + + def mpo_gate(self, mpo: Operator, name: str = "mpo") -> Operator: + raise NotImplementedError("MPO gate not implemented.") + # return mpo + + def any_gate(self, unitary: Tensor, name: str = "any") -> Gate: + """ + Note one should provide the gate with properly reshaped. + + :param unitary: corresponding gate + :type unitary: Tensor + :param name: The name of the gate. + :type name: str + :return: the resulted gate + :rtype: Gate + """ + # deepcopy roadblocks tf.function, pls take care of the unitary outside + if isinstance(unitary, Gate): + unitary.tensor = backend.cast(unitary.tensor, dtypestr) + return unitary + unitary = backend.cast(unitary, dtypestr) + try: + unitary = backend.reshaped(unitary, self.d) + except ValueError: + raise ValueError( + "The dimension of the unitary must be the same as the input dimension." 
+ ) + return Gate(unitary, name=name) + + +def meta_vgate(dim: int) -> None: + vgm = VGateMatrices(dim) + for attr in ["csum", "cphase", "u8", "any", "mpo"]: + gvf = GateVF(getattr(vgm, attr + "_gate"), attr) + for funcname in (attr, attr + "gate", attr + "_gate"): + setattr(thismodule, funcname, gvf) + + +def __rmul__(self: tn.Node, lvalue: Union[float, complex]) -> "Gate": + newg = Gate(lvalue * self.tensor) + return newg + + +tn.Node.__rmul__ = __rmul__ + + +class GateF: + def __init__(self, m: Tensor, n: Optional[str] = None): + if n is None: + n = "unknowngate" + self.m = m + self.n = n + + def __call__(self, *args: Any, **kws: Any) -> Gate: + m1 = array_to_tensor(self.m) + m1 = backend.cast(m1, dtypestr) + return Gate(m1, name=self.n) + + def adjoint(self) -> "GateF": + m = self.__call__() + shape0 = backend.shape_tuple(m.tensor) + m0 = backend.reshapem(m.tensor) + ma = backend.adjoint(m0) + name = self.n + "d" + ma = backend.reshape(ma, shape0) + return GateF(ma, name) + + def ided(self, before: bool = True) -> "GateF": + raise NotImplementedError("Function is not available for qudits.") + + def controlled(self) -> "GateF": + raise NotImplementedError("Function is not available for qudits.") + + def ocontrolled(self) -> "GateF": + raise NotImplementedError("Function is not available for qudits.") + + def __str__(self) -> str: + return self.n + + __repr__ = __str__ + + +class GateVF(GateF): + def __init__( + self, + f: Callable[..., Gate], + n: Optional[str] = None, + ): + if n is None: + n = "unknowngate" + self.f = f + self.n = n + + def __call__(self, *args: Any, **kws: Any) -> Gate: + return self.f(*args, **kws) + + def adjoint(self) -> "GateVF": + def f(*args: Any, **kws: Any) -> Gate: + m = self.__call__(*args, **kws) + shape0 = backend.shape_tuple(m.tensor) + m0 = backend.reshapem(m.tensor) + ma = backend.adjoint(m0) + # if np.allclose(m0, ma, atol=1e-5): + # name = self.n + # else: + name = self.n + "d" + ma = backend.reshape(ma, shape0) + return Gate(ma, name) + + return GateVF(f, self.n + "d") + + +# @partial(arg_alias, alias_dict={"unitary": ["hermitian", "hamiltonian"]}) +# def exponential_gate(unitary: Tensor, theta: float, name: str = "none") -> Gate: +# r""" +# Exponential gate. +# +# .. math:: +# \textrm{exp}(U) = e^{-j \theta U} +# +# :param unitary: input unitary :math:`U` +# :type unitary: Tensor +# :param theta: angle in radians +# :type theta: float +# :param name: suffix of Gate name +# :return: Exponential Gate +# :rtype: Gate +# """ +# theta, unitary = num_to_tensor(theta, unitary) +# mat = backend.expm(-backend.i() * theta * unitary) +# dimension = reduce(mul, mat.shape) +# nolegs = int(np.log(dimension) / np.log(2)) +# mat = backend.reshape(mat, shape=[2 for _ in range(nolegs)]) +# return Gate(mat, name="exp-" + name) + + +# exp_gate = exponential_gate +# exp = exponential_gate + + +# @partial(arg_alias, alias_dict={"unitary": ["hermitian", "hamiltonian"]}) +# def exponential_gate_unity( +# unitary: Tensor, theta: float, half: bool = False, name: str = "none" +# ) -> Gate: +# r""" +# Faster exponential gate directly implemented based on RHS. Only works when :math:`U^2 = I` is an identity matrix. +# +# .. 
math:: +# \textrm{exp}(U) &= e^{-j \theta U} \\ +# &= \cos(\theta) I - j \sin(\theta) U \\ +# +# :param unitary: input unitary :math:`U` +# :type unitary: Tensor +# :param theta: angle in radians +# :type theta: float +# :param half: if True, the angel theta is mutiplied by 1/2, +# defaults to False +# :type half: bool +# :param name: suffix of Gate name +# :type name: str, optional +# :return: Exponential Gate +# :rtype: Gate +# """ +# theta, unitary = num_to_tensor(theta, unitary) +# size = int(reduce(mul, unitary.shape)) +# n = int(np.log2(size)) +# i = np.eye(2 ** (int(n / 2))) +# i = i.reshape([2 for _ in range(n)]) +# unitary = backend.reshape(unitary, [2 for _ in range(n)]) +# it = array_to_tensor(i) +# if half is True: +# theta = theta / 2.0 +# mat = backend.cos(theta) * it - 1.0j * backend.sin(theta) * unitary +# return Gate(mat, name="exp1-" + name) +# +# +# exp1_gate = exponential_gate_unity diff --git a/tensorcircuit/mpscircuit.py b/tensorcircuit/mpscircuit.py index dcd8f0c8..d5cb1af1 100644 --- a/tensorcircuit/mpscircuit.py +++ b/tensorcircuit/mpscircuit.py @@ -8,18 +8,19 @@ from typing import Any, List, Optional, Sequence, Tuple, Dict, Union from copy import copy import logging +import types import numpy as np import tensornetwork as tn -from . import gates +from .gates import Gate from .cons import backend, npdtype, contractor, rdtypestr, dtypestr from .quantum import QuOperator, QuVector, extract_tensors_from_qop from .mps_base import FiniteMPS from .abstractcircuit import AbstractCircuit +from .basecircuit import _decode_basis_label from .utils import arg_alias -Gate = gates.Gate Tensor = Any logger = logging.getLogger(__name__) @@ -87,6 +88,7 @@ class MPSCircuit(AbstractCircuit): def __init__( self, nqubits: int, + dim: Optional[int] = None, center_position: Optional[int] = None, tensors: Optional[Sequence[Tensor]] = None, wavefunction: Optional[Union[QuVector, Tensor]] = None, @@ -97,6 +99,8 @@ def __init__( :param nqubits: The number of qubits in the circuit. :type nqubits: int + :param dim: The local Hilbert space dimension per site. Qudit is supported for 2 <= d <= 36. + :type dim: If None, the dimension of the circuit will be `2`, which is a qubit system. :param center_position: The center position of MPS, default to 0 :type center_position: int, optional :param tensors: If not None, the initial state of the circuit is taken as ``tensors`` @@ -109,6 +113,7 @@ def __init__( :param split: Split rules :type split: Any """ + self._validate_dim(dim=dim) self.circuit_param = { "nqubits": nqubits, "center_position": center_position, @@ -137,7 +142,9 @@ def __init__( wavefunction, split=self.split ) else: # full wavefunction - tensors = self.wavefunction_to_tensors(wavefunction, split=self.split) + tensors = self.wavefunction_to_tensors( + wavefunction, dim_phys=self._d, split=self.split + ) assert len(tensors) == nqubits self._mps = FiniteMPS(tensors, canonicalize=False) self._mps.center_position = 0 @@ -151,8 +158,13 @@ def __init__( self._mps = FiniteMPS(tensors, canonicalize=True, center_position=0) else: tensors = [ - np.array([1.0, 0.0], dtype=npdtype)[None, :, None] - for i in range(nqubits) + np.concatenate( + [ + np.array([1.0], dtype=npdtype), + np.zeros((self._d - 1,), dtype=npdtype), + ] + )[None, :, None] + for _ in range(nqubits) ] self._mps = FiniteMPS(tensors, canonicalize=False) if center_position is not None: @@ -289,18 +301,19 @@ def consecutive_swap( consistent with the split option of the class. 
:type split: Optional[Dict[str, Any]], optional """ + self._not_implemented_for_qudit() if split is None: split = self.split self.position(index_from) if index_from < index_to: for i in range(index_from, index_to): self.apply_adjacent_double_gate( - gates.swap(), i, i + 1, center_position=i + 1, split=split # type: ignore + self.gates.swap(), i, i + 1, center_position=i + 1, split=split # type: ignore ) elif index_from > index_to: for i in range(index_from, index_to, -1): self.apply_adjacent_double_gate( - gates.swap(), i - 1, i, center_position=i - 1, split=split # type: ignore + self.gates.swap(), i - 1, i, center_position=i - 1, split=split # type: ignore ) else: # index_from == index_to @@ -370,42 +383,65 @@ def gate_to_MPO( # b # index must be ordered - assert np.all(np.diff(index) > 0) - index_left = np.min(index) + if len(index) == 0: + raise ValueError("`index` must contain at least one site.") + if not all(index[i] < index[i + 1] for i in range(len(index) - 1)): + raise AssertionError("`index` must be strictly increasing.") + + index_left = int(np.min(index)) if isinstance(gate, tn.Node): gate = backend.copy(gate.tensor) - index = np.array(index) - index_left + + gshape = tuple(backend.shape_tuple(gate)) nindex = len(index) - # transform gate from (in1, in2, ..., out1, out2 ...) to - # (in1, out1, in2, out2, ...) - order = tuple(np.arange(2 * nindex).reshape((2, nindex)).T.flatten()) - shape = (4,) * nindex - gate = backend.reshape(backend.transpose(gate, order), shape) - argsort = np.argsort(index) - # reorder the gate according to the site positions - gate = backend.transpose(gate, tuple(argsort)) - index = index[argsort] # type: ignore - # split the gate into tensors assuming they are adjacent - main_tensors = cls.wavefunction_to_tensors(gate, dim_phys=4, norm=False) - # each tensor is in shape of (i, a, b, j) - tensors = [] - previous_i = None - for i, main_tensor in zip(index, main_tensors): - # insert identites in the middle + + if len(gshape) != 2 * nindex: + raise AssertionError("The gate rank must be 2*n (in..., out...).") + + in_dims = gshape[:nindex] + d = int(in_dims[0]) + if any(int(x) != d for x in in_dims): + raise AssertionError("All input physical dimensions must be equal.") + + dim_phys_mpo = d * d + + order_interleave = tuple( + np.arange(2 * nindex).reshape(2, nindex).T.flatten().tolist() + ) + gate = backend.transpose(gate, order_interleave) + + index_arr = np.array(index, dtype=int) - index_left + pair_order = np.argsort(index_arr) + + pair_axis_perm = np.ravel( + np.column_stack([2 * pair_order, 2 * pair_order + 1]) + ).astype(int) + pair_axis_perm = tuple(pair_axis_perm.tolist()) # type: ignore + gate = backend.transpose(gate, pair_axis_perm) + index_arr = index_arr[pair_order] # type: ignore + + gate = backend.reshape(gate, (dim_phys_mpo,) * nindex) + main_tensors = cls.wavefunction_to_tensors( + gate, dim_phys=dim_phys_mpo, norm=False + ) + + tensors: list[Tensor] = [] + previous_i: Optional[int] = None + + for i, main_tensor in zip(index_arr, main_tensors): if previous_i is not None: - for _ in range(previous_i + 1, i): - bond_dim = tensors[-1].shape[-1] - I = ( - np.eye(bond_dim * 2) - .reshape((bond_dim, 2, bond_dim, 2)) - .transpose((0, 1, 3, 2)) - .astype(dtypestr) - ) - tensors.append(backend.convert_to_tensor(I)) - nleft, _, nright = main_tensor.shape - tensor = backend.reshape(main_tensor, (nleft, 2, 2, nright)) + for _gap_site in range(int(previous_i) + 1, int(i)): + bond_dim = int(backend.shape_tuple(tensors[-1])[-1]) + eye2d = 
backend.eye(bond_dim * d, dtype=backend.dtype(tensors[-1])) + I4 = backend.reshape(eye2d, (bond_dim, d, bond_dim, d)) + I4 = backend.transpose(I4, (0, 1, 3, 2)) + tensors.append(I4) + + nleft, _, nright = backend.shape_tuple(main_tensor) + tensor = backend.reshape(main_tensor, (int(nleft), d, d, int(nright))) tensors.append(tensor) - previous_i = i + previous_i = int(i) + return tensors, index_left @classmethod @@ -448,15 +484,15 @@ def reduce_tensor_dimension( """ if split is None: split = {} - ni = tensor_left.shape[0] - nk = tensor_right.shape[-1] + ni, di = tensor_left.shape[0], tensor_right.shape[1] + nk, dk = tensor_right.shape[-1], tensor_right.shape[-2] T = backend.einsum("iaj,jbk->iabk", tensor_left, tensor_right) - T = backend.reshape(T, (ni * 2, nk * 2)) + T = backend.reshape(T, (ni * di, nk * dk)) new_tensor_left, new_tensor_right = split_tensor( T, center_left=center_left, split=split ) - new_tensor_left = backend.reshape(new_tensor_left, (ni, 2, -1)) - new_tensor_right = backend.reshape(new_tensor_right, (-1, 2, nk)) + new_tensor_left = backend.reshape(new_tensor_left, (ni, di, -1)) + new_tensor_right = backend.reshape(new_tensor_right, (-1, dk, nk)) return new_tensor_left, new_tensor_right def reduce_dimension( @@ -550,10 +586,11 @@ def apply_MPO( for i, idx in zip(i_list, idx_list): O = tensors[i] T = self._mps.tensors[idx] - ni, _, _, nj = O.shape + ni, d_in, _, nj = O.shape nk, _, nl = T.shape OT = backend.einsum("iabj,kbl->ikajl", O, T) - OT = backend.reshape(OT, (ni * nk, 2, nj * nl)) + OT = backend.reshape(OT, (ni * nk, d_in, nj * nl)) + self._mps.tensors[idx] = OT # canonicalize @@ -660,8 +697,7 @@ def mid_measurement(self, index: int, keep: int = 0) -> None: :type keep: int, optional """ # normalization not guaranteed - assert keep in [0, 1] - gate = backend.zeros((2, 2), dtype=dtypestr) + gate = backend.zeros((self._d, self._d), dtype=dtypestr) gate = backend.scatter( gate, backend.convert_to_tensor([[keep, keep]]), @@ -692,7 +728,7 @@ def is_valid(self) -> bool: def wavefunction_to_tensors( cls, wavefunction: Tensor, - dim_phys: int = 2, + dim_phys: Optional[int] = None, norm: bool = True, split: Optional[Dict[str, Any]] = None, ) -> List[Tensor]: @@ -710,6 +746,7 @@ def wavefunction_to_tensors( :return: The tensors :rtype: List[Tensor] """ + dim_phys = dim_phys if dim_phys is not None else 2 if split is None: split = {} wavefunction = backend.reshape(wavefunction, (-1, 1)) @@ -768,10 +805,16 @@ def copy_without_tensor(self) -> "MPSCircuit": for key in vars(self): if key == "_mps": continue - if backend.is_tensor(info[key]): - copied_value = backend.copy(info[key]) + val = info[key] + if backend.is_tensor(val): + copied_value = backend.copy(val) + elif isinstance(val, types.ModuleType): + copied_value = val else: - copied_value = copy(info[key]) + try: + copied_value = copy(val) + except TypeError: + copied_value = val setattr(result, key, copied_value) return result @@ -815,7 +858,8 @@ def normalize(self) -> None: def amplitude(self, l: str) -> Tensor: assert len(l) == self._nqubits - tensors = [self._mps.tensors[i][:, int(s), :] for i, s in enumerate(l)] + idx_list = _decode_basis_label(l, self._d, self._nqubits) + tensors = [self._mps.tensors[i][:, idx, :] for i, idx in enumerate(idx_list)] return reduce(backend.matmul, tensors)[0, 0] def proj_with_mps(self, other: "MPSCircuit", conj: bool = True) -> Tensor: @@ -873,6 +917,7 @@ def slice(self, begin: int, end: int) -> "MPSCircuit": mps = self.__class__( nqubits, + dim=self._d, tensors=tensors, 
center_position=center_position, split=self.split.copy(), @@ -1000,8 +1045,6 @@ def measure( # set the center to the left side, then gradually move to the right and do measurement at sites """ mps = self.copy() - up = backend.convert_to_tensor(np.array([1, 0]).astype(dtypestr)) - down = backend.convert_to_tensor(np.array([0, 1]).astype(dtypestr)) p = 1.0 p = backend.convert_to_tensor(p) @@ -1009,26 +1052,32 @@ def measure( sample = [] for k, site in enumerate(index): mps.position(site) - # do measurement tensor = mps._mps.tensors[site] ps = backend.real( backend.einsum("iaj,iaj->a", tensor, backend.conj(tensor)) ) ps /= backend.sum(ps) - pu = ps[0] + if status is None: r = backend.implicit_randu()[0] else: r = status[k] r = backend.real(backend.cast(r, dtypestr)) - eps = 0.31415926 * 1e-12 - sign = backend.sign(r - pu + eps) / 2 + 0.5 # in case status is exactly 0.5 - sign = backend.convert_to_tensor(sign) - sign = backend.cast(sign, dtype=rdtypestr) - sign_complex = backend.cast(sign, dtypestr) - sample.append(sign_complex) - p = p * (pu * (-1) ** sign + sign) - m = (1 - sign_complex) * up + sign_complex * down + + cdf = backend.cumsum(ps) + choice = backend.sum(backend.cast(r >= cdf, "int32")) + + choice_f = backend.cast(choice, dtypestr) + sample.append(choice_f) + + m = backend.zeros((ps.shape[0],), dtype=dtypestr) + m = backend.scatter( + m, + backend.convert_to_tensor([[backend.cast(choice, "int32")]]), + backend.convert_to_tensor(np.array([1.0], dtype=dtypestr)), + ) + + p = p * backend.sum(ps * m) mps._mps.tensors[site] = backend.einsum("iaj,a->ij", tensor, m)[:, None, :] sample = backend.stack(sample) sample = backend.real(sample) diff --git a/tensorcircuit/noisemodel.py b/tensorcircuit/noisemodel.py index d8953d15..e3d4446f 100644 --- a/tensorcircuit/noisemodel.py +++ b/tensorcircuit/noisemodel.py @@ -203,6 +203,9 @@ def circuit_with_noise( :return: A newly constructed circuit with noise :rtype: AbstractCircuit """ + if getattr(c, "_d", 2) != 2: + c._not_implemented_for_qudit() + qir = c.to_qir() cnew: AbstractCircuit if isinstance(c, DMCircuit): @@ -251,6 +254,8 @@ def sample_expectation_ps_noisfy( :return: sample expectation value with noise :rtype: Tensor """ + if getattr(c, "_d", 2) != 2: + c._not_implemented_for_qudit() if noise_conf is None: noise_conf = NoiseConf() @@ -323,6 +328,8 @@ def expectation_noisfy( :return: expectation value with noise :rtype: Tensor """ + if getattr(c, "_d", 2) != 2: + c._not_implemented_for_qudit() if noise_conf is None: noise_conf = NoiseConf() diff --git a/tensorcircuit/quantum.py b/tensorcircuit/quantum.py index d28e550c..578402c8 100644 --- a/tensorcircuit/quantum.py +++ b/tensorcircuit/quantum.py @@ -56,6 +56,30 @@ def get_all_nodes(edges: Iterable[Edge]) -> List[Node]: return nodes +def _infer_num_sites(D: int, d: int) -> int: + """ + Infer the number of sites (n) from a Hilbert space dimension D + and local dimension d, assuming D = d**n. 
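+
+    :Example: (a quick sanity check of the power-of-d inference)
+
+    >>> _infer_num_sites(8, 2)
+    3
+    >>> _infer_num_sites(9, 3)
+    2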
+ + :param D: total Hilbert space dimension (int) + :param d: local dimension per site (int) + :return: n such that D == d**n + :raises ValueError: if D is not an exact power of d + """ + if not (isinstance(D, int) and D > 0): + raise ValueError(f"D must be a positive integer, got {D}") + if not (isinstance(d, int) and d >= 2): + raise ValueError(f"d must be an integer >= 2, got {d}") + + tmp, n = D, 0 + while tmp % d == 0 and tmp > 1: + tmp //= d + n += 1 + if tmp != 1: + raise ValueError(f"Dimension {D} is not a power of local dim {d}") + return n + + def _reachable(nodes: List[AbstractNode]) -> List[AbstractNode]: if not nodes: raise ValueError("Reachable requires at least 1 node.") @@ -2150,7 +2174,10 @@ def entanglement_entropy(state: Tensor, cut: Union[int, List[int]]) -> Tensor: def reduced_wavefunction( - state: Tensor, cut: List[int], measure: Optional[List[int]] = None + state: Tensor, + cut: List[int], + measure: Optional[List[int]] = None, + d: Optional[int] = None, ) -> Tensor: """ Compute the reduced wavefunction from the quantum state ``state``. @@ -2165,20 +2192,22 @@ def reduced_wavefunction( :type measure: List[int] :return: _description_ :rtype: Tensor + :param d: dimension of qudit system + :type d: int """ + d = 2 if d is None else d if measure is None: measure = [0 for _ in cut] - s = backend.reshape2(state) + s = backend.reshaped(state, d) n = len(backend.shape_tuple(s)) s_node = Gate(s) end_nodes = [] for c, m in zip(cut, measure): - rt = backend.cast(backend.convert_to_tensor(1 - m), dtypestr) * backend.cast( - backend.convert_to_tensor(np.array([1.0, 0.0])), dtypestr - ) + backend.cast(backend.convert_to_tensor(m), dtypestr) * backend.cast( - backend.convert_to_tensor(np.array([0.0, 1.0])), dtypestr + oh = backend.cast( + backend.one_hot(backend.cast(backend.convert_to_tensor(m), "int32"), d), + dtypestr, ) - end_node = Gate(rt) + end_node = Gate(backend.convert_to_tensor(oh)) end_nodes.append(end_node) s_node[c] ^ end_node[0] new_node = contractor( @@ -2193,8 +2222,9 @@ def reduced_density_matrix( cut: Union[int, List[int]], p: Optional[Tensor] = None, normalize: bool = True, + d: Optional[int] = None, ) -> Union[Tensor, QuOperator]: - """ + r""" Compute the reduced density matrix from the quantum state ``state``. :param state: The quantum state in form of Tensor or QuOperator. @@ -2206,8 +2236,12 @@ def reduced_density_matrix( :type p: Optional[Tensor] :return: The reduced density matrix. :rtype: Union[Tensor, QuOperator] - :normalize: if True, returns a trace 1 density matrix. Otherwise does not normalize. + :param normalize: if True, returns a trace 1 density matrix. Otherwise does not normalize. 
+ :type normalize: bool + :param d: dimension of qudit system + :type d: int """ + d = 2 if d is None else d if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set): traceout = list(cut) else: @@ -2220,21 +2254,23 @@ def reduced_density_matrix( return state.partial_trace(traceout) if len(state.shape) == 2 and state.shape[0] == state.shape[1]: # density operator - freedomexp = backend.sizen(state) - # traceout = sorted(traceout)[::-1] - freedom = int(np.log2(freedomexp) / 2) - # traceout2 = [i + freedom for i in traceout] + freedom = _infer_num_sites(state.shape[0], d) + # freedomexp = backend.sizen(state) + # # traceout = sorted(traceout)[::-1] + # freedom = int(np.log2(freedomexp) / 2) + # # traceout2 = [i + freedom for i in traceout] left = traceout + [i for i in range(freedom) if i not in traceout] right = [i + freedom for i in left] - rho = backend.reshape(state, [2 for _ in range(2 * freedom)]) + + rho = backend.reshape(state, [d] * (2 * freedom)) rho = backend.transpose(rho, perm=left + right) rho = backend.reshape( rho, [ - 2 ** len(traceout), - 2 ** (freedom - len(traceout)), - 2 ** len(traceout), - 2 ** (freedom - len(traceout)), + d ** len(traceout), + d ** (freedom - len(traceout)), + d ** len(traceout), + d ** (freedom - len(traceout)), ], ) if p is None: @@ -2247,20 +2283,22 @@ def reduced_density_matrix( p = backend.reshape(p, [-1]) rho = backend.einsum("a,aiaj->ij", p, rho) rho = backend.reshape( - rho, [2 ** (freedom - len(traceout)), 2 ** (freedom - len(traceout))] + rho, [d ** (freedom - len(traceout)), d ** (freedom - len(traceout))] ) if normalize: rho /= backend.trace(rho) else: w = state / backend.norm(state) - freedomexp = backend.sizen(state) - freedom = int(np.log(freedomexp) / np.log(2)) + size = int(backend.sizen(state)) # total number of elements + freedom = _infer_num_sites(size, d) + # freedomexp = backend.sizen(state) + # freedom = int(np.log(freedomexp) / np.log(2)) perm = [i for i in range(freedom) if i not in traceout] perm = perm + traceout - w = backend.reshape(w, [2 for _ in range(freedom)]) + w = backend.reshape(w, [d for _ in range(freedom)]) w = backend.transpose(w, perm=perm) - w = backend.reshape(w, [-1, 2 ** len(traceout)]) + w = backend.reshape(w, [-1, d ** len(traceout)]) if p is None: rho = w @ backend.adjoint(w) else: @@ -2403,7 +2441,9 @@ def truncated_free_energy( @op2tensor -def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: +def partial_transpose( + rho: Tensor, transposed_sites: List[int], d: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2411,10 +2451,13 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: :type rho: Tensor :param transposed_sites: sites int list to be transposed :type transposed_sites: List[int] + :param d: dimension of qudit system + :type d: int :return: _description_ :rtype: Tensor """ - rho = backend.reshape2(rho) + d = 2 if d is None else d + rho = backend.reshaped(rho, d) rho_node = Gate(rho) n = len(rho.shape) // 2 left_edges = [] @@ -2432,7 +2475,9 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: @op2tensor -def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor: +def entanglement_negativity( + rho: Tensor, transposed_sites: List[int], d: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2440,6 +2485,8 @@ def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor: :type rho: Tensor :param transposed_sites: _description_ :type transposed_sites: List[int] + :param 
d: dimension of qudit system +    :type d: int :return: _description_ :rtype: Tensor """ @@ -2450,7 +2497,9 @@ def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor: @op2tensor -def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") -> Tensor: +def log_negativity( +    rho: Tensor, transposed_sites: List[int], base: str = "e", d: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2460,10 +2509,13 @@ def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") -> Tensor: :type transposed_sites: List[int] :param base: whether use 2 based log or e based log, defaults to "e" :type base: str, optional +    :param d: dimension of qudit system +    :type d: int :return: _description_ :rtype: Tensor """ -    rhot = partial_transpose(rho, transposed_sites) +    d = 2 if d is None else d +    rhot = partial_transpose(rho, transposed_sites, d) es = backend.eigvalsh(rhot) rhot_m = backend.sum(backend.abs(es)) een = backend.log(rhot_m) @@ -2549,7 +2601,9 @@ def double_state(h: Tensor, beta: float = 1) -> Tensor: @op2tensor -def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: +def mutual_information( +    s: Tensor, cut: Union[int, List[int]], d: Optional[int] = None +) -> Tensor: """ Mutual information between AB subsystem described by ``cut``. @@ -2557,9 +2611,12 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: :type s: Tensor :param cut: The AB subsystem. :type cut: Union[int, List[int]] +    :param d: local dimension of each site (2 for qubits), defaults to 2 +    :type d: int, optional :return: The mutual information between AB subsystem described by ``cut``. :rtype: Tensor """ +    d = 2 if d is None else d if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set): traceout = list(cut) else: @@ -2567,22 +2624,22 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: if len(s.shape) == 2 and s.shape[0] == s.shape[1]: # mixed state -        n = int(np.log2(backend.sizen(s)) / 2) +        n = _infer_num_sites(s.shape[0], d=d) hab = entropy(s) # subsystem a -        rhoa = reduced_density_matrix(s, traceout) +        rhoa = reduced_density_matrix(s, traceout, d=d) ha = entropy(rhoa) # need subsystem b as well other = tuple(i for i in range(n) if i not in traceout) -        rhob = reduced_density_matrix(s, other)  # type: ignore +        rhob = reduced_density_matrix(s, other, d=d)  # type: ignore hb = entropy(rhob) # pure system else: hab = 0.0 -        rhoa = reduced_density_matrix(s, traceout) +        rhoa = reduced_density_matrix(s, traceout, d=d) ha = hb = entropy(rhoa) return ha + hb - hab @@ -2591,7 +2648,7 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: # measurement results and transformations and correlations below -def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor: +def count_s2d(srepr: Tuple[Tensor, Tensor], n: int, d: Optional[int] = None) -> Tensor: """ measurement shots results, sparse tuple representation to dense representation count_vector to count_tuple @@ -2600,11 +2657,14 @@ def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor: :type srepr: Tuple[Tensor, Tensor] :param n: number of qubits :type n: int +    :param d: local dimension per site, defaults to 2 (qubit) +    :type d: int, optional :return: [description] :rtype: Tensor """ +    d = 2 if d is None else d return backend.scatter( -        backend.cast(backend.zeros([2**n]), srepr[1].dtype), +        backend.cast(backend.zeros([d**n]), srepr[1].dtype), backend.reshape(srepr[0], [-1, 1]), srepr[1], ) @@ -2647,25 +2707,36 @@ def count_d2s(drepr: Tensor, eps: float = 1e-7) -> Tuple[Tensor,
Tensor]: count_t2v = count_d2s -def sample_int2bin(sample: Tensor, n: int) -> Tensor: +def sample_int2bin(sample: Tensor, n: int, d: Optional[int] = None) -> Tensor: """ - int sample to bin sample + Convert linear-index samples to per-site digits (base-d). - :param sample: in shape [trials] of int elements in the range [0, 2**n) + :param sample: shape [trials], integers in [0, d**n) :type sample: Tensor - :param n: number of qubits + :param n: number of sites :type n: int - :return: in shape [trials, n] of element (0, 1) + :param d: local dimension, defaults to 2 + :type d: int, optional + :return: shape [trials, n], entries in [0, d-1] :rtype: Tensor """ - confg = backend.mod( - backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))), - 2, - ) - return confg + d = 2 if d is None else d + if d == 2: + return backend.mod( + backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))), + 2, + ) + else: + pos = backend.reverse(backend.arange(n)) + base = backend.power(d, pos) + digits = backend.mod( + backend.floor(backend.divide(sample[..., None], base)), # ⌊sample / d**pos⌋ + d, + ) + return backend.cast(digits, "int32") -def sample_bin2int(sample: Tensor, n: int) -> Tensor: +def sample_bin2int(sample: Tensor, n: int, d: Optional[int] = None) -> Tensor: """ bin sample to int sample @@ -2676,88 +2747,104 @@ def sample_bin2int(sample: Tensor, n: int) -> Tensor: :return: in shape [trials] :rtype: Tensor """ - power = backend.convert_to_tensor([2**j for j in reversed(range(n))]) + d = 2 if d is None else d + power = backend.convert_to_tensor([d**j for j in reversed(range(n))]) return backend.sum(sample * power, axis=-1) def sample2count( - sample: Tensor, n: int, jittable: bool = True + sample: Tensor, + n: int, + jittable: bool = True, + d: Optional[int] = None, ) -> Tuple[Tensor, Tensor]: """ - sample_int to count_tuple + sample_int to count_tuple (indices, counts), size = d**n - :param sample: _description_ - :type sample: Tensor - :param n: _description_ - :type n: int - :param jittable: _description_, defaults to True - :type jittable: bool, optional - :return: _description_ - :rtype: Tuple[Tensor, Tensor] + :param sample: linear-index samples, shape [shots] + :param n: number of sites + :param jittable: whether to return fixed-size outputs (backend dependent) + :param d: local dimension per site, default 2 (qubit) + :return: (unique_indices, counts) """ - d = 2**n + d = 2 if d is None else d + size = d**n if not jittable: results = backend.unique_with_counts(sample) # non-jittable - else: # jax specified - results = backend.unique_with_counts(sample, size=d, fill_value=-1) + else: # jax specified / fixed-size + results = backend.unique_with_counts(sample, size=size, fill_value=-1) return results -def count_vector2dict(count: Tensor, n: int, key: str = "bin") -> Dict[Any, int]: +def count_vector2dict( + count: Tensor, n: int, key: str = "bin", d: Optional[int] = None +) -> Dict[Any, int]: """ - convert_vector to count_dict_bin or count_dict_int + Convert count_vector to count_dict_bin or count_dict_int. + For d>10 cases, a base-d string (0-9A-Z) is used. 
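+    For example, with ``d=3`` and ``n=2`` the key for index 5 is
+    ``np.base_repr(5, base=3).zfill(2)``, i.e. ``"12"``; for ``d=36`` the digit
+    value 35 is rendered as ``"Z"``.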
- :param count: tensor in shape [2**n] + :param count: tensor in shape [d**n] :type count: Tensor - :param n: number of qubits + :param n: number of sites :type n: int :param key: can be "int" or "bin", defaults to "bin" :type key: str, optional - :return: _description_ - :rtype: _type_ + :param d: local dimension (default 2) + :type d: int, optional + :return: mapping from configuration to count + :rtype: Dict[Any, int] """ from .interfaces import which_backend + d = 2 if d is None else d b = which_backend(count) - d = {i: b.numpy(count[i]).item() for i in range(2**n)} + size = d**n + + out_int = {i: b.numpy(count[i]).item() for i in range(size)} + if key == "int": - return d + return out_int else: - dn = {} - for k, v in d.items(): - kn = str(bin(k))[2:].zfill(n) - dn[kn] = v - return dn + out_str = {} + for k, v in out_int.items(): + kn = np.base_repr(k, base=d).zfill(n) + out_str[kn] = v + return out_str def count_tuple2dict( - count: Tuple[Tensor, Tensor], n: int, key: str = "bin" + count: Tuple[Tensor, Tensor], n: int, key: str = "bin", d: Optional[int] = None ) -> Dict[Any, int]: """ count_tuple to count_dict_bin or count_dict_int - :param count: count_tuple format + :param count: count_tuple format (indices, counts) :type count: Tuple[Tensor, Tensor] - :param n: number of qubits + :param n: number of sites (qubits or qudits) :type n: int :param key: can be "int" or "bin", defaults to "bin" :type key: str, optional + :param d: local dimension, defaults to 2 + :type d: int, optional :return: count_dict - :rtype: _type_ + :rtype: Dict[Any, int] """ - d = { + d = 2 if d is None else d + + out_int = { backend.numpy(i).item(): backend.numpy(j).item() for i, j in zip(count[0], count[1]) if i >= 0 } + if key == "int": - return d + return out_int else: - dn = {} - for k, v in d.items(): - kn = str(bin(k))[2:].zfill(n) - dn[kn] = v - return dn + out_str = {} + for k, v in out_int.items(): + kn = np.base_repr(k, base=d).zfill(n) + out_str[kn] = v + return out_str @partial(arg_alias, alias_dict={"counts": ["shots"], "format": ["format_"]}) @@ -2769,6 +2856,7 @@ def measurement_counts( random_generator: Optional[Any] = None, status: Optional[Tensor] = None, jittable: bool = False, + d: Optional[int] = None, ) -> Any: """ Simulate the measuring of each qubit of ``p`` in the computational basis, @@ -2785,6 +2873,7 @@ def measurement_counts( "count_tuple": # (np.array([0]), np.array([2])) "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0} + (for 10 < d <= 36, digit values 10, ..., 35 are written as letters "A", ..., "Z") "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0} @@ -2836,21 +2925,22 @@ def measurement_counts( state /= backend.norm(state) pi = backend.real(backend.conj(state) * state) pi = backend.reshape(pi, [-1]) - d = int(backend.shape_tuple(pi)[0]) - n = int(np.log(d) / np.log(2) + 1e-8) + + local_d = 2 if d is None else d + total_dim = int(backend.shape_tuple(pi)[0]) + n = _infer_num_sites(total_dim, local_d) + if (counts is None) or counts <= 0: if format == "count_vector": return pi elif format == "count_tuple": return count_d2s(pi) elif format == "count_dict_bin": - return count_vector2dict(pi, n, key="bin") + return count_vector2dict(pi, n, key="bin", d=local_d) elif format == "count_dict_int": - return count_vector2dict(pi, n, key="int") + return count_vector2dict(pi, n, key="int", d=local_d) else: - raise ValueError( - "unsupported format %s for analytical measurement" % format - ) + raise ValueError(f"unsupported format {format} for analytical measurement") else: raw_counts = 
backend.probability_sample( counts, pi, status=status, g=random_generator @@ -2861,7 +2951,7 @@ def measurement_counts( # raw_counts = backend.stateful_randc( # random_generator, a=drange, shape=counts, p=pi # ) - return sample2all(raw_counts, n, format=format, jittable=jittable) + return sample2all(raw_counts, n, format=format, jittable=jittable, d=local_d) measurement_results = measurement_counts @@ -2869,52 +2959,62 @@ def measurement_counts( @partial(arg_alias, alias_dict={"format": ["format_"]}) def sample2all( - sample: Tensor, n: int, format: str = "count_vector", jittable: bool = False + sample: Tensor, + n: int, + format: str = "count_vector", + jittable: bool = False, + d: Optional[int] = None, ) -> Any: """ - transform ``sample_int`` or ``sample_bin`` form results to other forms specified by ``format`` + transform ``sample_int`` or ``sample_bin`` results to other forms specified by ``format`` - :param sample: measurement shots results in ``sample_int`` or ``sample_bin`` format + :param sample: measurement shots results in ``sample_int`` (shape [shots]) or ``sample_bin`` (shape [shots, n]) :type sample: Tensor - :param n: number of qubits + :param n: number of sites :type n: int - :param format: see the doc in the doc in :py:meth:`tensorcircuit.quantum.measurement_results`, - defaults to "count_vector" + :param format: see :py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "count_vector" :type format: str, optional :param jittable: only applicable to count transformation in jax backend, defaults to False :type jittable: bool, optional + :param d: local dimension (2 for qubit; >2 for qudit), defaults to 2 + :type d: Optional[int] :return: measurement results specified as ``format`` :rtype: Any """ + d = 2 if d is None else int(d) + if len(backend.shape_tuple(sample)) == 1: sample_int = sample - sample_bin = sample_int2bin(sample, n) + sample_bin = sample_int2bin(sample, n, d=d) elif len(backend.shape_tuple(sample)) == 2: - sample_int = sample_bin2int(sample, n) + sample_int = sample_bin2int(sample, n, d=d) sample_bin = sample else: raise ValueError("unrecognized tensor shape for sample") + if format == "sample_int": return sample_int elif format == "sample_bin": return sample_bin else: - count_tuple = sample2count(sample_int, n, jittable) + count_tuple = sample2count(sample_int, n, jittable=jittable, d=d) if format == "count_tuple": return count_tuple elif format == "count_vector": - return count_s2d(count_tuple, n) + return count_s2d(count_tuple, n, d=d) elif format == "count_dict_bin": - return count_tuple2dict(count_tuple, n, key="bin") + return count_tuple2dict(count_tuple, n, key="bin", d=d) elif format == "count_dict_int": - return count_tuple2dict(count_tuple, n, key="int") + return count_tuple2dict(count_tuple, n, key="int", d=d) else: raise ValueError( - "unsupported format %s for finite shots measurement" % format + f"unsupported format {format} for finite shots measurement" ) -def spin_by_basis(n: int, m: int, elements: Tuple[int, int] = (1, -1)) -> Tensor: +def spin_by_basis( + n: int, m: int, elements: Tuple[int, int] = (1, -1), d: Optional[int] = None +) -> Tensor: """ Generate all n-bitstrings as an array, each row is a bitstring basis. Return m-th col. @@ -2934,67 +3034,109 @@ def spin_by_basis(n: int, m: int, elements: Tuple[int, int] = (1, -1)) -> Tensor all bitstring basis. 
:rtype: Tensor """ - s = backend.tile( - backend.cast( - backend.convert_to_tensor(np.array([[elements[0]], [elements[1]]])), "int32" - ), - [2**m, int(2 ** (n - m - 1))], - ) + d = len(elements) if d is None else d + + col = backend.convert_to_tensor(np.array(elements, dtype=np.int32).reshape(-1, 1)) + s = backend.tile(backend.cast(col, "int32"), [d**m, int(d ** (n - m - 1))]) return backend.reshape(s, [-1]) -def correlation_from_samples(index: Sequence[int], results: Tensor, n: int) -> Tensor: +def correlation_from_samples( + index: Sequence[int], + results: Tensor, + n: int, + d: int = 2, + elements: Optional[Sequence[float]] = None, +) -> Tensor: r""" - Compute :math:`\prod_{i\in \\text{index}} s_i (s=\pm 1)`, - Results is in the format of "sample_int" or "sample_bin" + Compute :math:`\prod_{i\in \text{index}} s_i` from measurement shots, + where each site value :math:`s_i` is mapped from the digit outcome. - :param index: list of int, indicating the position in the bitstring - :type index: Sequence[int] - :param results: sample tensor - :type results: Tensor - :param n: number of qubits - :type n: int - :return: Correlation expectation from measurement shots - :rtype: Tensor + Results can be "sample_int" ([shots]) or "sample_bin" ([shots, n]). + + :param index: positions in the basis string + :param results: samples tensor + :param n: number of sites + :param d: local dimension (default 2) + :param elements: optional mapping of length d from outcome {0..d-1} to values s. + If None and d==2, defaults to (1, -1) via the original formula. + :return: correlation estimate (mean over shots) """ if len(backend.shape_tuple(results)) == 1: - results = sample_int2bin(results, n) - results = 1 - results * 2 - r = results[:, index[0]] + results = sample_int2bin(results, n, d=d) + + if d == 2 and elements is None: + svals = 1 - results * 2 # 0->+1, 1->-1 + r = svals[:, index[0]] + for i in index[1:]: + r *= svals[:, i] + r = backend.cast(r, rdtypestr) + return backend.mean(r) + + if elements is None: + raise ValueError( + f"correlation_from_samples requires `elements` mapping for d={d}; " + f"e.g., for qutrit you might pass elements=(1.0,0.0,-1.0)." + ) + if len(elements) != d: + raise ValueError(f"`elements` length {len(elements)} != d={d}") + + evec = backend.cast(backend.convert_to_tensor(np.asarray(elements)), rdtypestr) + + col = backend.cast(results[:, index[0]], "int32") + r = backend.gather1d(evec, col) # shape [shots] + for i in index[1:]: - r *= results[:, i] + col = backend.cast(results[:, i], "int32") + r *= backend.gather1d(evec, col) + r = backend.cast(r, rdtypestr) return backend.mean(r) -def correlation_from_counts(index: Sequence[int], results: Tensor) -> Tensor: +def correlation_from_counts( + index: Sequence[int], + results: Tensor, + d: Optional[int] = None, + elements: Optional[Sequence[float]] = None, +) -> Tensor: r""" - Compute :math:`\prod_{i\in \\text{index}} s_i`, - where the probability for each bitstring is given as a vector ``results``. - Results is in the format of "count_vector" + Compute :math:`\prod_{i\in \text{index}} s_i` where the probability for each + basis label is given by a count/probability vector ``results`` ("count_vector"). 
- :Example: - - >>> prob = tc.array_to_tensor(np.array([0.6, 0.4, 0, 0])) - >>> qu.correlation_from_counts([0, 1], prob) - (0.20000002+0j) - >>> qu.correlation_from_counts([1], prob) - (0.20000002+0j) - - :param index: list of int, indicating the position in the bitstring - :type index: Sequence[int] - :param results: probability vector of shape 2^n - :type results: Tensor - :return: Correlation expectation from measurement shots. - :rtype: Tensor + :param index: positions in the basis string + :param results: probability/count vector of shape d**n (will be normalized) + :param d: local dimension, defaults to 2; values other than 2 currently raise NotImplementedError + :param elements: optional mapping of length d from digit {0..d-1} to values s. + If None and d==2, defaults to (1, -1). + :return: correlation expectation from counts """ + d = 2 if d is None else int(d) + if d != 2: + raise NotImplementedError(f"`d={d}` not implemented.") + results = backend.reshape(results, [-1]) results = backend.cast(results, rdtypestr) results /= backend.sum(results) - n = int(np.log(results.shape[0]) / np.log(2)) + + n = _infer_num_sites(int(results.shape[0]), d=d) + + if d == 2 and elements is None: + elems = (1, -1) + else: + if elements is None or len(elements) != d: + raise ValueError( + f"`elements` must be provided with length d={d} for qudit; got {elements}." + ) + elems = tuple(elements) # type: ignore + + acc = results for i in index: - results = results * backend.cast(spin_by_basis(n, i), results.dtype) - return backend.sum(results) + acc = acc * backend.cast( + spin_by_basis(n, int(i), elements=elems, d=d), acc.dtype + ) + + return backend.sum(acc) # @op2tensor diff --git a/tensorcircuit/results/counts.py b/tensorcircuit/results/counts.py index 5bf0e870..b38dada8 100644 --- a/tensorcircuit/results/counts.py +++ b/tensorcircuit/results/counts.py @@ -2,10 +2,11 @@ dict related functionalities """ -from typing import Any, Dict, Optional, Sequence +from typing import Any, Dict, Optional, Sequence, List import numpy as np +from ..cons import _ALPHABET Tensor = Any ct = Dict[str, int] @@ -91,7 +92,13 @@ def marginal_count(count: ct, keep_list: Sequence[int]) -> ct: def count2vec(count: ct, normalization: bool = True) -> Tensor: """ - Convert count dictionary to probability vector. + Convert a dictionary of counts (with string keys) to a probability/count vector. + + Support: + - base-d string (d <= 36), characters taken from 0-9A-Z (case-insensitive) + For example: + qubit: '0101' + qudit: '012' or '09A' (A represents 10, which means [0, 9, 10]) :param count: A dictionary mapping bit strings to counts :type count: ct @@ -105,44 +112,90 @@ def count2vec(count: ct, normalization: bool = True) -> Tensor: >>> count2vec({"00": 2, "10": 3, "11": 5}) array([0.2, 0. , 0.3, 0.5]) """ - nqubit = len(list(count.keys())[0]) - probability = [0] * 2**nqubit - shots = sum([v for k, v in count.items()]) + if not count: + return np.array([], dtype=float) + + sample_key = next(iter(count)).upper() + n = len(sample_key) + d = 0 + for k in count: + s = k.upper() + if len(s) != n: + raise ValueError( + f"The length of all keys should be the same ({n}), received '{k}'." + ) + for ch in s: + if ch not in _ALPHABET: + raise ValueError( + f"Key '{k}' contains illegal character '{ch}' (only 0-9A-Z are allowed)." 
+ ) + d = max(d, _ALPHABET.index(ch) + 1) + # d cannot be inferred from all-zero keys; fall back to a qubit register in that case + d = max(d, 2) + + def parse_key(_k: str) -> List[int]: + return [_ALPHABET.index(_ch) for _ch in _k.upper()] + + size = d**n + prob = np.zeros(size, dtype=float) + shots = float(sum(count.values())) if normalization else 1.0 + if shots == 0: + return prob + + powers = [d**p for p in range(n)][::-1] for k, v in count.items(): - if normalization is True: - v /= shots # type: ignore - probability[int(k, 2)] = v - return np.array(probability) + digits = parse_key(k) + idx = sum(dig * p for dig, p in zip(digits, powers)) + prob[idx] = (v / shots) if normalization else v + + return prob def vec2count(vec: Tensor, prune: bool = False) -> ct: """ - Convert probability vector to count dictionary. - - :param vec: Probability vector - :type vec: Tensor - :param prune: Whether to remove near-zero probabilities, defaults to False - :type prune: bool, optional - :return: Count dictionary - :rtype: ct - - :Example: + Map a count/probability vector of length D to a dictionary with base-d string keys (0-9A-Z). + The local dimension d is inferred from D = d**n (a power-of-two D is treated as a qubit register). + String keys are only available for d <= 36 (the np.base_repr limit); a larger inferred d raises a ValueError. - >>> vec2count(np.array([0.2, 0.3, 0.1, 0.4])) - {'00': 0.2, '01': 0.3, '10': 0.1, '11': 0.4} + :param vec: A one-dimensional vector of length D = d**n + :param prune: Whether to prune near-zero elements (threshold 1e-8) + :return: {base-d string key: value}, key length n """ - from ..quantum import count_vector2dict + from ..quantum import count_vector2dict, _infer_num_sites if isinstance(vec, list): vec = np.array(vec) - n = int(np.log(vec.shape[0]) / np.log(2) + 1e-9) - c = count_vector2dict(vec, n, key="bin") - if prune is True: - nc = c.copy() - for k, v in c.items(): - if np.abs(v) < 1e-8: - del nc[k] - return nc + vec = np.asarray(vec) + if vec.ndim != 1: + raise ValueError("vec2count expects a one-dimensional vector.") + + D = int(vec.shape[0]) + if D <= 0: + return {} + + def _is_power_of_two(x: int) -> bool: + return x > 0 and (x & (x - 1)) == 0 + + if _is_power_of_two(D): + n = int(np.log(D) / np.log(2) + 1e-9) + d: Optional[int] = 2 + else: + d = n = None + upper = int(np.sqrt(D)) + 1 + for d_try in range(2, max(upper, 3)): + try: + n_try = _infer_num_sites(D, d_try) + except ValueError: + continue + d, n = d_try, n_try + break + if d is None: + d, n = D, 1 + + c: ct = count_vector2dict(vec, n, key="bin", d=d) # type: ignore + + if prune: + c = {k: v for k, v in c.items() if np.abs(v) >= 1e-8} + return c diff --git a/tensorcircuit/results/qem/qem_methods.py b/tensorcircuit/results/qem/qem_methods.py index 68199e49..a9abe2ef 100644 --- a/tensorcircuit/results/qem/qem_methods.py +++ b/tensorcircuit/results/qem/qem_methods.py @@ -261,7 +261,7 @@ def dd_rule(slack_length: int, spacing: int = -1) -> Any: def rc_candidates(gate: Gate) -> List[Any]: - pauli = [m.tensor for m in gates.pauli_gates] + pauli = [m.tensor for m in gates.pauli_gates] # type: ignore if isinstance(gate, gates.Gate): gate = gate.tensor gatem = backend.reshapem(gate) diff --git a/tensorcircuit/simplify.py b/tensorcircuit/simplify.py index 1cbcfc6a..917e6d54 100644 --- a/tensorcircuit/simplify.py +++ b/tensorcircuit/simplify.py @@ -121,7 +121,9 @@ def _split_two_qubit_gate( if fixed_choice == 2: # swap one return n3, n4, True # swap s2 = n3.tensor.shape[-1] - if (s1 >= 4) and (s2 >= 4): + if (s1 >= n[0].dimension * n[2].dimension) and ( + s2 >= n[1].dimension * n[3].dimension + ): # jax jit unspport 
split_node with trun_err anyway # tf function doesn't work either, though I believe it may work on tf side # CANNOT DONE(@refraction-ray): tf.function version with trun_err set diff --git a/tensorcircuit/stabilizercircuit.py b/tensorcircuit/stabilizercircuit.py index 4fe5e506..16ee72f0 100644 --- a/tensorcircuit/stabilizercircuit.py +++ b/tensorcircuit/stabilizercircuit.py @@ -32,18 +32,27 @@ class StabilizerCircuit(AbstractCircuit): } def __init__( - self, nqubits: int, inputs: Tensor = None, tableau_inputs: Tensor = None + self, + nqubits: int, + dim: Optional[int] = None, + inputs: Tensor = None, + tableau_inputs: Tensor = None, ) -> None: """ ``StabilizerCircuit`` class based on stim package :param nqubits: Number of qubits :type nqubits: int + :param dim: The local Hilbert space dimension per site; if None, defaults to 2 (qubit). Stabilizer simulation currently supports only dim=2, so qudit dimensions are not accepted here. + :type dim: Optional[int] :param inputs: initial state by stabilizers, defaults to None :type inputs: Tensor, optional :param tableau_inputs: initial state by **inverse** tableau, defaults to None :type tableau_inputs: Tensor, optional """ + self._validate_dim(dim=dim) + self._not_implemented_for_qudit() + self._nqubits = nqubits self._stim_circuit = stim.Circuit() self._qir: List[Dict[str, Any]] = [] diff --git a/tensorcircuit/templates/blocks.py b/tensorcircuit/templates/blocks.py index cd834211..73680625 100644 --- a/tensorcircuit/templates/blocks.py +++ b/tensorcircuit/templates/blocks.py @@ -89,14 +89,14 @@ def QAOA_block( c.exp1( e1, e2, - unitary=G._zz_matrix, + unitary=G._zz_matrix, # type: ignore theta=paramzz * g[e1][e2].get("weight", 1.0), **kws, ) else: i = 0 for e1, e2 in g.edges: - c.exp1(e1, e2, unitary=G._zz_matrix, theta=paramzz[i], **kws) + c.exp1(e1, e2, unitary=G._zz_matrix, theta=paramzz[i], **kws) # type: ignore i += 1 if backend.sizen(paramx) == 1: @@ -145,7 +145,7 @@ def example_block( for j in range(nlayers): for i in range(n - 1): c.exp1( - i, i + 1, unitary=G._zz_matrix, theta=param[2 * j, i], split=split_conf + i, i + 1, unitary=G._zz_matrix, theta=param[2 * j, i], split=split_conf # type: ignore ) for i in range(n): c.rx(i, theta=param[2 * j + 1, i]) diff --git a/tensorcircuit/templates/measurements.py b/tensorcircuit/templates/measurements.py index 63f5c5b7..8dd509ad 100644 --- a/tensorcircuit/templates/measurements.py +++ b/tensorcircuit/templates/measurements.py @@ -72,7 +72,7 @@ def any_measurements( sum( [ structuresc[i, k] * g.tensor - for k, g in enumerate(G.pauli_gates) + for k, g in enumerate(G.pauli_gates) # type: ignore ] ) ), @@ -137,7 +137,7 @@ def any_local_measurements( sum( [ structuresc[i, k] * g.tensor - for k, g in enumerate(G.pauli_gates) + for k, g in enumerate(G.pauli_gates) # type: ignore ] ) ), diff --git a/tensorcircuit/translation.py b/tensorcircuit/translation.py index c092cc6a..afea6d92 100644 --- a/tensorcircuit/translation.py +++ b/tensorcircuit/translation.py @@ -536,17 +536,23 @@ def qiskit2tc( else: ctrl_state = ctrl_str2ctrl_state(gate_name[5:], len(idx) - 1) tc_circuit.multicontrol( - *idx, ctrl=ctrl_state, unitary=gates._x_matrix, name="x" + *idx, + ctrl=ctrl_state, + unitary=gates._x_matrix, # type: ignore + name="x", ) elif gate_name[:3] == "mcx": if gate_name[3:] == "": tc_circuit.multicontrol( - *idx, ctrl=[1] * (len(idx) - 1), unitary=gates._x_matrix, name="x" + *idx, ctrl=[1] * (len(idx) - 1), unitary=gates._x_matrix, name="x" # type: ignore ) else: ctrl_state = ctrl_str2ctrl_state(gate_name[5:], len(idx) - 1) 
tc_circuit.multicontrol( - *idx, ctrl=ctrl_state, unitary=gates._x_matrix, name="x" + *idx, + ctrl=ctrl_state, + unitary=gates._x_matrix, # type: ignore + name="x", ) elif gate_name[0] == "c" and gate_name[:7] != "circuit" and gate_name != "cu": # qiskit cu bug, see https://github.com/tencent-quantum-lab/tensorcircuit/issues/199 @@ -657,7 +663,7 @@ def qir2json( nm = backend.reshapem(r["gate"].tensor) nmr, nmi = tensor_to_json(nm) if backend.shape_tuple(nm)[0] == backend.shape_tuple(nm)[1] == 2: - uparams = list(gates.get_u_parameter(backend.numpy(nm))) + uparams = list(gates.get_u_parameter(backend.numpy(nm))) # type: ignore else: uparams = [] params = r.get("parameters", {}) diff --git a/test.ipynb b/test.ipynb new file mode 100644 index 00000000..c21b880b --- /dev/null +++ b/test.ipynb @@ -0,0 +1,122 @@ +{ + "cells": [ + { + "cell_type": "code", + "id": "initial_id", + "metadata": { + "collapsed": true, + "ExecuteTime": { + "end_time": "2025-08-14T08:12:23.631483Z", + "start_time": "2025-08-14T08:12:22.053618Z" + } + }, + "source": "import tensorcircuit as tc", + "outputs": [], + "execution_count": 1 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-08-14T08:13:16.526969Z", + "start_time": "2025-08-14T08:13:16.524397Z" + } + }, + "cell_type": "code", + "source": "circ = tc.Circuit(3, 3)", + "id": "c53c20a08a64ace8", + "outputs": [], + "execution_count": 2 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-08-14T08:14:21.237006Z", + "start_time": "2025-08-14T08:14:21.059310Z" + } + }, + "cell_type": "code", + "source": [ + "circ.h(0)\n", + "for i in range(1, 3):\n", + " circ.csum(i - 1, i)" + ], + "id": "10cd39770cf8295d", + "outputs": [], + "execution_count": 3 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-08-14T08:20:27.268526Z", + "start_time": "2025-08-14T08:20:26.108701Z" + } + }, + "cell_type": "code", + "source": "circ.sample(batch=1024, format=\"count_dict_bin\")", + "id": "d4862af178080574", + "outputs": [ + { + "data": { + "text/plain": [ + "{'000': 347, '111': 343, '222': 334}" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 14 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-08-14T08:21:13.371169Z", + "start_time": "2025-08-14T08:21:13.369116Z" + } + }, + "cell_type": "code", + "source": "circ.draw()", + "id": "20029d2cab494b0f", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "execution_count": 16 + }, + { + "metadata": {}, + "cell_type": "code", + "outputs": [], + "execution_count": null, + "source": "", + "id": "10ac6415ebf51e81" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From e6f520899967c4f0aff8f97945aa3f56474ee5fb Mon Sep 17 00:00:00 2001 From: Weiguo Ma Date: Thu, 14 Aug 2025 18:59:32 +0800 Subject: [PATCH 2/2] Fixed a tiny bug. 
--- tensorcircuit/mpscircuit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorcircuit/mpscircuit.py b/tensorcircuit/mpscircuit.py index d5cb1af1..203b54fa 100644 --- a/tensorcircuit/mpscircuit.py +++ b/tensorcircuit/mpscircuit.py @@ -1077,7 +1077,7 @@ def measure( backend.convert_to_tensor(np.array([1.0], dtype=dtypestr)), ) - p = p * backend.sum(ps * m) + p = p * backend.sum(ps * backend.cast(m, dtype=rdtypestr)) mps._mps.tensors[site] = backend.einsum("iaj,a->ij", tensor, m)[:, None, :] sample = backend.stack(sample) sample = backend.real(sample)
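
For reference, a minimal usage sketch of the d-aware count helpers touched by this patch (illustration only, not part of the diff; it relies on the count2vec/vec2count behavior shown in the hunks above):

    from tensorcircuit.results.counts import count2vec, vec2count

    # two-qutrit (d=3) counts with base-d string keys, e.g. from a GHZ-like sampling run
    counts = {"00": 256, "11": 256, "22": 512}
    vec = count2vec(counts)            # d inferred from the largest digit -> vector of length 3**2 = 9
    # vec[0] = 0.25, vec[4] = 0.25, vec[8] = 0.5, all other entries are 0
    back = vec2count(vec, prune=True)  # D = 9 = 3**2, so d = 3 -> {"00": 0.25, "11": 0.25, "22": 0.5}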