Skip to content

Commit eec96ff

Browse files
add enable lightcone jittable example
1 parent b0846b0 commit eec96ff

File tree

5 files changed

+105
-6
lines changed

5 files changed

+105
-6
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66

77
- Add `enable_lightcone` option in circuit `expectation` method, where only gates within the causal lightcone of the local observable are contracted.
88

9+
- Add `benchmark` function into utils
10+
911
### Fixed
1012

1113
- Fixed a critical bug in circuit expectation evaluation where a wrongly transposed operator connection was used.

examples/lightcone_simplify.py

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
"""
2+
comparison between expectation evaluation with/without lightcone simplification
3+
"""
4+
5+
import numpy as np
6+
import tensorcircuit as tc
7+
8+
K = tc.set_backend("tensorflow")
9+
10+
11+
def brickwall_ansatz(c, params, gatename, nlayers):
    """Append a brickwall pattern of two-qubit gates to circuit ``c``.

    ``params`` is reshaped to ``[nlayers, n, 2]``; within each layer the gate
    named ``gatename`` is applied first on even bonds and then on odd bonds,
    with periodic boundary (the last qubit couples back to qubit 0).
    Returns the same circuit object for chaining.
    """
    nqubits = c._nqubits
    theta = K.reshape(params, [nlayers, nqubits, 2])
    for layer in range(nlayers):
        # parity 0: even bonds use angle slot 0; parity 1: odd bonds use slot 1
        for parity in (0, 1):
            for q in range(parity, nqubits, 2):
                getattr(c, gatename)(
                    q, (q + 1) % nqubits, theta=theta[layer, q, parity]
                )
    return c
20+
21+
22+
def loss(params, n, nlayers, enable_lightcone):
    """Real part of the summed single-qubit <Z_i> expectations.

    Builds |+>^n via Hadamards, applies the brickwall ``rzz`` ansatz with
    ``params``, and sums <Z_i> over all qubits; ``enable_lightcone`` toggles
    causal-lightcone simplification inside ``expectation_ps``.
    """
    circuit = tc.Circuit(n)
    for q in range(n):
        circuit.h(q)
    circuit = brickwall_ansatz(circuit, params, "rzz", nlayers)
    zs = []
    for q in range(n):
        zs.append(circuit.expectation_ps(z=[q], enable_lightcone=enable_lightcone))
    return K.real(K.sum(K.stack(zs)))
31+
32+
33+
# Jitted value-and-gradient of `loss` w.r.t. the first argument (params);
# n, nlayers and enable_lightcone are marked static, so each distinct
# combination presumably triggers a separate trace/compilation.
vg1 = K.jit(K.value_and_grad(loss), static_argnums=(1, 2, 3))
34+
35+
36+
def efficiency():
    """Benchmark the jitted loss across system sizes and depths.

    Always times the lightcone-enabled evaluation; for small systems
    (n < 16) also times the full contraction and cross-checks that values
    and gradients agree.
    """
    for n in range(6, 40, 4):
        for nlayers in range(2, 6, 2):
            print(n, nlayers)
            print("w lightcone")
            (v_lc, g_lc), _, _ = tc.utils.benchmark(
                vg1, K.ones([nlayers * n * 2]), n, nlayers, True
            )
            if n >= 16:
                # full contraction is too expensive beyond this size
                continue
            print("wo lightcone")
            (v_full, g_full), _, _ = tc.utils.benchmark(
                vg1, K.ones([nlayers * n * 2]), n, nlayers, False
            )
            np.testing.assert_allclose(v_full, v_lc, atol=1e-5)
            np.testing.assert_allclose(g_full, g_lc, atol=1e-5)
51+
52+
53+
## further correctness check
def correctness(n, nlayers):
    """Check that lightcone simplification does not change results.

    Draws random parameters and asserts that value and gradient agree
    between the plain and lightcone-enabled evaluations, repeated 5 times.

    Bug fix: the original drew a fresh ``K.implicit_randn`` vector for each
    of the two ``vg1`` calls, so the two evaluations were compared at
    *different* random parameters; the same parameter vector must be reused
    for the comparison to be meaningful.
    """
    for _ in range(5):
        params = K.implicit_randn([nlayers * n * 2])
        v1, g1 = vg1(params, n, nlayers, False)
        v2, g2 = vg1(params, n, nlayers, True)
        np.testing.assert_allclose(v1, v2, atol=1e-5)
        np.testing.assert_allclose(g1, g2, atol=1e-5)
60+
61+
62+
if __name__ == "__main__":
    # run the benchmark sweep, then a randomized agreement check on a small system
    efficiency()
    correctness(7, 3)

tensorcircuit/circuit.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -443,6 +443,8 @@ def apply_general_gate(
443443
mpo: bool = False,
444444
ir_dict: Optional[Dict[str, Any]] = None,
445445
) -> None:
446+
if name is None:
447+
name = ""
446448
gate_dict = {
447449
"gate": gate,
448450
"index": index,

tensorcircuit/simplify.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -204,9 +204,8 @@ def _full_rank_simplify(nodes: List[Any]) -> List[Any]:
204204

205205
def _light_cone_cancel(nodes: List[Any]) -> Tuple[List[Any], bool]:
206206
is_changed = False
207-
for n in nodes:
208-
if getattr(n, "is_dagger", None) is None:
209-
break
207+
for ind in range(len(nodes) // 2, 0, -1):
208+
n = nodes[ind]
210209
if n.is_dagger is True:
211210
continue
212211
noe = len(n.shape)
@@ -216,8 +215,6 @@ def _light_cone_cancel(nodes: List[Any]) -> Tuple[List[Any], bool]:
216215
n1, n2 = e.node1, e.node2 # one of them is n itself
217216
if n1 is None or n2 is None:
218217
continue
219-
if getattr(n2, "is_dagger", None) is None:
220-
break
221218
if n1.is_dagger == n2.is_dagger:
222219
continue
223220
if n1.id != n2.id:
@@ -276,6 +273,9 @@ def _full_light_cone_cancel(nodes: List[Any]) -> List[Any]:
276273
:return: _description_
277274
:rtype: List[Any]
278275
"""
276+
for n in nodes:
277+
if getattr(n, "is_dagger", None) is None:
278+
return nodes
279279
nodes, is_changed = _light_cone_cancel(nodes)
280280
while is_changed:
281281
nodes, is_changed = _light_cone_cancel(nodes)

tensorcircuit/utils.py

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,10 @@
22
Helper functions
33
"""
44

5-
from typing import Any, Callable, Union, Sequence
5+
from typing import Any, Callable, Union, Sequence, Tuple
66
from functools import wraps
77
import platform
8+
import time
89

910

1011
def return_partial(
@@ -96,3 +97,33 @@ def is_m1mac() -> bool:
9697
if not platform.platform().startswith("macOS"):
9798
return False
9899
return True
100+
101+
102+
def benchmark(
    f: Any, *args: Any, tries: int = 5, verbose: bool = True
) -> Tuple[Any, float, float]:
    """
    Benchmark a (possibly jittable) function, separating one-off staging
    (tracing/compilation) time from steady-state running time.

    The first call ``f(*args)`` is timed alone as the staging time; the next
    ``tries`` calls are timed together and averaged as the running time.

    :param f: function to benchmark, invoked as ``f(*args)``
    :type f: Any
    :param args: positional arguments forwarded to ``f``
    :type args: Any
    :param tries: number of timed calls after the staging call, defaults to 5;
        when 0, the reported running time is 0.0
    :type tries: int, optional
    :param verbose: whether to print the measured times, defaults to True
    :type verbose: bool, optional
    :return: result of the last call to ``f``, the staging time in seconds,
        and the average running time per call in seconds
    :rtype: Tuple[Any, float, float]
    """
    time0 = time.time()
    r = f(*args)  # first call pays any tracing/compilation cost
    time1 = time.time()
    for _ in range(tries):
        r = f(*args)
    time2 = time.time()
    staging_time = time1 - time0
    # use a float literal so no "# type: ignore" is needed for the int/float mix
    running_time = (time2 - time1) / tries if tries > 0 else 0.0
    if verbose:
        print("staging time: ", staging_time, "running time: ", running_time)
    return r, staging_time, running_time

0 commit comments

Comments
 (0)