Skip to content

Commit b4404e2

Browse files
authored
Merge pull request #6 from tencent-quantum-lab/master
local sync
2 parents cb26531 + e05b75c commit b4404e2

File tree

4 files changed

+71
-27
lines changed

4 files changed

+71
-27
lines changed

CHANGELOG.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,10 @@
1414

1515
- Add circuit copy method that avoids the shallow copy issue `Circuit.copy()`
1616

17+
### Fixed
18+
19+
- Improve the `adaptive_vmap` to support internal jit and pytree output
20+
1721
### Changed
1822

1923
- The static method `BaseCircuit.copy` is renamed as `BaseCircuit.copy_nodes`

requirements/requirements-extra.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# extra dependencies for ci
2-
qiskit
2+
qiskit==0.43
33
qiskit-nature
44
cirq
55
torch

tensorcircuit/applications/physics/fss.py

Lines changed: 53 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
"""
22
finite size scaling tools
33
"""
4-
from typing import List, Tuple
4+
from typing import List, Tuple, Optional
55

66
import numpy as np
77

@@ -14,6 +14,8 @@ def data_collapse(
1414
nu: float,
1515
beta: float = 0,
1616
obs_type: int = 1,
17+
fit_type: int = 0,
18+
dobs: Optional[List[List[float]]] = None,
1719
) -> Tuple[List[float], List[List[float]], List[List[float]], float]:
1820
xL: List[List[float]] = [] # x=(p-pc)L^(1/\nv)
1921
yL: List[List[float]] = [] # y=S(p,L)-S(pc,L) or S(p,L)
@@ -33,27 +35,61 @@ def data_collapse(
3335
yL[n0].append(obs[n0][p0] * n[n0] ** beta)
3436
# tripartite mutual information
3537
pc_list.append(pc_L)
36-
xL_all = []
37-
for i in range(len(xL)):
38-
xL_all.extend(xL[i])
39-
yL_ave = []
40-
loss = []
41-
for x0 in range(len(xL_all)):
42-
ybar_list = []
43-
for n0 in range(len(n)):
44-
if xL_all[x0] >= xL[n0][0] and xL_all[x0] <= xL[n0][-1]:
45-
yinsert = pc_linear_interpolation(xL[n0], yL[n0], xL_all[x0])
46-
ybar_list.append(yinsert)
38+
if fit_type == 0:
39+
xL_all = []
40+
for i in range(len(xL)):
41+
xL_all.extend(xL[i])
42+
yL_ave = []
43+
loss = []
44+
for x0 in range(len(xL_all)):
45+
ybar_list = []
46+
for n0 in range(len(n)):
47+
if xL_all[x0] >= xL[n0][0] and xL_all[x0] <= xL[n0][-1]:
48+
yinsert = pc_linear_interpolation(xL[n0], yL[n0], xL_all[x0])
49+
ybar_list.append(yinsert)
50+
51+
ybar = np.mean(ybar_list)
52+
mean_squared = [(ybar_list[i] - ybar) ** 2 for i in range(len(ybar_list))]
53+
loss.append(np.sum(mean_squared))
54+
yL_ave.append(ybar)
55+
loss = np.sum(loss)
4756

48-
ybar = np.mean(ybar_list)
49-
mean_squared = [(ybar_list[i] - ybar) ** 2 for i in range(len(ybar_list))]
50-
loss.append(np.sum(mean_squared))
51-
yL_ave.append(ybar)
52-
loss = np.sum(loss)
57+
return pc_list, xL, yL, loss # type: ignore
5358

59+
# fit_type == 1
60+
if dobs is None:
61+
raise ValueError("uncertainty of each y has to be specified in `dobs`")
62+
63+
datas = []
64+
for n0 in range(len(n)):
65+
for i in range(len(xL[n0])):
66+
datas.append([xL[n0][i], yL[n0][i], dobs[n0][i]])
67+
datas = sorted(datas, key=lambda x: x[0])
68+
loss = _quality_objective_v2(datas) # type: ignore
5469
return pc_list, xL, yL, loss # type: ignore
5570

5671

72+
def _quality_objective_v2(datas: List[List[float]]) -> float:
73+
# https://journals.aps.org/prb/supplemental/10.1103/PhysRevB.101.060301/Supplement.pdf
74+
loss = []
75+
for i in range(len(datas) - 2):
76+
# i, i+1, i+2
77+
x, y, d = datas[i + 1]
78+
x1, y1, d1 = datas[i]
79+
x2, y2, d2 = datas[i + 2]
80+
if np.abs(x - x1) < 1e-4 or np.abs(x - x2) < 1e-4:
81+
continue
82+
ybar = ((x2 - x) * y1 - (x1 - x) * y2) / (x2 - x1)
83+
delta = (
84+
d**2
85+
+ d1**2 * (x2 - x) ** 2 / (x2 - x1) ** 2
86+
+ d2**2 * (x1 - x) ** 2 / (x2 - x1) ** 2
87+
)
88+
w = (y - ybar) ** 2 / delta
89+
loss.append(w)
90+
return np.mean(loss) # type: ignore
91+
92+
5793
def pc_linear_interpolation(p: List[float], SA: List[float], pc_input: float) -> float:
5894
if pc_input in p:
5995
pc_index = p.index(pc_input)

tensorcircuit/experimental.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
def adaptive_vmap(
1818
f: Callable[..., Any],
1919
vectorized_argnums: Union[int, Sequence[int]] = 0,
20+
static_argnums: Optional[Union[int, Sequence[int]]] = None,
2021
chunk_size: Optional[int] = None,
2122
) -> Callable[..., Any]:
2223
if chunk_size is None:
@@ -46,7 +47,10 @@ def wrapper(*args: Any, **kws: Any) -> Tensor:
4647
reshape_args.append(arg)
4748
if s2 != 0:
4849
rest_args.append(arg_rest)
49-
_vmap = backend.vmap(f, vectorized_argnums)
50+
_vmap = backend.jit(
51+
backend.vmap(f, vectorized_argnums=vectorized_argnums),
52+
static_argnums=static_argnums,
53+
)
5054
r = []
5155
for i in range(s1):
5256
# currently using naive python loop for simplicity
@@ -55,16 +59,16 @@ def wrapper(*args: Any, **kws: Any) -> Tensor:
5559
for j, a in enumerate(reshape_args)
5660
]
5761
r.append(_vmap(*nreshape_args, **kws))
58-
r = backend.stack(r)
59-
rshape = list(backend.shape_tuple(r))
60-
if len(rshape) == 2:
61-
nshape = [rshape[0] * rshape[1]]
62-
else:
63-
nshape = [rshape[0] * rshape[1], -1]
64-
r = backend.reshape(r, nshape)
62+
r = backend.tree_map(lambda *x: backend.concat(x), *r)
63+
# rshape = list(backend.shape_tuple(r))
64+
# if len(rshape) == 2:
65+
# nshape = [rshape[0] * rshape[1]]
66+
# else:
67+
# nshape = [rshape[0] * rshape[1], -1]
68+
# r = backend.reshape(r, nshape)
6569
if s2 != 0:
6670
rest_r = _vmap(*rest_args, **kws)
67-
return backend.concat([r, rest_r])
71+
return backend.tree_map(lambda *x: backend.concat(x), r, rest_r)
6872
return r
6973

7074
return wrapper

0 commit comments

Comments
 (0)