Commit f0c1c80

committed
chore: add example
1 parent 54bd507 commit f0c1c80

File tree

1 file changed: +142 −0 lines changed

examples/cotengra_setting_bench.py

Lines changed: 142 additions & 0 deletions
"""
Performance comparison of different cotengra settings for contraction-path
optimization (on random graph layouts).
"""

import itertools
import sys
import warnings

import cotengra as ctg
import networkx as nx
import numpy as np

sys.path.insert(0, "../")
import tensorcircuit as tc
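
# note: the "../" path insert above assumes this script is run from the
# examples/ directory so that the in-repo tensorcircuit package is found.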

try:
    import kahypar
except ImportError:
    print("kahypar not installed, please install it to run this script.")
    sys.exit()


# suppress the warning from cotengra
warnings.filterwarnings(
    "ignore",
    message="The inputs or output of this tree are not ordered. "
    "Costs will be accurate but actually contracting requires "
    "ordered indices corresponding to array axes.",
)
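
# K is the backend object returned by tc.set_backend below; it exposes the
# backend-agnostic primitives used in this script (jit, value_and_grad,
# implicit_randn, real).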

K = tc.set_backend("jax")


def generate_circuit(param, g, n, nlayers):
    # construct the circuit ansatz
    c = tc.Circuit(n)
    for i in range(n):
        c.H(i)
    for j in range(nlayers):
        c = tc.templates.blocks.QAOA_block(c, g, param[j, 0], param[j, 1])
    return c
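
# each QAOA_block call above appends one QAOA layer (two-qubit couplings on
# the edges of g followed by single-qubit mixer rotations), so the tensor
# network to contract grows with both n and nlayers.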


def trigger_cotengra_optimization(n, nlayers, d):
    g = nx.random_regular_graph(d, n)

    # define the loss function
    def loss_f(params, n, nlayers):
        c = generate_circuit(params, g, n, nlayers)

        # calculate the loss function, max cut
        loss = 0.0
        for e in g.edges:
            loss += c.expectation_ps(z=[e[0], e[1]])
        return K.real(loss)

    params = K.implicit_randn(shape=[nlayers, 2])

    # run only once to trigger the compilation
    K.jit(
        K.value_and_grad(loss_f, argnums=0),
        static_argnums=(1, 2),
    )(params, n, nlayers)
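
# the single jitted call above builds the circuit's tensor network once,
# which is the point at which the contractor configured via tc.set_contractor
# (see __main__ below) runs its cotengra path search.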


# define the cotengra optimizer parameters
methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
    "greedy",
    "kahypar",
    "labels",
    # "spinglass",  # requires igraph
    # "labelprop",  # requires igraph
    # "betweenness",  # requires igraph
    # "walktrap",  # requires igraph
    # "quickbb",  # requires https://github.com/dechterlab/quickbb
    # "flowcutter",  # requires https://github.com/kit-algo/flow-cutter-pace17
]

optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
    "optuna",  # pip install optuna
    "random",
    # "baytune",  # pip install baytune
    # "nevergrad",  # pip install nevergrad
    # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
    # "skopt",  # pip install scikit-optimize
]

post_processing_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#slicing-and-subtree-reconfiguration
    (None, None),
    ("slicing_opts", {"target_size": 2**28}),
    ("slicing_reconf_opts", {"target_size": 2**28}),
    ("reconf_opts", {}),
    ("simulated_annealing_opts", {}),
]
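# each pair above is (HyperOptimizer kwarg name, value) and is forwarded
# verbatim by get_optimizer below; (None, None) means no post-processing.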


def get_optimizer(method, optlib, post_processing):
    # forward the optional post-processing kwarg only when one is given
    extra_kwargs = (
        {} if post_processing[0] is None else {post_processing[0]: post_processing[1]}
    )
    return ctg.HyperOptimizer(
        methods=method,
        optlib=optlib,
        minimize="flops",
        parallel=True,
        max_time=30,
        max_repeats=30,
        progbar=True,
        **extra_kwargs,
    )
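

# a minimal standalone sketch (illustration only, not used by the benchmark):
# a HyperOptimizer can also be applied directly to a plain einsum via its
# `search` method; the tiny index structure and helper name are hypothetical.
def _standalone_search_example():
    inputs = [("a", "b"), ("b", "c"), ("c", "a")]
    output = ()
    size_dict = {"a": 2, "b": 2, "c": 2}
    opt = get_optimizer("greedy", "random", (None, None))
    tree = opt.search(inputs, output, size_dict)  # returns a ContractionTree
    return tree.contraction_cost()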


if __name__ == "__main__":
    # define the parameters
    n = 20
    nlayers = 15
    d = 3

    for method, optlib, post_processing in itertools.product(
        methods_args, optlib_args, post_processing_args
    ):
        print(f"method: {method}, optlib: {optlib}, post_processing: {post_processing}")
        tc.set_contractor(
            "custom",
            optimizer=get_optimizer(method, optlib, post_processing),
            contraction_info=True,
            preprocessing=True,
            debug_level=2,  # no computation
        )
        trigger_cotengra_optimization(n, nlayers, d)
        print("-------------------------")
