
Commit ed378cb

improve cotengra benchmark example
1 parent 9827b39 commit ed378cb


examples/cotengra_setting_bench.py

Lines changed: 14 additions & 17 deletions
@@ -48,10 +48,7 @@ def loss_f(params, n, nlayers):
 
     c = generate_circuit(params, graph, n, nlayers)
 
-    # calculate the loss function, max cut
-    loss = 0.0
-    for e in graph.edges:
-        loss += c.expectation_ps(z=[e[0], e[1]])
+    loss = c.expectation_ps(z=[0, 1, 2], reuse=False)
 
     return K.real(loss)
 
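The removed loss was a MaxCut-style objective, summing a two-body ZZ expectation over every edge of the graph; the new loss contracts a single three-body Z expectation, and reuse=False asks tensorcircuit to contract the full expectation network directly instead of reusing a cached state, so the quality of the contraction path is what actually gets exercised. A minimal sketch of the two variants, assuming a tensorcircuit Circuit c and a networkx graph as in the script:

    # old objective (removed): MaxCut-style sum of ZZ expectations over graph edges
    loss = 0.0
    for e in graph.edges:
        loss += c.expectation_ps(z=[e[0], e[1]])

    # new objective: one three-body Z expectation; reuse=False contracts the full
    # expectation network on each call rather than reusing a cached state vector
    loss = c.expectation_ps(z=[0, 1, 2], reuse=False)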

@@ -65,8 +62,8 @@ def loss_f(params, n, nlayers):
 
 
 # define the benchmark parameters
-n = 10
-nlayers = 15
+n = 12
+nlayers = 12
 
 # define the cotengra optimizer parameters
 graph_args = {
@@ -84,7 +81,7 @@ def loss_f(params, n, nlayers):
 methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
     "greedy",
     "kahypar",
-    "labels",
+    # "labels",
     # "spinglass",  # requires igraph
     # "labelprop",  # requires igraph
     # "betweenness",  # requires igraph
@@ -94,26 +91,26 @@ def loss_f(params, n, nlayers):
 ]
 
 optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
-    # "optuna",  # pip install optuna
-    "random",  # default when no library is installed
+    "optuna",  # pip install optuna
+    # "random",  # default when no library is installed
     # "baytune",  # pip install baytune
-    "nevergrad",  # pip install nevergrad
+    # "nevergrad",  # pip install nevergrad
     # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
     # "skopt",  # pip install scikit-optimize
 ]
 
 post_processing_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#slicing-and-subtree-reconfiguration
     (None, None),
-    ("slicing_opts", {"target_size": 2**28}),
-    ("slicing_reconf_opts", {"target_size": 2**28}),
+    # ("slicing_opts", {"target_size": 2**28}),
+    # ("slicing_reconf_opts", {"target_size": 2**28}),
     ("reconf_opts", {}),
     ("simulated_annealing_opts", {}),
 ]
 
 minimize_args = [  # https://cotengra.readthedocs.io/en/main/advanced.html#objective
-    "flops",  # minimize the total number of scalar operations
-    "size",  # minimize the size of the largest intermediate tensor
-    "write",  # minimize the sum of sizes of all intermediate tensors
+    # "flops",  # minimize the total number of scalar operations
+    # "size",  # minimize the size of the largest intermediate tensor
+    # "write",  # minimize the sum of sizes of all intermediate tensors
     "combo",  # minimize the sum of FLOPS + α * WRITE where α is 64
 ]
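Each benchmark case is one combination drawn from the four lists above; method, optlib, and minimize feed directly into cotengra's hyper-optimizer keywords, and the post-processing pair is splatted in as one extra keyword (see get_optimizer in the hunks below). A rough sketch of how a single combination could be assembled, assuming cotengra is installed and the lists above are in scope; the itertools.product loop is illustrative, and ReusableHyperOptimizer is an assumption (the script may construct a plain ctg.HyperOptimizer instead):

    import itertools
    import cotengra as ctg

    for method, optlib, post, minimize in itertools.product(
        methods_args, optlib_args, post_processing_args, minimize_args
    ):
        extra = {} if post[0] is None else {post[0]: post[1]}
        opt = ctg.ReusableHyperOptimizer(
            methods=[method],    # tree-building driver, e.g. "greedy" or "kahypar"
            optlib=optlib,       # hyper-parameter search backend, e.g. "optuna"
            minimize=minimize,   # search objective, e.g. "combo"
            parallel=True,
            max_time=60,
            max_repeats=128,
            progbar=True,
            **extra,             # e.g. reconf_opts={} or simulated_annealing_opts={}
        )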

@@ -125,7 +122,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
             optlib=optlib,
             minimize=minimize,
             parallel=True,
-            max_time=30,
+            max_time=60,
             max_repeats=128,
             progbar=True,
         )
@@ -135,7 +132,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
             optlib=optlib,
             minimize=minimize,
             parallel=True,
-            max_time=30,
+            max_time=60,
             max_repeats=128,
             progbar=True,
             **{post_processing[0]: post_processing[1]},
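For the timing to reflect a given optimizer, it has to be installed as the contractor tensorcircuit uses when loss_f builds and contracts the expectation network. A hedged sketch of that wiring through tensorcircuit's set_contractor with the "custom" method; the contraction_info and preprocessing flags are optional extras and not necessarily what the example passes:

    import tensorcircuit as tc

    # build one optimizer via the script's get_optimizer helper
    opt = get_optimizer("kahypar", "optuna", ("reconf_opts", {}), "combo")

    # route subsequent tensor-network contractions through this optimizer
    tc.set_contractor("custom", optimizer=opt, contraction_info=True, preprocessing=True)

    # each loss_f(params, n, nlayers) call now contracts along a path found by opt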
