Commit 9827b39

feat: add more settings for cotengra benchmark example
1 parent 13b543a commit 9827b39

File tree

1 file changed: +47 -23 lines changed


examples/cotengra_setting_bench.py

Lines changed: 47 additions & 23 deletions
@@ -41,17 +41,16 @@ def generate_circuit(param, g, n, nlayers):
     return c
 
 
-def trigger_cotengra_optimization(n, nlayers, d):
-    g = nx.random_regular_graph(d, n)
+def trigger_cotengra_optimization(n, nlayers, graph):
 
     # define the loss function
     def loss_f(params, n, nlayers):
 
-        c = generate_circuit(params, g, n, nlayers)
+        c = generate_circuit(params, graph, n, nlayers)
 
         # calculate the loss function, max cut
         loss = 0.0
-        for e in g.edges:
+        for e in graph.edges:
             loss += c.expectation_ps(z=[e[0], e[1]])
 
         return K.real(loss)
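Note: the loss above is the MaxCut-style cost, i.e. the sum of <Z_i Z_j> over the edges of the input graph. A minimal self-contained sketch of the same expectation pattern, using a hypothetical 2-qubit Bell circuit as a stand-in for generate_circuit:

    import networkx as nx
    import tensorcircuit as tc

    g = nx.complete_graph(2)  # toy graph with a single edge (0, 1)
    c = tc.Circuit(2)         # stand-in for generate_circuit(params, g, n, nlayers)
    c.h(0)
    c.cnot(0, 1)
    # sum <Z_i Z_j> over all edges, as in loss_f above
    loss = sum(c.expectation_ps(z=[e[0], e[1]]) for e in g.edges)
    print(tc.backend.real(loss))  # the Bell state gives <Z0 Z1> = 1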
@@ -60,12 +59,28 @@ def loss_f(params, n, nlayers):
 
 
     # run only once to trigger the compilation
     K.jit(
-        K.value_and_grad(loss_f, argnums=0),
+        loss_f,
         static_argnums=(1, 2),
     )(params, n, nlayers)
 
+# define the benchmark parameters
+n = 10
+nlayers = 15
+
 # define the cotengra optimizer parameters
+graph_args = {
+    "1D lattice": nx.convert_node_labels_to_integers(
+        nx.grid_graph((n, 1))
+    ),  # 1D lattice
+    "2D lattice": nx.convert_node_labels_to_integers(
+        nx.grid_graph((n // 5, n // (n // 5)))
+    ),  # 2D lattice
+    "all-to-all connected": nx.convert_node_labels_to_integers(
+        nx.complete_graph(n)
+    ),  # all-to-all connected
+}
+
 methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
     "greedy",
     "kahypar",
@@ -79,10 +94,10 @@ def loss_f(params, n, nlayers):
 ]
 
 optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
-    "optuna",  # pip install optuna
-    "random",
+    # "optuna",  # pip install optuna
+    "random",  # default when no library is installed
     # "baytune",  # pip install baytune
-    # "nevergrad",  # pip install nevergrad
+    "nevergrad",  # pip install nevergrad
     # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
     # "skopt",  # pip install scikit-optimize
 ]
@@ -95,47 +110,56 @@ def loss_f(params, n, nlayers):
     ("simulated_annealing_opts", {}),
 ]
 
+minimize_args = [  # https://cotengra.readthedocs.io/en/main/advanced.html#objective
+    "flops",  # minimize the total number of scalar operations
+    "size",  # minimize the size of the largest intermediate tensor
+    "write",  # minimize the sum of sizes of all intermediate tensors
+    "combo",  # minimize the sum of FLOPS + α * WRITE where α is 64
+]
+
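Note: per the inline comment above, the "combo" objective scores a contraction tree by FLOPS + α * WRITE with α = 64, so it interpolates between pure operation count and pure memory traffic. A toy illustration with hypothetical totals for a single tree:

    flops, write = 2.0e9, 1.5e7  # hypothetical per-tree totals, for illustration only
    alpha = 64                   # the default combo weight noted in the comment above
    print(flops + alpha * write)  # the quantity that minimize="combo" targets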
 
-def get_optimizer(method, optlib, post_processing):
+def get_optimizer(method, optlib, post_processing, minimize):
     if post_processing[0] is None:
         return ctg.HyperOptimizer(
             methods=method,
             optlib=optlib,
-            minimize="flops",
+            minimize=minimize,
             parallel=True,
             max_time=30,
-            max_repeats=30,
+            max_repeats=128,
             progbar=True,
         )
     else:
         return ctg.HyperOptimizer(
             methods=method,
             optlib=optlib,
-            minimize="flops",
+            minimize=minimize,
             parallel=True,
             max_time=30,
-            max_repeats=30,
+            max_repeats=128,
             progbar=True,
             **{post_processing[0]: post_processing[1]},
         )
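Note: the two branches of get_optimizer differ only in whether the post-processing kwarg is passed. An equivalent, slightly more compact formulation (a sketch, not part of the commit):

    def get_optimizer(method, optlib, post_processing, minimize):
        kwargs = dict(
            methods=method,
            optlib=optlib,
            minimize=minimize,
            parallel=True,
            max_time=30,
            max_repeats=128,
            progbar=True,
        )
        if post_processing[0] is not None:
            kwargs[post_processing[0]] = post_processing[1]  # e.g. slicing_opts
        return ctg.HyperOptimizer(**kwargs)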

 
 
 if __name__ == "__main__":
-    # define the parameters
-    n = 20
-    nlayers = 15
-    d = 3
-
-    for method, optlib, post_processing in itertools.product(
-        methods_args, optlib_args, post_processing_args
+    for graph, method, optlib, post_processing, minimize in itertools.product(
+        graph_args.keys(),
+        methods_args,
+        optlib_args,
+        post_processing_args,
+        minimize_args,
     ):
-        print(f"method: {method}, optlib: {optlib}, post_processing: {post_processing}")
+        print(
+            f"graph: {graph}, method: {method}, optlib: {optlib}, "
+            f"post_processing: {post_processing}, minimize: {minimize}"
+        )
         tc.set_contractor(
             "custom",
-            optimizer=get_optimizer(method, optlib, post_processing),
+            optimizer=get_optimizer(method, optlib, post_processing, minimize),
             contraction_info=True,
             preprocessing=True,
             debug_level=2,  # no computation
         )
-        trigger_cotengra_optimization(n, nlayers, d)
+        trigger_cotengra_optimization(n, nlayers, graph_args[graph])
         print("-------------------------")
