@@ -41,17 +41,16 @@ def generate_circuit(param, g, n, nlayers):
     return c
 
 
-def trigger_cotengra_optimization(n, nlayers, d):
-    g = nx.random_regular_graph(d, n)
+def trigger_cotengra_optimization(n, nlayers, graph):
 
     # define the loss function
     def loss_f(params, n, nlayers):
 
-        c = generate_circuit(params, g, n, nlayers)
+        c = generate_circuit(params, graph, n, nlayers)
 
         # calculate the loss function, max cut
         loss = 0.0
-        for e in g.edges:
+        for e in graph.edges:
             loss += c.expectation_ps(z=[e[0], e[1]])
 
         return K.real(loss)
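For context, loss_f above is a max-cut style cost: it accumulates the ZZ correlator over every edge of the problem graph. Below is a minimal standalone sketch of that pattern (not from the commit); the 3-node complete graph and the plain Hadamard layer are placeholder choices standing in for the benchmark's parameterized QAOA circuit.

```python
import networkx as nx
import tensorcircuit as tc

K = tc.backend
g = nx.complete_graph(3)  # toy problem graph (placeholder)
c = tc.Circuit(3)
for i in range(3):
    c.h(i)  # toy state; the benchmark builds a parameterized circuit instead

loss = 0.0
for e in g.edges:
    loss += c.expectation_ps(z=[e[0], e[1]])  # <Z_i Z_j> for each edge
print(K.real(loss))
```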
@@ -60,12 +59,28 @@ def loss_f(params, n, nlayers):
 
     # run only once to trigger the compilation
     K.jit(
-        K.value_and_grad(loss_f, argnums=0),
+        loss_f,
         static_argnums=(1, 2),
     )(params, n, nlayers)
 
 
+# define the benchmark parameters
+n = 10
+nlayers = 15
+
 # define the cotengra optimizer parameters
+graph_args = {
+    "1D lattice": nx.convert_node_labels_to_integers(
+        nx.grid_graph((n, 1))
+    ),  # 1D lattice
+    "2D lattice": nx.convert_node_labels_to_integers(
+        nx.grid_graph((n // 5, n // (n // 5)))
+    ),  # 2D lattice
+    "all-to-all connected": nx.convert_node_labels_to_integers(
+        nx.complete_graph(n)
+    ),  # all-to-all connected
+}
+
 methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
     "greedy",
     "kahypar",
@@ -79,10 +94,10 @@ def loss_f(params, n, nlayers):
 ]
 
 optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
-    "optuna",  # pip install optuna
-    "random",
+    # "optuna",  # pip install optuna
+    "random",  # default when no library is installed
     # "baytune",  # pip install baytune
-    # "nevergrad",  # pip install nevergrad
+    "nevergrad",  # pip install nevergrad
     # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
     # "skopt",  # pip install scikit-optimize
 ]
@@ -95,47 +110,56 @@ def loss_f(params, n, nlayers):
     ("simulated_annealing_opts", {}),
 ]
 
+minimize_args = [  # https://cotengra.readthedocs.io/en/main/advanced.html#objective
+    "flops",  # minimize the total number of scalar operations
+    "size",  # minimize the size of the largest intermediate tensor
+    "write",  # minimize the sum of sizes of all intermediate tensors
+    "combo",  # minimize the sum of FLOPS + α * WRITE where α is 64
+]
+
 
-def get_optimizer(method, optlib, post_processing):
+def get_optimizer(method, optlib, post_processing, minimize):
     if post_processing[0] is None:
         return ctg.HyperOptimizer(
             methods=method,
             optlib=optlib,
-            minimize="flops",
+            minimize=minimize,
             parallel=True,
             max_time=30,
-            max_repeats=30,
+            max_repeats=128,
             progbar=True,
         )
     else:
         return ctg.HyperOptimizer(
             methods=method,
             optlib=optlib,
-            minimize="flops",
+            minimize=minimize,
             parallel=True,
             max_time=30,
-            max_repeats=30,
+            max_repeats=128,
             progbar=True,
             **{post_processing[0]: post_processing[1]},
         )
 
 
 if __name__ == "__main__":
-    # define the parameters
-    n = 20
-    nlayers = 15
-    d = 3
-
-    for method, optlib, post_processing in itertools.product(
-        methods_args, optlib_args, post_processing_args
+    for graph, method, optlib, post_processing, minimize in itertools.product(
+        graph_args.keys(),
+        methods_args,
+        optlib_args,
+        post_processing_args,
+        minimize_args,
     ):
-        print(f"method: {method}, optlib: {optlib}, post_processing: {post_processing}")
+        print(
+            f"graph: {graph}, method: {method}, optlib: {optlib}, "
+            f"post_processing: {post_processing}, minimize: {minimize}"
+        )
         tc.set_contractor(
             "custom",
-            optimizer=get_optimizer(method, optlib, post_processing),
+            optimizer=get_optimizer(method, optlib, post_processing, minimize),
             contraction_info=True,
             preprocessing=True,
             debug_level=2,  # no computation
         )
-        trigger_cotengra_optimization(n, nlayers, d)
+        trigger_cotengra_optimization(n, nlayers, graph_args[graph])
         print("-------------------------")