@@ -48,10 +48,7 @@ def loss_f(params, n, nlayers):
 
     c = generate_circuit(params, graph, n, nlayers)
 
-    # calculate the loss function, max cut
-    loss = 0.0
-    for e in graph.edges:
-        loss += c.expectation_ps(z=[e[0], e[1]])
+    loss = c.expectation_ps(z=[0, 1, 2], reuse=False)
 
     return K.real(loss)
 
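For context, a minimal sketch of what the new loss line computes (the toy
circuit below is illustrative, not the benchmark's generate_circuit):
expectation_ps contracts the Pauli string <Z0 Z1 Z2> in one shot, and
reuse=False forces a fresh tensor-network contraction on every call, so each
evaluation exercises the full contraction path this benchmark is timing.

    # illustrative stand-in for generate_circuit: any 3-qubit circuit works
    import tensorcircuit as tc

    K = tc.set_backend("numpy")

    c = tc.Circuit(3)
    for i in range(3):
        c.h(i)
    c.cnot(0, 1)
    c.cnot(1, 2)

    # reuse=False disables caching of the contracted state, so every call
    # re-runs the contraction, which is the quantity the benchmark measures
    loss = c.expectation_ps(z=[0, 1, 2], reuse=False)
    print(K.real(loss))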
@@ -65,8 +62,8 @@ def loss_f(params, n, nlayers):
 
 
 # define the benchmark parameters
-n = 10
-nlayers = 15
+n = 12
+nlayers = 12
 
 # define the cotengra optimizer parameters
 graph_args = {
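For scale, n = 12 and nlayers = 12 give a 12-qubit, 12-layer ansatz. The
contents of graph_args are truncated in this hunk, so the instance below is
only a labeled assumption: a typical MaxCut-style benchmark graph would be a
small random regular graph, e.g.

    # hypothetical benchmark instance; graph_args' actual keys are not
    # shown in this diff, so the 3-regular graph here is an assumption
    import networkx as nx

    n = 12
    nlayers = 12
    graph = nx.random_regular_graph(d=3, n=n, seed=42)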
@@ -84,7 +81,7 @@ def loss_f(params, n, nlayers):
 methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
     "greedy",
     "kahypar",
-    "labels",
+    # "labels",
     # "spinglass",  # requires igraph
     # "labelprop",  # requires igraph
     # "betweenness",  # requires igraph
@@ -94,26 +91,26 @@ def loss_f(params, n, nlayers):
 ]
 
 optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
-    # "optuna",  # pip install optuna
-    "random",  # default when no library is installed
+    "optuna",  # pip install optuna
+    # "random",  # default when no library is installed
     # "baytune",  # pip install baytune
-    "nevergrad",  # pip install nevergrad
+    # "nevergrad",  # pip install nevergrad
     # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
     # "skopt",  # pip install scikit-optimize
 ]
 
 post_processing_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#slicing-and-subtree-reconfiguration
     (None, None),
-    ("slicing_opts", {"target_size": 2**28}),
-    ("slicing_reconf_opts", {"target_size": 2**28}),
+    # ("slicing_opts", {"target_size": 2**28}),
+    # ("slicing_reconf_opts", {"target_size": 2**28}),
     ("reconf_opts", {}),
     ("simulated_annealing_opts", {}),
 ]
 
 minimize_args = [  # https://cotengra.readthedocs.io/en/main/advanced.html#objective
-    "flops",  # minimize the total number of scalar operations
-    "size",  # minimize the size of the largest intermediate tensor
-    "write",  # minimize the sum of sizes of all intermediate tensors
+    # "flops",  # minimize the total number of scalar operations
+    # "size",  # minimize the size of the largest intermediate tensor
+    # "write",  # minimize the sum of sizes of all intermediate tensors
     "combo",  # minimize the sum of FLOPS + α * WRITE where α is 64
 ]
 
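The four lists above span a grid of contraction-optimizer configurations. The
sweep loop itself is outside this diff, so the driver below is an assumption,
but a plausible reading is a Cartesian product over the grid:

    # hypothetical driver; the benchmark's actual sweep loop is not shown
    from itertools import product

    for method, optlib, post_processing, minimize in product(
        methods_args, optlib_args, post_processing_args, minimize_args
    ):
        opt = get_optimizer(method, optlib, post_processing, minimize)
        # ... benchmark loss_f under this contractor configuration ...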
@@ -125,7 +122,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
         optlib=optlib,
         minimize=minimize,
         parallel=True,
-        max_time=30,
+        max_time=60,
         max_repeats=128,
         progbar=True,
     )
@@ -135,7 +132,7 @@ def get_optimizer(method, optlib, post_processing, minimize):
         optlib=optlib,
         minimize=minimize,
         parallel=True,
-        max_time=30,
+        max_time=60,
         max_repeats=128,
         progbar=True,
         **{post_processing[0]: post_processing[1]},
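The two hunks above are the two branches of get_optimizer: the (None, None)
entry takes the branch with no post-processing keyword, and every other entry
splats its option dict into the constructor via **{...}. The constructor name
and the call site sit outside these hunks, so the sketch below is an
assumption; cotengra's ReusableHyperOptimizer and tensorcircuit's "custom"
contractor are the usual pairing:

    # illustrative wiring; the benchmark's actual constructor and call
    # site are outside this diff
    import cotengra as ctg
    import tensorcircuit as tc

    opt = ctg.ReusableHyperOptimizer(
        methods=["greedy", "kahypar"],
        optlib="optuna",
        minimize="combo",
        parallel=True,
        max_time=60,
        max_repeats=128,
        progbar=True,
        reconf_opts={},  # one of the post_processing_args entries
    )
    tc.set_contractor("custom", optimizer=opt, preprocessing=True)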