@@ -8,17 +8,16 @@
 
 #include <cmath>
 #include <cstdlib>
-#include <functional>
 #include <iostream>
 #include <memory>
-#include <random>
+#include <utility>
 #include <vector>
 
 using namespace torch::nn;
 using namespace torch::optim;
 
 template <typename OptimizerClass, typename Options>
-bool test_optimizer_xor(Options options) {
+static bool test_optimizer_xor(Options options) {
   torch::manual_seed(0);
 
   Sequential model(
@@ -30,9 +29,9 @@ bool test_optimizer_xor(Options options) {
   const int64_t kBatchSize = 200;
   const int64_t kMaximumNumberOfEpochs = 3000;
 
-  OptimizerClass optimizer(model->parameters(), options);
+  OptimizerClass optimizer(model->parameters(), std::move(options));
 
-  float running_loss = 1;
+  double running_loss = 1;
   int epoch = 0;
   while (running_loss > 0.1) {
     auto inputs = torch::empty({kBatchSize, 2});
@@ -46,8 +45,8 @@ bool test_optimizer_xor(Options options) {
 
     auto step = [&](OptimizerClass& optimizer,
                     Sequential model,
-                    torch::Tensor inputs,
-                    torch::Tensor labels) {
+                    const torch::Tensor& inputs,
+                    const torch::Tensor& labels) {
       auto closure = [&]() {
         optimizer.zero_grad();
         auto x = model->forward(inputs);
@@ -60,11 +59,10 @@ bool test_optimizer_xor(Options options) {
 
     torch::Tensor loss = step(optimizer, model, inputs, labels);
 
-    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,bugprone-narrowing-conversions)
-    running_loss = running_loss * 0.99 + loss.item<float>() * 0.01;
+    running_loss = running_loss * 0.99 + loss.item<double>() * 0.01;
     if (epoch > kMaximumNumberOfEpochs) {
       std::cout << "Loss is too high after epoch " << epoch << ": "
-                << running_loss << std::endl;
+                << running_loss << '\n';
       return false;
     }
     epoch++;
@@ -73,18 +71,18 @@ bool test_optimizer_xor(Options options) {
 }
 
 template <typename Parameters>
-void assign_parameter(
+static void assign_parameter(
     const Parameters& parameters,
     const char* name,
-    torch::Tensor new_tensor) {
+    const torch::Tensor& new_tensor) {
   auto parameter = parameters[name];
   parameter.set_requires_grad(false);
   parameter.flatten().copy_(new_tensor);
   parameter.set_requires_grad(true);
 }
 
 template <typename OptimizerClass, typename Options>
-void check_exact_values(
+static void check_exact_values(
     Options options,
     std::vector<std::vector<torch::Tensor>> expected_parameters) {
   const size_t kIterations = 1001;
@@ -119,7 +117,7 @@ void check_exact_values(
   assign_parameter(
       parameters, "2.bias", torch::tensor({-0.0711}, torch::kFloat64));
 
-  auto optimizer = OptimizerClass(parameters.values(), options);
+  auto optimizer = OptimizerClass(parameters.values(), std::move(options));
   torch::Tensor input =
       torch::tensor({0.1, 0.2, 0.3, 0.4, 0.5, 0.6}, torch::kFloat64)
           .reshape({3, 2});
@@ -145,8 +143,7 @@ void check_exact_values(
           expected_parameters.at(i / kSampleEvery).at(p).to(torch::kFloat64);
       if (!computed.allclose(expected, /*rtol=*/1e-3, /*atol=*/5e-4)) {
         std::cout << "Iteration " << i << ": " << computed
-                  << " != " << expected << " (parameter " << p << ")"
-                  << std::endl;
+                  << " != " << expected << " (parameter " << p << ")" << '\n';
         ASSERT_TRUE(false);
       }
     }
@@ -166,8 +163,7 @@ TEST(OptimTest, OptimizerAccessors) {
   ASSERT_TRUE(options == options_);
   // test for param_groups() with non-const reference return
   auto& params_groups = optimizer.param_groups();
-  // NOLINTNEXTLINE(modernize-use-emplace)
-  params_groups.push_back(OptimizerParamGroup(params));
+  params_groups.emplace_back(params);
   auto& params_1 = params_groups[1].params();
   for (const auto i : c10::irange(params_1.size())) {
     torch::equal(params[i], params_1[i]);
@@ -204,7 +200,7 @@ TEST(OptimTest, OptimizerAccessors) {
 
 struct MyOptimizerOptions
     : public OptimizerCloneableOptions<MyOptimizerOptions> {
-  MyOptimizerOptions(double lr = 1.0) : lr_(lr){};
+  MyOptimizerOptions(double lr = 1.0) : lr_(lr) {}
   TORCH_ARG(double, lr) = 1.0;
 };
 
@@ -216,27 +212,24 @@ TEST(OptimTest, OldInterface) {
     }
     explicit MyOptimizer(
        std::vector<at::Tensor> params,
-        MyOptimizerOptions defaults = {})
-        : // NOLINTNEXTLINE(performance-move-const-arg)
-          Optimizer(
-              {std::move(OptimizerParamGroup(params))},
+        const MyOptimizerOptions& defaults = {})
+        : Optimizer(
+              std::move(params),
              std::make_unique<MyOptimizerOptions>(defaults)) {}
   };
   std::vector<torch::Tensor> parameters = {
       torch::ones({2, 3}), torch::zeros({2, 3}), torch::rand({2, 3})};
   {
     MyOptimizer optimizer(parameters);
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t size;
+    size_t size = 0;
     OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
     ASSERT_EQ(size, parameters.size());
   }
   {
     std::vector<at::Tensor> params;
     MyOptimizer optimizer(params);
 
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t size;
+    size_t size = 0;
     OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
     ASSERT_EQ(size, 0);
 
@@ -255,8 +248,7 @@ TEST(OptimTest, OldInterface) {
     Linear linear(3, 4);
     MyOptimizer optimizer(linear->parameters());
 
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t size;
+    size_t size = 0;
     OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
     ASSERT_EQ(size, linear->parameters().size());
   }
@@ -480,7 +472,7 @@ TEST(OptimTest, AddParameter_LBFGS) {
 
 // Check whether the learning rate of the parameter groups in the optimizer are
 // the same as the expected learning rates given in the epoch:learning rate map
-void check_lr_change(
+static void check_lr_change(
     Optimizer& optimizer,
     LRScheduler& lr_scheduler,
     std::map<unsigned, double> expected_epoch_lrs) {
@@ -512,7 +504,7 @@ void check_lr_change(
 // Very similar to check_lr_change, but for ReduceLROnPlateauScheduler
 // which does not inherit from LRScheduler and requires a metrics
 // input to step().
-void check_lr_change_for_reduce_on_plateau(
+static void check_lr_change_for_reduce_on_plateau(
     Optimizer& optimizer,
     ReduceLROnPlateauScheduler& lr_scheduler,
     std::map<unsigned, double> expected_epoch_lrs) {
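For reference, the `test_optimizer_xor` hunks above keep the closure-based stepping pattern that LibTorch optimizers expect: all forward/backward work happens inside a closure handed to `Optimizer::step()`, so optimizers that re-evaluate the loss (LBFGS, for example) can call it more than once per step. Below is a minimal standalone sketch of that pattern; the helper name `train_step` and the use of binary cross-entropy are illustrative assumptions mirroring the test, not part of this diff.

```cpp
#include <torch/torch.h>

// Illustrative helper (hypothetical name) showing the closure-based step
// pattern used by the test: zero gradients, run the forward pass, compute
// the loss, backpropagate, and let step() drive the parameter update.
static torch::Tensor train_step(
    torch::optim::Optimizer& optimizer,
    torch::nn::Sequential& model,
    const torch::Tensor& inputs,
    const torch::Tensor& labels) {
  auto closure = [&]() -> torch::Tensor {
    optimizer.zero_grad();                     // clear gradients from the previous step
    auto prediction = model->forward(inputs);  // forward pass
    auto loss = torch::binary_cross_entropy(prediction, labels);
    loss.backward();                           // populate parameter gradients
    return loss;
  };
  return optimizer.step(closure);              // step() returns the closure's loss
}
```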