Commit 075905b

cyyever authored and seemethere committed
[14/N] Fix extra warnings brought by clang-tidy-17 (pytorch#141644)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch#141644
Approved by: https://github.com/ezyang

Co-authored-by: Eli Uriegas <[email protected]>
1 parent 72fd7ab commit 075905b

24 files changed: +86 -65 lines

aten/src/ATen/MatrixRef.h (+1 -1)

@@ -92,8 +92,8 @@ class MatrixRef {
   /// The declaration here is extra complicated so that "arrayRef = {}"
   /// continues to select the move assignment operator.
   template <typename U>
-  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
   std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
+      // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
       U&& Temporary) = delete;

   /// Disallow accidental assignment from a temporary.
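The `cppcoreguidelines-missing-std-forward` check fires when a forwarding-reference parameter is never passed to `std::forward`. The operator above is deleted, so nothing can ever be forwarded; the change just moves the suppression onto the line the diagnostic actually points at. A minimal sketch of what the check normally catches, with hypothetical names:

#include <string>
#include <utility>

struct Sink {
  std::string stored;

  // Flagged: `s` is a forwarding reference but is copied, never forwarded.
  template <typename S>
  void set_flagged(S&& s) {
    stored = s;
  }

  // Clean: the forwarding reference is forwarded exactly once.
  template <typename S>
  void set_clean(S&& s) {
    stored = std::forward<S>(s);
  }
};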

aten/src/ATen/cuda/CUDABlas.cpp (+1)

@@ -106,6 +106,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
 namespace {

 static cublasOperation_t _cublasOpFromChar(char op) {
+  // NOLINTNEXTLINE(bugprone-switch-missing-default-case)
   switch (op) {
     case 'n':
     case 'N':
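`bugprone-switch-missing-default-case` warns when a `switch` over a non-enum value has no `default:` label. `_cublasOpFromChar` rejects invalid characters after the switch, so a `default:` would be dead code, and the warning is suppressed instead. A simplified stand-in for the pattern (not the actual CUDABlas code):

#include <stdexcept>

enum class Op { None, Transpose, ConjTranspose };

Op opFromChar(char op) {
  // NOLINTNEXTLINE(bugprone-switch-missing-default-case)
  switch (op) {
    case 'n':
    case 'N':
      return Op::None;
    case 't':
    case 'T':
      return Op::Transpose;
    case 'c':
    case 'C':
      return Op::ConjTranspose;
  }
  throw std::invalid_argument("op must be one of n/N, t/T, c/C");
}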

test/cpp/api/optim.cpp (+23 -31)

@@ -8,17 +8,16 @@

 #include <cmath>
 #include <cstdlib>
-#include <functional>
 #include <iostream>
 #include <memory>
-#include <random>
+#include <utility>
 #include <vector>

 using namespace torch::nn;
 using namespace torch::optim;

 template <typename OptimizerClass, typename Options>
-bool test_optimizer_xor(Options options) {
+static bool test_optimizer_xor(Options options) {
   torch::manual_seed(0);

   Sequential model(
@@ -30,9 +29,9 @@ bool test_optimizer_xor(Options options) {
   const int64_t kBatchSize = 200;
   const int64_t kMaximumNumberOfEpochs = 3000;

-  OptimizerClass optimizer(model->parameters(), options);
+  OptimizerClass optimizer(model->parameters(), std::move(options));

-  float running_loss = 1;
+  double running_loss = 1;
   int epoch = 0;
   while (running_loss > 0.1) {
     auto inputs = torch::empty({kBatchSize, 2});
@@ -46,8 +45,8 @@ bool test_optimizer_xor(Options options) {

   auto step = [&](OptimizerClass& optimizer,
                   Sequential model,
-                  torch::Tensor inputs,
-                  torch::Tensor labels) {
+                  const torch::Tensor& inputs,
+                  const torch::Tensor& labels) {
     auto closure = [&]() {
       optimizer.zero_grad();
       auto x = model->forward(inputs);
@@ -60,11 +59,10 @@ bool test_optimizer_xor(Options options) {

     torch::Tensor loss = step(optimizer, model, inputs, labels);

-    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,bugprone-narrowing-conversions)
-    running_loss = running_loss * 0.99 + loss.item<float>() * 0.01;
+    running_loss = running_loss * 0.99 + loss.item<double>() * 0.01;
     if (epoch > kMaximumNumberOfEpochs) {
       std::cout << "Loss is too high after epoch " << epoch << ": "
-                << running_loss << std::endl;
+                << running_loss << '\n';
       return false;
     }
     epoch++;
@@ -73,18 +71,18 @@ bool test_optimizer_xor(Options options) {
 }

 template <typename Parameters>
-void assign_parameter(
+static void assign_parameter(
     const Parameters& parameters,
     const char* name,
-    torch::Tensor new_tensor) {
+    const torch::Tensor& new_tensor) {
   auto parameter = parameters[name];
   parameter.set_requires_grad(false);
   parameter.flatten().copy_(new_tensor);
   parameter.set_requires_grad(true);
 }

 template <typename OptimizerClass, typename Options>
-void check_exact_values(
+static void check_exact_values(
     Options options,
     std::vector<std::vector<torch::Tensor>> expected_parameters) {
   const size_t kIterations = 1001;
@@ -119,7 +117,7 @@ void check_exact_values(
   assign_parameter(
       parameters, "2.bias", torch::tensor({-0.0711}, torch::kFloat64));

-  auto optimizer = OptimizerClass(parameters.values(), options);
+  auto optimizer = OptimizerClass(parameters.values(), std::move(options));
   torch::Tensor input =
       torch::tensor({0.1, 0.2, 0.3, 0.4, 0.5, 0.6}, torch::kFloat64)
           .reshape({3, 2});
@@ -145,8 +143,7 @@ void check_exact_values(
           expected_parameters.at(i / kSampleEvery).at(p).to(torch::kFloat64);
       if (!computed.allclose(expected, /*rtol=*/1e-3, /*atol=*/5e-4)) {
         std::cout << "Iteration " << i << ": " << computed
-                  << " != " << expected << " (parameter " << p << ")"
-                  << std::endl;
+                  << " != " << expected << " (parameter " << p << ")" << '\n';
         ASSERT_TRUE(false);
       }
     }
@@ -166,8 +163,7 @@ TEST(OptimTest, OptimizerAccessors) {
   ASSERT_TRUE(options == options_);
   // test for param_groups() with non-const reference return
   auto& params_groups = optimizer.param_groups();
-  // NOLINTNEXTLINE(modernize-use-emplace)
-  params_groups.push_back(OptimizerParamGroup(params));
+  params_groups.emplace_back(params);
   auto& params_1 = params_groups[1].params();
   for (const auto i : c10::irange(params_1.size())) {
     torch::equal(params[i], params_1[i]);
@@ -204,7 +200,7 @@ TEST(OptimTest, OptimizerAccessors) {

 struct MyOptimizerOptions
     : public OptimizerCloneableOptions<MyOptimizerOptions> {
-  MyOptimizerOptions(double lr = 1.0) : lr_(lr){};
+  MyOptimizerOptions(double lr = 1.0) : lr_(lr) {}
   TORCH_ARG(double, lr) = 1.0;
 };

@@ -216,27 +212,24 @@ TEST(OptimTest, OldInterface) {
   }
   explicit MyOptimizer(
       std::vector<at::Tensor> params,
-      MyOptimizerOptions defaults = {})
-      : // NOLINTNEXTLINE(performance-move-const-arg)
-        Optimizer(
-            {std::move(OptimizerParamGroup(params))},
+      const MyOptimizerOptions& defaults = {})
+      : Optimizer(
+            std::move(params),
             std::make_unique<MyOptimizerOptions>(defaults)) {}
 };
 std::vector<torch::Tensor> parameters = {
     torch::ones({2, 3}), torch::zeros({2, 3}), torch::rand({2, 3})};
 {
   MyOptimizer optimizer(parameters);
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  size_t size;
+  size_t size = 0;
   OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
   ASSERT_EQ(size, parameters.size());
 }
 {
   std::vector<at::Tensor> params;
   MyOptimizer optimizer(params);

-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  size_t size;
+  size_t size = 0;
   OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
   ASSERT_EQ(size, 0);

@@ -255,8 +248,7 @@ TEST(OptimTest, OldInterface) {
   Linear linear(3, 4);
   MyOptimizer optimizer(linear->parameters());

-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  size_t size;
+  size_t size = 0;
   OLD_INTERFACE_WARNING_CHECK(size = optimizer.size());
   ASSERT_EQ(size, linear->parameters().size());
 }
@@ -480,7 +472,7 @@ TEST(OptimTest, AddParameter_LBFGS) {

 // Check whether the learning rate of the parameter groups in the optimizer are
 // the same as the expected learning rates given in the epoch:learning rate map
-void check_lr_change(
+static void check_lr_change(
     Optimizer& optimizer,
     LRScheduler& lr_scheduler,
     std::map<unsigned, double> expected_epoch_lrs) {
@@ -512,7 +504,7 @@ void check_lr_change(
 // Very similar to check_lr_change, but for ReduceLROnPlateauScheduler
 // which does not inherit from LRScheduler and requires a metrics
 // input to step().
-void check_lr_change_for_reduce_on_plateau(
+static void check_lr_change_for_reduce_on_plateau(
     Optimizer& optimizer,
     ReduceLROnPlateauScheduler& lr_scheduler,
     std::map<unsigned, double> expected_epoch_lrs) {
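This file collects several recurring clang-tidy-17 fixes: file-local helpers gain `static` (internal linkage), the running loss is accumulated in `double` so the narrowing NOLINT disappears, tensors are taken by `const&`, `emplace_back` replaces `push_back` of a temporary, `'\n'` replaces `std::endl`, and locals are initialized at declaration. A small sketch of the two less obvious ones, under hypothetical names:

#include <utility>
#include <vector>

// Accumulating in double removes the float narrowing that previously
// needed a NOLINT: every term of the expression stays double.
double ema_update(double running_loss, double loss) {
  return running_loss * 0.99 + loss * 0.01;
}

// modernize-use-emplace: construct the element in place instead of
// pushing a named temporary, mirroring the param_groups change above.
void add_group(std::vector<std::pair<int, int>>& groups) {
  groups.emplace_back(1, 2);                        // preferred
  // groups.push_back(std::pair<int, int>(1, 2));   // flagged
}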

torch/csrc/CudaIPCTypes.cpp (+5)

@@ -36,6 +36,10 @@ struct CudaIPCGlobalEntities {
   CudaIPCGlobalEntities() {
     alive = true;
   }
+  CudaIPCGlobalEntities(const CudaIPCGlobalEntities&) = delete;
+  CudaIPCGlobalEntities(CudaIPCGlobalEntities&&) = delete;
+  CudaIPCGlobalEntities& operator=(const CudaIPCGlobalEntities&) = delete;
+  CudaIPCGlobalEntities& operator=(CudaIPCGlobalEntities&&) = delete;
   ~CudaIPCGlobalEntities() {
     CudaIPCSentDataLimbo_.collect();
     safe_clean_current_file();
@@ -202,6 +206,7 @@ CudaIPCSentData::~CudaIPCSentData() {
      }
      cuda_ipc_global_entities.sync_events_used_--;
    }
+    // NOLINTNEXTLINE(bugprone-empty-catch)
  } catch (...) { /* No throw */
  }
#endif
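clang-tidy-17 tightens `cppcoreguidelines-special-member-functions`: once a class declares a destructor, the remaining special members should be stated explicitly. For never-copied types like `CudaIPCGlobalEntities` (and `PyInterpreterHolder`, `DataLoaderBase`, and `AutogradContext` below) they are deleted outright. A hypothetical illustration of the pattern, including the intentional empty catch:

class GlobalRegistry {
 public:
  GlobalRegistry() = default;
  GlobalRegistry(const GlobalRegistry&) = delete;
  GlobalRegistry(GlobalRegistry&&) = delete;
  GlobalRegistry& operator=(const GlobalRegistry&) = delete;
  GlobalRegistry& operator=(GlobalRegistry&&) = delete;
  ~GlobalRegistry() {
    try {
      // cleanup that must not throw out of a destructor
      // NOLINTNEXTLINE(bugprone-empty-catch)
    } catch (...) { /* swallowing here is deliberate */
    }
  }
};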

torch/csrc/Generator.cpp (+2 -3)

@@ -30,7 +30,7 @@ using namespace torch;

 PyObject* THPGeneratorClass = nullptr;

-PyObject* THPGenerator_initDefaultGenerator(const at::Generator& cdata) {
+PyObject* THPGenerator_initDefaultGenerator(at::Generator cdata) {
   auto type = (PyTypeObject*)THPGeneratorClass;
   auto self = THPObjectPtr{type->tp_alloc(type, 0)};
   if (!self)
@@ -401,8 +401,7 @@ PyObject* THPGenerator_Wrap(const Generator& gen) {
     return obj;
   }

-  return THPGenerator_NewWithVar(
-      (PyTypeObject*)THPGeneratorClass, std::move(gen));
+  return THPGenerator_NewWithVar((PyTypeObject*)THPGeneratorClass, gen);
 }

 at::Generator THPGenerator_Unwrap(PyObject* state) {

torch/csrc/Generator.h (+1 -1)

@@ -14,7 +14,7 @@ struct THPGenerator {
 // is borrowed. The caller should ensure that the at::Generator object lifetime
 // last at least as long as the Python wrapper.
 TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
-    const at::Generator& cdata);
+    at::Generator cdata);

 #define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
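Changing `cdata` from `const at::Generator&` to by-value follows the sink-argument idiom: `at::Generator` is a cheap-to-move handle, and the function keeps it for the new Python wrapper. Taking it by value lets callers move in and makes the ownership transfer explicit; it also removes the `performance-move-const-arg` NOLINT in Generator.cpp, since moving a const reference never moved anything. A hypothetical sketch of the idiom:

#include <memory>
#include <string>
#include <utility>

// When a function keeps its argument, take it by value and move it into
// place: rvalue callers pay one move, lvalue callers pay exactly the one
// copy the ownership transfer requires anyway.
class Wrapper {
 public:
  explicit Wrapper(std::shared_ptr<std::string> handle)
      : handle_(std::move(handle)) {}

 private:
  std::shared_ptr<std::string> handle_;
};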

torch/csrc/PyInterpreter.cpp (+4)

@@ -159,6 +159,10 @@ class PyInterpreterHolder {
         is_main_interpreter_(
             at::impl::PythonOpRegistrationTrampoline::registerInterpreter(
                 impl_)) {}
+  PyInterpreterHolder(const PyInterpreterHolder&) = delete;
+  PyInterpreterHolder(PyInterpreterHolder&&) = delete;
+  PyInterpreterHolder& operator=(const PyInterpreterHolder&) = delete;
+  PyInterpreterHolder& operator=(PyInterpreterHolder&&) = delete;
   // NB: intentionally leaks the PyInterpreter, as there may still be
   // references to it that are live, living in objects that aren't being
   // destructed while Python is being cleaned up.

torch/csrc/api/include/torch/cuda.h (+3 -3)

@@ -1,14 +1,14 @@
 #pragma once

-#include <torch/csrc/Export.h>
+#include <c10/core/Device.h>
+#include <c10/macros/Export.h>

-#include <cstddef>
 #include <cstdint>

 namespace torch::cuda {

 /// Returns the number of CUDA devices available.
-size_t TORCH_API device_count();
+c10::DeviceIndex TORCH_API device_count();

 /// Returns true if at least one CUDA device is available.
 bool TORCH_API is_available();

torch/csrc/api/include/torch/data/dataloader/base.h (+4)

@@ -37,6 +37,10 @@ class DataLoaderBase {
         main_thread_dataset_(std::move(main_thread_dataset)),
         sequencer_(new_sequencer()) {}

+  DataLoaderBase(const DataLoaderBase&) = delete;
+  DataLoaderBase(DataLoaderBase&&) = delete;
+  DataLoaderBase& operator=(const DataLoaderBase&) = delete;
+  DataLoaderBase& operator=(DataLoaderBase&&) = delete;
   // NOLINTNEXTLINE(bugprone-exception-escape)
   virtual ~DataLoaderBase() {
     join();

torch/csrc/api/include/torch/nn/modules/container/any_value.h (+3)

@@ -21,6 +21,7 @@ class AnyValue {
   /// behavior of move for `std::unique_ptr`.
   AnyValue(AnyValue&&) = default;
   AnyValue& operator=(AnyValue&&) = default;
+  ~AnyValue() = default;

   /// Copy construction and assignment is allowed.
   AnyValue(const AnyValue& other) : content_(other.content_->clone()) {}
@@ -89,6 +90,8 @@ class AnyValue {
         : type_info(type_info_) {}
     Placeholder(const Placeholder&) = default;
     Placeholder(Placeholder&&) = default;
+    Placeholder& operator=(const Placeholder&) = delete;
+    Placeholder& operator=(Placeholder&&) = delete;
     virtual ~Placeholder() = default;
     virtual std::unique_ptr<Placeholder> clone() const {
       TORCH_CHECK(false, "clone() should only be called on `AnyValue::Holder`");
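Deleting assignment on the polymorphic `Placeholder` base closes off slicing: assigning through a base reference would copy only the base subobject. Copy and move construction stay defaulted because derived holders need them for `clone()`. A hypothetical illustration of the shape:

#include <memory>

struct Base {
  Base() = default;
  Base(const Base&) = default;            // needed by derived clone()
  Base(Base&&) = default;
  Base& operator=(const Base&) = delete;  // forbids b1 = b2 slicing state
  Base& operator=(Base&&) = delete;
  virtual ~Base() = default;
  virtual std::unique_ptr<Base> clone() const = 0;
};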

torch/csrc/api/include/torch/optim/adagrad.h (-5)

@@ -41,11 +41,6 @@ struct TORCH_API AdagradParamState
   TORCH_ARG(int64_t, step) = 0;

  public:
-  AdagradParamState() = default;
-  AdagradParamState(const AdagradParamState&) = default;
-  AdagradParamState& operator=(const AdagradParamState&) = default;
-  AdagradParamState(AdagradParamState&&) noexcept = default;
-  AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
   void serialize(torch::serialize::InputArchive& archive) override;
   void serialize(torch::serialize::OutputArchive& archive) const override;
   TORCH_API friend bool operator==(
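The opposite move from the rest of the commit: these five members were all defaulted, i.e. exactly what the compiler generates implicitly for this type, so deleting the declarations changes nothing (rule of zero). A hypothetical before/after:

// Both structs have identical semantics; the second is the shape left in
// AdagradParamState after this change.
struct Verbose {
  long step = 0;
  Verbose() = default;
  Verbose(const Verbose&) = default;
  Verbose& operator=(const Verbose&) = default;
  Verbose(Verbose&&) noexcept = default;
  Verbose& operator=(Verbose&&) noexcept = default;
};

struct Lean {
  long step = 0;
};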

torch/csrc/api/include/torch/optim/optimizer.h (+6)

@@ -85,6 +85,7 @@ class TORCH_API OptimizerParamGroup {
         options_(
             param_group.has_options() ? param_group.options().clone()
                                       : nullptr) {}
+  OptimizerParamGroup(OptimizerParamGroup&& param_group) = default;
   OptimizerParamGroup(std::vector<Tensor> params)
       : params_(std::move(params)) {}
   OptimizerParamGroup(
@@ -94,6 +95,9 @@ class TORCH_API OptimizerParamGroup {

   OptimizerParamGroup& operator=(const OptimizerParamGroup& param_group) =
       delete;
+  OptimizerParamGroup& operator=(OptimizerParamGroup&& param_group) noexcept =
+      default;
+  ~OptimizerParamGroup() = default;
   bool has_options() const;
   OptimizerOptions& options();
   const OptimizerOptions& options() const;
@@ -112,6 +116,8 @@ class TORCH_API Optimizer {
   // `state_dict` / `load_state_dict` API to copy an optimizer instead.
   Optimizer(const Optimizer& optimizer) = delete;
   Optimizer(Optimizer&& optimizer) = default;
+  Optimizer& operator=(const Optimizer& optimizer) = delete;
+  Optimizer& operator=(Optimizer&& optimizer) = default;

   explicit Optimizer(
       const std::vector<OptimizerParamGroup>& param_groups,
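Making `OptimizerParamGroup` explicitly move-constructible and move-assignable, while keeping copy assignment deleted, is what lets the `emplace_back(params)` and `std::move(params)` call sites in test/cpp/api/optim.cpp above drop their NOLINTs. A hypothetical stand-in showing the shape of such a type:

#include <utility>
#include <vector>

struct Group {  // stand-in for OptimizerParamGroup
  explicit Group(std::vector<int> params) : params_(std::move(params)) {}
  Group(Group&&) = default;
  Group& operator=(const Group&) = delete;
  Group& operator=(Group&&) noexcept = default;
  ~Group() = default;

  std::vector<int> params_;
};

void demo() {
  std::vector<Group> groups;
  std::vector<int> params = {1, 2, 3};
  groups.emplace_back(params);  // constructs the Group in place, no temporary
}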

torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h (+1)

@@ -32,6 +32,7 @@ class TORCH_API LRScheduler {
  private:
   void set_optimizer_lrs(const std::vector<double>& learning_rates);

+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   torch::optim::Optimizer& optimizer_;
 };
 } // namespace torch::optim
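`cppcoreguidelines-avoid-const-or-ref-data-members` objects to reference members because they make the class non-copy-assignable and tie its lifetime to the referent. `LRScheduler` holds the reference deliberately: it must mutate an optimizer it does not own, so the warning is suppressed rather than the design changed. The flagged pattern, in a hypothetical reduced form:

struct Scheduler {
  explicit Scheduler(int& counter) : counter_(counter) {}
  void step() { ++counter_; }

 private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  int& counter_;
};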

torch/csrc/api/include/torch/ordered_dict.h (+1 -1)

@@ -379,7 +379,7 @@ Value& OrderedDict<Key, Value>::insert(Key key, Value&& value) {
 template <typename Key, typename Value>
 void OrderedDict<Key, Value>::update(OrderedDict&& other) {
   reserve(size() + other.size());
-  for (auto& item : other) {
+  for (auto&& item : std::move(other)) {
     // We want to call `insert()` to prevent duplicate keys.
     insert(std::move(item.key()), std::move(item.value()));
   }
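`cppcoreguidelines-rvalue-reference-param-not-moved` wants an rvalue-reference parameter to be visibly consumed with `std::move`. Iterating `std::move(other)` does not change which iterators the loop gets, but it documents that `other` is being drained while the per-element `std::move` calls do the real work. The same shape in a hypothetical free function:

#include <string>
#include <utility>
#include <vector>

// The && parameter is visibly moved, satisfying the check; each element
// is then moved out individually.
void append_all(std::vector<std::string>& dst, std::vector<std::string>&& src) {
  dst.reserve(dst.size() + src.size());
  for (auto&& item : std::move(src)) {
    dst.push_back(std::move(item));
  }
}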

torch/csrc/api/src/cuda.cpp (+2 -4)

@@ -4,11 +4,9 @@
 #include <c10/core/DeviceGuard.h>
 #include <c10/util/irange.h>

-#include <cstddef>
-
 namespace torch::cuda {

-size_t device_count() {
+c10::DeviceIndex device_count() {
   return at::detail::getCUDAHooks().deviceCount();
 }

@@ -54,7 +52,7 @@ void synchronize(int64_t device_index) {
   TORCH_CHECK(is_available(), "No CUDA GPUs are available");
   auto num_gpus = cuda::device_count();
   TORCH_CHECK(
-      device_index < 0 || static_cast<size_t>(device_index) < num_gpus,
+      device_index < 0 || device_index < num_gpus,
       "Device index out of range: ",
       device_index);
   at::detail::getCUDAHooks().deviceSynchronize(
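Returning `c10::DeviceIndex` instead of `size_t` keeps the device count in the same signed domain as device indices, which is why the `static_cast<size_t>` in the range check above can simply disappear: comparing `int64_t` against a small signed type is no longer a mixed signed/unsigned comparison. A reduced sketch (assuming, as in c10, that the index type is a small signed integer; `int8_t` stands in for it here):

#include <cstdint>

using DeviceIndexLike = int8_t;  // stand-in for c10::DeviceIndex

// Signed-vs-signed comparison: no cast, no sign-compare warning.
// A negative device_index means "current device" and is allowed through.
bool index_in_range(int64_t device_index, DeviceIndexLike num_gpus) {
  return device_index < 0 || device_index < num_gpus;
}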

torch/csrc/autograd/autograd_not_implemented_fallback.cpp (+1)

@@ -89,6 +89,7 @@ struct WarnNotImplemented : public Node {
   size_t num_outputs;
 };

+// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
 auto WarnNotImplemented::apply(variable_list&& inputs) -> variable_list {
   auto inputsLocal = std::move(inputs);
   warnAutogradNotImplemented(op_name);

torch/csrc/autograd/custom_function.h (+3)

@@ -122,6 +122,9 @@ struct TORCH_API AutogradContext {
   AutogradContext() = default;
   AutogradContext(const AutogradContext& other) = delete;
   AutogradContext& operator=(const AutogradContext& other) = delete;
+  AutogradContext(AutogradContext&& other) = delete;
+  AutogradContext& operator=(AutogradContext&& other) = delete;
+  ~AutogradContext() = default;

   /// Can be used to save non-variable data for `backward`.
   ska::flat_hash_map<std::string, at::IValue> saved_data;
