
Commit 64d9ee8

cyyever authored and pytorchmergebot committed
[11/N] Fix extra warnings brought by clang-tidy-17 (pytorch#139599)
Follows pytorch#139385
Pull Request resolved: pytorch#139599
Approved by: https://github.com/sraikund16
1 parent 3f248a5 commit 64d9ee8

26 files changed (+54, -34 lines)

.clang-tidy (+2, -1)

@@ -35,13 +35,14 @@ cppcoreguidelines-*,
 hicpp-exception-baseclass,
 hicpp-avoid-goto,
 misc-*,
+-misc-confusable-identifiers,
 -misc-const-correctness,
 -misc-include-cleaner,
 -misc-use-anonymous-namespace,
 -misc-unused-parameters,
 -misc-no-recursion,
 -misc-non-private-member-variables-in-classes,
--misc-confusable-identifiers,
+-misc-unused-using-decls,
 modernize-*,
 -modernize-macro-to-enum,
 -modernize-return-braced-init-list,
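
Note: in the Checks list a leading '-' disables a check, so this change disables misc-unused-using-decls (a source of new warnings under clang-tidy-17) and merely reorders the already-disabled misc-confusable-identifiers. As a hedged illustration (hypothetical code, not from this PR), misc-unused-using-decls flags using-declarations that are never referenced:

    #include <vector>

    using std::vector;  // would be flagged: the name is introduced but never used

    int sum_first_two(const int* xs) {
      return xs[0] + xs[1];
    }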

.lintrunner.toml (+3)

@@ -227,6 +227,9 @@ exclude_patterns = [
 '**/generated/**',
 '**/*pb.h',
 '**/*inl.h',
+'aten/src/ATen/cpu/FlushDenormal.cpp',
+'aten/src/ATen/cpu/Utils.cpp',
+'aten/src/ATen/cpu/vml.h',
 'aten/src/ATen/CPUFixedAllocator.h',
 'aten/src/ATen/Parallel*.h',
 'c10/xpu/**/*.h',

aten/src/ATen/core/CachingHostAllocator.h (+1)

@@ -40,6 +40,7 @@ struct alignas(64) FreeBlockList {

 namespace {
 // Max cached block sizes: (1 << MAX_SIZE_INDEX) bytes
+// NOLINTNEXTLINE(misc-definitions-in-headers)
 constexpr size_t MAX_SIZE_INDEX = 64;
 }
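
Note: NOLINTNEXTLINE suppresses the named check for the following line only. misc-definitions-in-headers warns about non-inline definitions in headers, since every including translation unit gets its own copy. A minimal sketch (hypothetical header, not from this PR):

    // example.h (hypothetical)
    #pragma once

    // A non-inline definition in a header would normally be flagged; the
    // suppression records that the duplication/ODR risk is accepted here.
    // NOLINTNEXTLINE(misc-definitions-in-headers)
    int global_counter = 0;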

aten/src/ATen/cuda/CUDASparseBlas.h (+2)

@@ -12,6 +12,7 @@
 #include <ATen/cuda/CUDAContext.h>
 #include <ATen/cuda/CUDASparse.h>

+// NOLINTBEGIN(misc-misplaced-const)
 namespace at::cuda::sparse {

 #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
@@ -316,3 +317,4 @@ void bsrsm2_solve<c10::complex<double>>(
 #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE

 } // namespace at::cuda::sparse
+// NOLINTEND(misc-misplaced-const)
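
Note: misc-misplaced-const fires when const is applied to a typedef or alias of a pointer type, which makes the pointer itself const rather than the pointee; the argument-type macros in this header presumably hit that pattern with the cuSPARSE handle typedefs, so the whole file is bracketed with NOLINTBEGIN/NOLINTEND. Sketch of the diagnostic with a hypothetical alias:

    using handle_t = int*;

    // 'const handle_t' is 'int* const' (a const pointer), not 'const int*';
    // the check suspects the const was intended for the pointee and warns.
    void poke(const handle_t h) {
      *h = 7;  // still legal: the pointee is not const
    }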

aten/src/ATen/cuda/CUDASparseDescriptors.cpp (+2)

@@ -8,6 +8,7 @@
 namespace at::cuda::sparse {

 cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr) {
+// NOLINTNEXTLINE(*const-cast)
 return cusparseDestroyDnMat(const_cast<cusparseDnMatDescr*>(dnMatDescr));
 }

@@ -83,6 +84,7 @@ cusparseDnMatDescr_t createRawDnMatDescriptor(const Tensor& input, int64_t batch
 #endif

 auto batch_stride = ndim > 2 && batch_offset >= 0 ? input_strides[ndim - 3] : 0;
+// NOLINTNEXTLINE(*const-cast)
 void* data_ptr = is_const ? const_cast<void*>(input.const_data_ptr()) : input.data_ptr();
 void* values_ptr = static_cast<char*>(data_ptr) +
 batch_offset * batch_stride * input.itemsize();
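
Note: the wildcard in NOLINTNEXTLINE(*const-cast) matches the const-cast-related checks (e.g. cppcoreguidelines-pro-type-const-cast). The cast is unavoidable here because the C API takes a non-const descriptor pointer. A generic sketch of the same boundary pattern, with a hypothetical C function:

    // Hypothetical C-style API that takes a non-const pointer even though it
    // does not modify the data.
    extern "C" int legacy_destroy(char* handle) { return handle ? 0 : 1; }

    int destroy_wrapper(const char* handle) {
      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
      return legacy_destroy(const_cast<char*>(handle));
    }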

aten/src/ATen/cudnn/AutocastRNN.cpp (+1, -1)

@@ -18,7 +18,7 @@ Autocast wrapper for CuDNN RNNs (the weight reflattening needs special attention

 // To be registered for the "_cudnn_rnn(...)" schema.
 // _cudnn_rnn is autograd-exposed (test_autocast_cudnn_rnn in test_cuda.py includes a test to confirm)
-std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
+static std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>
 _cudnn_rnn_cast_reflatten(const Tensor & input,
 TensorList weight,
 int64_t weight_stride0,
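
Note: _cudnn_rnn_cast_reflatten is only referenced inside this translation unit (it is registered for the schema rather than declared in a header), so it gets internal linkage via static, which silences the warning about a definition with no prior declaration. Sketch of the general pattern, with a hypothetical helper:

    // foo.cpp (hypothetical translation unit)

    // Internal linkage: no header declaration is expected for this helper.
    static int square(int x) {
      return x * x;
    }

    int times_four(int x) {
      return square(x) * 4;
    }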

aten/src/ATen/cudnn/Descriptors.cpp (+8, -5)

@@ -6,6 +6,7 @@
 #include <iostream>
 #include <sstream>

+// NOLINTBEGIN(*c-arrays*)
 namespace at::native {

 namespace {
@@ -101,7 +102,7 @@ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d) {
 int nbDims = 0;
 int dimA[CUDNN_DIM_MAX];
 int strideA[CUDNN_DIM_MAX];
-cudnnDataType_t dtype;
+cudnnDataType_t dtype{};
 cudnnGetTensorNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &nbDims, dimA, strideA);
 out << " type = " << cudnnTypeToString(dtype) << "\n";
 out << " nbDims = " << nbDims << "\n";
@@ -143,7 +144,7 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
 size[i] = (int) 1;
 }
 dim = std::max(dim, pad);
-cudnnTensorFormat_t filter_format;
+cudnnTensorFormat_t filter_format{};
 switch(memory_format) {
 case at::MemoryFormat::Contiguous:
 filter_format = CUDNN_TENSOR_NCHW;
@@ -155,7 +156,8 @@ void FilterDescriptor::set(const at::Tensor &t, const at::MemoryFormat memory_fo
 default:
 TORCH_INTERNAL_ASSERT(false, "unsupported memory_format for cuDNN filters");
 }
-set(getDataType(t), (int) dim, size, filter_format);
+// NOLINTNEXTLINE(*narrowing-conversions)
+set(getDataType(t), static_cast<int64_t>(dim), size, filter_format);
 }

 std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) {
@@ -175,8 +177,8 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
 out << "FilterDescriptor " << static_cast<void*>(d.desc()) << "\n";
 int nbDims = 0;
 int dimA[CUDNN_DIM_MAX];
-cudnnDataType_t dtype;
-cudnnTensorFormat_t tformat;
+cudnnDataType_t dtype{};
+cudnnTensorFormat_t tformat{};
 cudnnGetFilterNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &tformat, &nbDims, dimA);
 out << " type = " << cudnnTypeToString(dtype) << "\n";
 out << " tensor_format = " << cudnnMemoryFormatToString(tformat) << "\n";
@@ -193,3 +195,4 @@ std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) {
 void FilterDescriptor::print() { std::cout << *this; }

 }
+// NOLINTEND(*c-arrays*)
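
Note: the dtype{} / tformat{} / filter_format{} changes value-initialize locals whose values are produced by cuDNN out-parameter calls, so clang-tidy no longer sees a read of a possibly uninitialized variable. A small runnable sketch of the pattern, using a hypothetical API:

    enum color_t { RED, GREEN, BLUE };

    // Hypothetical C-style query that reports through an out-parameter.
    void query_color(color_t* out) { *out = GREEN; }

    int main() {
      color_t c{};      // value-initialized; without '{}' the variable would be
      query_color(&c);  // flagged as possibly uninitialized before the call fills it
      return c == GREEN ? 0 : 1;
    }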

aten/src/ATen/cudnn/Descriptors.h (+4, -3)

@@ -92,6 +92,7 @@ struct DescriptorDeleter {
 // initialized the first time you call set() or any other initializing
 // function.
 template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
+// NOLINTNEXTLINE(bugprone-exception-escape)
 class TORCH_CUDA_CPP_API Descriptor {
 public:
 // TODO: Figure out why const-correctness doesn't work here
@@ -128,7 +129,7 @@ class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
 void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
 private:
 void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
-AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, NULL));
+AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, nullptr));
 }
 };

@@ -224,6 +225,7 @@ struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
 }
 };

+// NOLINTNEXTLINE(bugprone-exception-escape)
 struct TORCH_CUDA_CPP_API DropoutDescriptor
 : public Descriptor<
 cudnnDropoutStruct,
@@ -244,9 +246,8 @@ struct TORCH_CUDA_CPP_API DropoutDescriptor
 }

 // Restore a dropout descriptor given a dropout probability and existing RNG state.
-void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
+void set(cudnnHandle_t handle, float dropout, const at::Tensor& state) {
 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
-state = state_;
 void *state_ptr = state.data_ptr();
 size_t state_size = state.size(0);
 // NB: The seed doesn't actually matter, so we give a dummy value

aten/src/ATen/cudnn/Types.cpp (+1, -3)

@@ -5,7 +5,7 @@
 namespace at::native {

 cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
-if (dtype == c10::kQInt8) {
+if (dtype == c10::kQInt8 || dtype == at::kChar) {
 return CUDNN_DATA_INT8;
 } else if (dtype == at::kFloat) {
 return CUDNN_DATA_FLOAT;
@@ -19,8 +19,6 @@ cudnnDataType_t getCudnnDataTypeFromScalarType(const at::ScalarType dtype) {
 return CUDNN_DATA_INT32;
 } else if (dtype == at::kByte) {
 return CUDNN_DATA_UINT8;
-} else if (dtype == at::kChar) {
-return CUDNN_DATA_INT8;
 }
 std::string msg("getCudnnDataTypeFromScalarType() not supported for ");
 msg += toString(dtype);
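
Note: this change folds at::kChar into the kQInt8 branch because both map to CUDNN_DATA_INT8; if/else-if chains with identical branch bodies are what bugprone-branch-clone reports. A generic sketch, not from this PR:

    enum class Kind { A, B, C };

    int mapped_before(Kind k) {
      if (k == Kind::A) {
        return 1;
      } else if (k == Kind::C) {  // flagged: body is a clone of the previous branch
        return 1;
      }
      return 0;
    }

    int mapped_after(Kind k) {
      if (k == Kind::A || k == Kind::C) {
        return 1;
      }
      return 0;
    }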

aten/src/ATen/native/nested/NestedTensorUtils.h (+7, -7)

@@ -32,7 +32,7 @@ struct NestedTensorImpl;
 // The following functions are used to construct nested tensors from buffers and
 // metadata.

-inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
+inline at::Tensor wrap_buffer(const at::Tensor& buffer, const at::Tensor& nested_sizes) {
 TORCH_CHECK(
 buffer.dim() == 1,
 "Expected given buffer to be 1dim, but got ",
@@ -41,19 +41,19 @@ inline at::Tensor wrap_buffer(at::Tensor buffer, at::Tensor nested_sizes) {
 TORCH_CHECK(
 buffer.is_contiguous(), "Expected given buffer to be contiguous.");
 return at::detail::make_tensor<NestedTensorImpl>(
-std::move(buffer), std::move(nested_sizes));
+buffer, nested_sizes);
 }

 // TODO: Figure out if we need a non-moving wrap_buffer()
 inline at::Tensor wrap_buffer(
-at::Tensor buffer,
+const at::Tensor& buffer,
 at::Tensor nested_sizes,
 at::Tensor nested_strides,
 at::Tensor storage_offsets) {
 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
 buffer.is_contiguous(), "Given buffer must be contiguous.");
 return at::detail::make_tensor<NestedTensorImpl>(
-std::move(buffer),
+buffer,
 std::move(nested_sizes),
 std::move(nested_strides),
 std::move(storage_offsets));
@@ -95,9 +95,9 @@ inline at::Tensor create_nested_view_tensor(
 return at::detail::make_tensor<NestedTensorImpl>(
 c10::TensorImpl::VIEW,
 base,
-nested_sizes,
-nested_strides,
-storage_offsets);
+std::move(nested_sizes),
+std::move(nested_strides),
+std::move(storage_offsets));
 }
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
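
Note: these changes follow the usual clang-tidy guidance on parameter passing: arguments that are only read are taken by const reference (and never std::moved), while arguments that are stored or forwarded are taken by value and moved exactly once. A sketch with a hypothetical type:

    #include <cstddef>
    #include <string>
    #include <utility>

    struct Record {
      std::string name;

      // Stored argument: take by value and move into the member; an rvalue
      // caller pays one move instead of a copy.
      explicit Record(std::string n) : name(std::move(n)) {}
    };

    // Read-only argument: take by const reference; moving from it would be
    // misleading and copying it would be wasted work.
    std::size_t name_length(const Record& r) {
      return r.name.size();
    }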

aten/src/ATen/templates/TensorBody.h (+2, -2)

@@ -195,7 +195,7 @@ class TORCH_API Tensor: public TensorBase {
 //
 // TODO: temporarily disabled

-Tensor& operator=(const TensorBase& x) & {
+Tensor& operator=(const TensorBase& x) & noexcept {
 impl_ = x.getIntrusivePtr();
 return *this;
 }
@@ -204,7 +204,7 @@ class TORCH_API Tensor: public TensorBase {
 return *this;
 }

-Tensor& operator=(const Tensor &x) & {
+Tensor& operator=(const Tensor &x) & noexcept {
 return operator=(static_cast<const TensorBase&>(x));
 }
 Tensor& operator=(Tensor &&x) & noexcept {

c10/util/intrusive_ptr.h (+5, -3)

@@ -664,15 +664,17 @@ struct MaybeOwnedTraits<c10::intrusive_ptr<T>> {
 toDestroy.release();
 }

-static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+static const owned_type& referenceFromBorrow(
+    const borrow_type& borrow) noexcept {
 return borrow;
 }

-static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+static const owned_type* pointerFromBorrow(
+    const borrow_type& borrow) noexcept {
 return &borrow;
 }

-static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
+static bool debugBorrowIsValid(const borrow_type& /*borrow*/) noexcept {
 return true;
 }
 };
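
Note: both this file and TensorBody.h annotate functions that cannot throw as noexcept; besides documenting the guarantee, that helps callers (and clang-tidy's exception-related analysis) rely on the no-throw contract. Sketch with a hypothetical wrapper type, not from this PR:

    #include <utility>

    struct Handle {
      int* ptr = nullptr;

      // Copy assignment only copies a raw pointer, so it can honestly be noexcept.
      Handle& operator=(const Handle& other) & noexcept {
        ptr = other.ptr;
        return *this;
      }

      Handle& operator=(Handle&& other) & noexcept {
        ptr = std::exchange(other.ptr, nullptr);
        return *this;
      }
    };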

tools/onnx/templates/rules.h.in (+1)

@@ -1,4 +1,5 @@
 #pragma once
+#include <cstdint>

 /**
 ${generated_comment}
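
Note: the new include suggests the generated rules header uses fixed-width integer types, which are only guaranteed to be declared after including <cstdint>; relying on a transitive include is fragile. A minimal illustration (the constant is made up):

    #include <cstdint>

    // uint64_t comes from <cstdint>; without the include this only compiles if
    // some other header happens to pull the type in transitively.
    constexpr std::uint64_t kAllRulesMask = ~std::uint64_t{0};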

torch/csrc/autograd/python_autograd.h (+1)

@@ -1,5 +1,6 @@
 #ifndef THP_AUTOGRAD_H
 #define THP_AUTOGRAD_H
+#include <torch/csrc/utils/pythoncapi_compat.h>

 PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
 void THPAutograd_initFunctions();

torch/csrc/autograd/python_fft_functions.h (+1)

@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>

 namespace torch::autograd {

torch/csrc/autograd/python_linalg_functions.h (+1)

@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>

 namespace torch::autograd {

torch/csrc/autograd/python_sparse_functions.h (+1)

@@ -1,4 +1,5 @@
 #pragma once
+#include <torch/csrc/utils/pythoncapi_compat.h>

 namespace torch::autograd {

torch/csrc/autograd/python_special_functions.h (+1, -1)

@@ -1,5 +1,5 @@
 #pragma once
-
+#include <torch/csrc/utils/pythoncapi_compat.h>
 namespace torch::autograd {

 void initSpecialFunctions(PyObject* module);

torch/csrc/cuda/Module.h (+1)

@@ -1,5 +1,6 @@
 #ifndef THCP_CUDA_MODULE_INC
 #define THCP_CUDA_MODULE_INC
+#include <torch/csrc/utils/pythoncapi_compat.h>

 PyObject* THCPModule_getDevice_wrap(PyObject* self);
 PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);

torch/csrc/cuda/nccl.cpp (+2)

@@ -273,6 +273,7 @@ struct NcclCommList {
 devices.data()));
 }
 NcclCommList(NcclCommList&& foo) = default;
+// NOLINTNEXTLINE(bugprone-exception-escape)
 ~NcclCommList() {
 if (comms) {
 for (const auto i : c10::irange(ndevices)) {
@@ -457,6 +458,7 @@ AutoNcclGroup::AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking)
 #endif
 }

+// NOLINTNEXTLINE(bugprone-exception-escape)
 AutoNcclGroup::~AutoNcclGroup() noexcept(false) {
 #if defined(NCCL_MAJOR) && (NCCL_MAJOR >= 2)
 if (comm_nonblocking_ && comm_ != nullptr) {
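
Note: destructors are implicitly noexcept, so bugprone-exception-escape warns when a destructor body can throw (an escaping exception would terminate the program); here the warning is suppressed rather than the teardown behavior changed. Generic sketch, not from this PR:

    #include <stdexcept>

    struct Connection {
      bool healthy = true;

      // Cleanup that can fail; real code might log or swallow the error instead.
      void close() {
        if (!healthy) {
          throw std::runtime_error("close failed");
        }
      }

      // NOLINTNEXTLINE(bugprone-exception-escape)
      ~Connection() {
        close();  // may throw out of a noexcept destructor; the suppression records the decision
      }
    };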

torch/csrc/cuda/shared/cudnn.cpp (+1, -2)

@@ -4,7 +4,6 @@
 #if defined(USE_CUDNN) || defined(USE_ROCM)
 #include <torch/csrc/utils/pybind.h>

-#include <array>
 #include <tuple>

 namespace {
@@ -22,7 +21,7 @@ version_tuple getCompileVersion() {

 version_tuple getRuntimeVersion() {
 #ifndef USE_STATIC_CUDNN
-int major, minor, patch;
+int major = 0, minor = 0, patch = 0;
 cudnnGetProperty(MAJOR_VERSION, &major);
 cudnnGetProperty(MINOR_VERSION, &minor);
 cudnnGetProperty(PATCH_LEVEL, &patch);

torch/csrc/profiler/collection.h (+1, -1)

@@ -378,7 +378,7 @@ struct TORCH_API Result : public std::enable_shared_from_this<Result> {
 }

 template <typename T, typename Fn>
-void visit_if_base(Fn&& fn) const {
+void visit_if_base(const Fn& fn) const {
 visit([&](const auto& extra_fields) {
 using extra_fields_t = typename std::remove_cv_t<
 typename std::remove_reference_t<decltype(extra_fields)>>;
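
Note: Fn&& here is a forwarding reference, but the callable is only invoked and never forwarded; clang-tidy 17's cppcoreguidelines-missing-std-forward (likely the motivating check) warns about exactly that, and const Fn& states the real intent. Sketch:

    template <typename Fn>
    void call_flagged(Fn&& fn) {  // forwarding reference that is never std::forward-ed
      fn();
    }

    template <typename Fn>
    void call_clean(const Fn& fn) {  // the function only invokes the callable
      fn();
    }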

torch/csrc/profiler/kineto_client_interface.h (+1, -1)

@@ -6,6 +6,6 @@
 namespace torch {

 // declare global_kineto_init for libtorch_cpu.so to call
-TORCH_API void global_kineto_init(void);
+TORCH_API void global_kineto_init();

 } // namespace torch

torch/csrc/profiler/kineto_shim.cpp (+1, -1)

@@ -222,7 +222,7 @@ bool collectivesProfilerExists() {

 #ifdef USE_KINETO
 static const std::string setTraceID(const std::string& trace_id) {
-if (trace_id == "") {
+if (trace_id.empty()) {
 return "";
 }
 std::stringstream configss;
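
Note: readability-container-size-empty prefers .empty() over comparisons against an empty string or size() == 0, since it states intent directly and works uniformly across containers. Sketch:

    #include <string>

    bool has_trace_id_old(const std::string& id) {
      return !(id == "");  // flagged: prefer !id.empty()
    }

    bool has_trace_id_new(const std::string& id) {
      return !id.empty();
    }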

torch/csrc/profiler/orchestration/observer.cpp (+2, -2)

@@ -39,15 +39,15 @@ ProfilerConfig::ProfilerConfig(
 bool with_flops,
 bool with_modules,
 ExperimentalConfig experimental_config,
-const std::string& trace_id)
+std::string trace_id)
 : state{state},
 experimental_config{std::move(experimental_config)},
 report_input_shapes{report_input_shapes},
 profile_memory{profile_memory},
 with_stack{with_stack},
 with_flops{with_flops},
 with_modules{with_modules},
-trace_id{trace_id} {}
+trace_id{std::move(trace_id)} {}

 bool ProfilerConfig::disabled() const {
 return state == torch::profiler::impl::ProfilerState::Disabled;

torch/csrc/profiler/orchestration/observer.h (+1, -1)

@@ -104,7 +104,7 @@ struct TORCH_API ProfilerConfig {
 bool with_flops = false,
 bool with_modules = false,
 ExperimentalConfig experimental_config = ExperimentalConfig(),
-const std::string& trace_id = "");
+std::string trace_id = "");

 bool disabled() const;
 bool global() const;
