
Commit b9d6f8c
cyyever authored, pytorchmergebot committed

Fix clang-tidy warnings in aten/src/ATen/core/*.cpp (pytorch#122572)

This PR fixes clang-tidy warnings in aten/src/ATen/core/*.cpp.
Pull Request resolved: pytorch#122572
Approved by: https://github.com/ezyang

1 parent 1e404c9 commit b9d6f8c
15 files changed: +49 −56 lines

aten/src/ATen/core/Dict.cpp (+3 −4)

@@ -1,7 +1,7 @@
 #include <ATen/core/Dict.h>

-namespace c10 {
-namespace detail {
+
+namespace c10::detail {
 bool operator==(const DictImpl& lhs, const DictImpl& rhs) {
   bool isEqualFastChecks =
       *lhs.elementTypes.keyType == *rhs.elementTypes.keyType &&
@@ -25,5 +25,4 @@ bool operator==(const DictImpl& lhs, const DictImpl& rhs) {

   return true;
 }
-} // namespace detail
-} // namespace c10
+} // namespace c10::detail
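
Aside (not part of the diff): the namespace edits in this commit apply clang-tidy's modernize-concat-nested-namespaces check, which suggests the C++17 nested namespace definition. A minimal sketch of the two spellings, with illustrative names:

    // Pre-C++17: each level opened and closed separately.
    namespace demo { namespace detail {
    inline int answer() { return 42; }
    }} // namespace demo::detail

    // C++17 nested namespace definition: one declaration, one closing brace.
    namespace demo2::detail {
    inline int answer() { return 42; }
    } // namespace demo2::detail

Both forms declare the same nested namespace; the short form simply removes the extra level of braces to keep balanced.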

aten/src/ATen/core/Dimname.cpp (+1 −1)

@@ -20,7 +20,7 @@ bool Dimname::isValidName(const std::string& name) {
   // letters A through Z, the underscore _ and, except for the first
   // character, the digits 0 through 9" (at least length 1)
   // https://docs.python.org/3/reference/lexical_analysis.html#identifiers
-  if (name.length() == 0) {
+  if (name.empty()) {
     return false;
   }
   for (auto it = name.begin(); it != name.end(); ++it) {
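
Aside (not part of the diff): this is clang-tidy's readability-container-size-empty check. empty() states the intent directly and is guaranteed O(1) for standard containers, whereas length() == 0 reads as a count comparison. A sketch with an illustrative helper:

    #include <string>

    bool is_blank(const std::string& s) {
      return s.empty();  // preferred: asks exactly the question we mean
      // return s.length() == 0;  // flagged by readability-container-size-empty
    }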

aten/src/ATen/core/Formatting.cpp (+8 −8)

@@ -160,7 +160,7 @@ static void __printIndent(std::ostream &stream, int64_t indent)

 static void printScale(std::ostream & stream, double scale) {
   FormatGuard guard(stream);
-  stream << defaultfloat << scale << " *" << std::endl;
+  stream << defaultfloat << scale << " *" << '\n';
 }
 static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t linesize, int64_t indent)
 {
@@ -178,7 +178,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
   }
   if(nColumnPerLine < self.size(1)) {
     if(firstColumn != 0) {
-      stream << std::endl;
+      stream << '\n';
     }
     stream << "Columns " << firstColumn+1 << " to " << lastColumn+1;
     __printIndent(stream, indent);
@@ -193,7 +193,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
     for (const auto c : c10::irange(firstColumn, lastColumn+1)) {
       stream << std::setw(sz) << row_ptr[c]/scale;
       if(c == lastColumn) {
-        stream << std::endl;
+        stream << '\n';
         if(l != self.size(0)-1) {
           if(scale != 1) {
             __printIndent(stream, indent);
@@ -239,15 +239,15 @@ static void __printTensor(std::ostream& stream, Tensor& self, int64_t linesize)
     if(start) {
       start = false;
     } else {
-      stream << std::endl;
+      stream << '\n';
     }
     stream << "(";
     Tensor tensor = self;
     for (const auto i : c10::irange(self.ndimension()-2)) {
       tensor = tensor.select(0, counter[i]);
       stream << counter[i]+1 << ",";
     }
-    stream << ".,.) = " << std::endl;
+    stream << ".,.) = " << '\n';
     __printMatrix(stream, tensor, linesize, 1);
   }
 }
@@ -279,7 +279,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
     tensor = tensor_.to(kCPU, kDouble).contiguous();
   }
   if(tensor.ndimension() == 0) {
-    stream << defaultfloat << tensor.data_ptr<double>()[0] << std::endl;
+    stream << defaultfloat << tensor.data_ptr<double>()[0] << '\n';
     stream << "[ " << tensor_.toString() << "{}";
   } else if(tensor.ndimension() == 1) {
     if (tensor.numel() > 0) {
@@ -289,7 +289,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
     }
     double* tensor_p = tensor.data_ptr<double>();
     for (const auto i : c10::irange(tensor.size(0))) {
-      stream << std::setw(sz) << tensor_p[i]/scale << std::endl;
+      stream << std::setw(sz) << tensor_p[i]/scale << '\n';
     }
   }
   stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "}";
@@ -329,7 +329,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
   if (tensor.getIntrusivePtr()->autograd_meta()) {
     auto& fw_grad = tensor._fw_grad(/* level */ 0);
     if (fw_grad.defined()) {
-      stream << ", tangent:" << std::endl << fw_grad;
+      stream << ", tangent:" << '\n' << fw_grad;
     }
   }
   stream << " ]";
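
Aside (not part of the diff): replacing std::endl with '\n' follows clang-tidy's performance-avoid-endl check. std::endl inserts a newline and then flushes the stream, which is wasted work inside printing loops like these; '\n' leaves flushing to the stream's normal buffering. A sketch:

    #include <iostream>

    int main() {
      for (int i = 0; i < 3; ++i) {
        std::cout << i << '\n';  // newline only, no forced flush per line
      }
      std::cout << std::flush;   // flush once at the end, if it matters
    }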

aten/src/ATen/core/IListRef_test.cpp (+2 −2)

@@ -103,7 +103,7 @@ TEST(ITensorListRefTest, Boxed_GetConstRefTensor) {
   const List<at::Tensor> boxed(vec);
   at::ITensorListRef list(boxed);
   static_assert(
-      std::is_same<decltype(*list.begin()), const at::Tensor&>::value,
+      std::is_same_v<decltype(*list.begin()), const at::Tensor&>,
       "Accessing elements from List<Tensor> through a ITensorListRef should be const references.");
   EXPECT_TRUE(boxed[0].is_same(*list.begin()));
   EXPECT_TRUE(boxed[1].is_same(*(++list.begin())));
@@ -113,7 +113,7 @@ TEST(ITensorListRefTest, Unboxed_GetConstRefTensor) {
   auto vec = get_tensor_vector();
   at::ITensorListRef list(vec);
   static_assert(
-      std::is_same<decltype(*list.begin()), const at::Tensor&>::value,
+      std::is_same_v<decltype(*list.begin()), const at::Tensor&>,
       "Accessing elements from ArrayRef<Tensor> through a ITensorListRef should be const references.");
   EXPECT_TRUE(vec[0].is_same(*list.begin()));
   EXPECT_TRUE(vec[1].is_same(*(++list.begin())));
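
Aside (not part of the diff): std::is_same_v is the C++17 variable-template shorthand for std::is_same<...>::value, suggested by clang-tidy's modernize-type-traits check. A sketch:

    #include <type_traits>

    static_assert(std::is_same_v<int, int>);       // concise C++17 form
    static_assert(std::is_same<int, int>::value);  // older, more verbose form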

aten/src/ATen/core/List.cpp (+3 −4)

@@ -1,7 +1,7 @@
 #include <ATen/core/List.h>

-namespace c10 {
-namespace detail {
+
+namespace c10::detail {
 bool operator==(const ListImpl& lhs, const ListImpl& rhs) {
   return *lhs.elementType == *rhs.elementType &&
       lhs.list.size() == rhs.list.size() &&
@@ -16,5 +16,4 @@ bool operator==(const ListImpl& lhs, const ListImpl& rhs) {
 ListImpl::ListImpl(list_type list_, TypePtr elementType_)
 : list(std::move(list_))
 , elementType(std::move(elementType_)) {}
-} // namespace detail
-} // namespace c10
+} // namespace c10::detail

aten/src/ATen/core/List_test.cpp (+3 −3)

@@ -1118,7 +1118,7 @@ TEST(ListTestNonIValueBasedList, sameValueDifferentStorage_thenIsReturnsFalse) {
 TEST(ListTest, canAccessStringByReference) {
   List<std::string> list({"one", "two"});
   const auto& listRef = list;
-  static_assert(std::is_same<decltype(listRef[1]), const std::string&>::value,
+  static_assert(std::is_same_v<decltype(listRef[1]), const std::string&>,
     "const List<std::string> access should be by const reference");
   std::string str = list[1];
   const std::string& strRef = listRef[1];
@@ -1130,7 +1130,7 @@ TEST(ListTest, canAccessOptionalStringByReference) {
   List<c10::optional<std::string>> list({"one", "two", c10::nullopt});
   const auto& listRef = list;
   static_assert(
-    std::is_same<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>::value,
+    std::is_same_v<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>,
     "List<c10::optional<std::string>> access should be by const reference");
   c10::optional<std::string> str1 = list[1];
   c10::optional<std::string> str2 = list[2];
@@ -1148,7 +1148,7 @@ TEST(ListTest, canAccessTensorByReference) {
   List<at::Tensor> list;
   const auto& listRef = list;
   static_assert(
-    std::is_same<decltype(listRef[0]), const at::Tensor&>::value,
+    std::is_same_v<decltype(listRef[0]), const at::Tensor&>,
     "List<at::Tensor> access should be by const reference");
 }


aten/src/ATen/core/NamedTensor.cpp (+2 −2)

@@ -121,9 +121,9 @@ void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names,
   }
   auto* meta = get_named_tensor_meta(impl);
   if (meta == nullptr) {
-    impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(NamedTensorMeta::HasNonWildcard, names));
+    impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(NamedTensorMeta::HasNonWildcard, std::move(names)));
   } else {
-    meta->set_names(NamedTensorMeta::HasNonWildcard, names);
+    meta->set_names(NamedTensorMeta::HasNonWildcard, std::move(names));
   }
 }

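
Aside (not part of the diff): names is a std::vector<Dimname>&& parameter, and a named rvalue reference is itself an lvalue, so passing it along without std::move silently copies the vector; presumably this is the cppcoreguidelines-rvalue-reference-param-not-moved warning being fixed. A sketch of the rule with illustrative types:

    #include <string>
    #include <utility>
    #include <vector>

    struct Meta {
      std::vector<std::string> names;
      void set_names(std::vector<std::string>&& n) {
        names = std::move(n);  // without std::move, n is an lvalue and this copies
      }
    };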

aten/src/ATen/core/PythonFallbackKernel.cpp (+3 −4)

@@ -120,8 +120,8 @@ void preDispatchFallback(const c10::OperatorHandle& op, c10::DispatchKeySet disp

 } // anonymous namespace

-namespace at {
-namespace impl {
+
+namespace at::impl {

 RestorePythonTLSSnapshot::RestorePythonTLSSnapshot() : saved_(safe_get_tls_on_entry()), guard_(safe_get_tls_on_entry()) {
   tls_on_entry = c10::nullopt;
@@ -148,8 +148,7 @@ MaybeSetTLSOnEntryGuard::~MaybeSetTLSOnEntryGuard() {
 }


-} // namespace impl
-} // namespace at
+} // namespace at::impl

 TORCH_LIBRARY_IMPL(_, Python, m) {
   m.fallback(torch::CppFunction::makeFromBoxedFunction<&pythonFallback>());
aten/src/ATen/core/PythonOpRegistrationTrampoline.cpp (+6 −7)

@@ -1,22 +1,22 @@
 #include <ATen/core/PythonOpRegistrationTrampoline.h>

-namespace at {
-namespace impl {
+namespace at::impl {

 // The strategy is that all python interpreters attempt to register themselves
 // as the main interpreter, but only one wins. Only that interpreter is
 // allowed to interact with the C++ dispatcher. Furthermore, when we execute
 // logic on that interpreter, we do so hermetically, never setting pyobj field
 // on Tensor.

-std::atomic<c10::impl::PyInterpreter*> PythonOpRegistrationTrampoline::interpreter_{nullptr};
+std::atomic<c10::impl::PyInterpreter*>
+    PythonOpRegistrationTrampoline::interpreter_{nullptr};

 c10::impl::PyInterpreter* PythonOpRegistrationTrampoline::getInterpreter() {
   return PythonOpRegistrationTrampoline::interpreter_.load();
-
 }

-bool PythonOpRegistrationTrampoline::registerInterpreter(c10::impl::PyInterpreter* interp) {
+bool PythonOpRegistrationTrampoline::registerInterpreter(
+    c10::impl::PyInterpreter* interp) {
   c10::impl::PyInterpreter* expected = nullptr;
   interpreter_.compare_exchange_strong(expected, interp);
   if (expected != nullptr) {
@@ -29,5 +29,4 @@ bool PythonOpRegistrationTrampoline::registerInterpreter(c10::impl::PyInterprete
   }
 }

-} // namespace impl
-} // namespace at
+} // namespace at::impl
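
Aside (not part of the diff): the context lines show a first-registrant-wins pattern, where compare_exchange_strong installs the interpreter only if the slot is still null. A self-contained sketch of that pattern, with illustrative names:

    #include <atomic>

    std::atomic<int*> g_slot{nullptr};

    // Stores p and returns true only for the first caller; later callers
    // find g_slot already set and leave it unchanged.
    bool register_once(int* p) {
      int* expected = nullptr;
      return g_slot.compare_exchange_strong(expected, p);
    }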

aten/src/ATen/core/Tensor.cpp (+2 −2)

@@ -72,9 +72,9 @@ void TensorBase::enforce_invariants() {

 void TensorBase::print() const {
   if (defined()) {
-    std::cerr << "[" << toString() << " " << sizes() << "]" << std::endl;
+    std::cerr << "[" << toString() << " " << sizes() << "]" << '\n';
   } else {
-    std::cerr << "[UndefinedTensor]" << std::endl;
+    std::cerr << "[UndefinedTensor]" << '\n';
   }
 }


aten/src/ATen/core/TorchDispatchUtils.cpp (+3 −4)

@@ -1,7 +1,7 @@
 #include <ATen/core/TorchDispatchUtils.h>

-namespace at {
-namespace impl {
+
+namespace at::impl {

 bool tensor_has_dispatch(const at::Tensor& t) {
   DispatchKeySet key_set({DispatchKey::Python, DispatchKey::PythonTLSSnapshot});
@@ -27,5 +27,4 @@ bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li) {
   return false;
 }

-} // namespace impl
-} // namespace at
+} // namespace at::impl

aten/src/ATen/core/VariableHooksInterface.cpp (+2 −2)

@@ -1,6 +1,6 @@
 #include <ATen/core/VariableHooksInterface.h>

-namespace at { namespace impl {
+namespace at::impl {

 namespace {
 VariableHooksInterface* hooks = nullptr;
@@ -17,4 +17,4 @@ bool HasVariableHooks() {
   return hooks != nullptr;
 }

-}} // namespace at::impl
+} // namespace at::impl

aten/src/ATen/core/Vitals.cpp (+3 −6)

@@ -2,8 +2,7 @@
 #include <cstdlib>
 #include <iostream>

-namespace at {
-namespace vitals {
+namespace at::vitals {

 APIVitals VitalsAPI;

@@ -78,8 +77,7 @@ bool APIVitals::setVital(
   auto iter = name_map_.find(vital_name);
   TorchVital* vital = nullptr;
   if (iter == name_map_.end()) {
-    auto r =
-        name_map_.emplace(vital_name, TorchVital(vital_name));
+    auto r = name_map_.emplace(vital_name, TorchVital(vital_name));
     vital = &r.first->second;
   } else {
     vital = &iter->second;
@@ -95,5 +93,4 @@ APIVitals::APIVitals() : vitals_enabled(false), name_map_() {
   setVital("CUDA", "used", "False", /* force = */ true);
 }

-} // namespace vitals
-} // namespace at
+} // namespace at::vitals

aten/src/ATen/core/adaption.cpp (+3 −5)

@@ -1,15 +1,13 @@
 #include <ATen/core/op_registration/adaption.h>

-namespace c10 {
-namespace impl {
+
+namespace c10::impl {

 void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
   TORCH_CHECK(false,
     "Expected all tensors to be on the same device, but "
-    // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
     "found at least two devices, ", common_device, " and ", tensor.device(), "! "
     "(when checking argument for argument ", argName, " in method ", methodName, ")");
 }

-} // namespace impl
-} // namespace c10
+} // namespace c10::impl

aten/src/ATen/core/library.cpp (+5 −2)

@@ -157,6 +157,7 @@ Library& Library::_def(c10::FunctionSchema&& schema, c10::OperatorName* out_name
 }
 #undef DEF_PRELUDE

+// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
 Library& Library::_def(std::variant<c10::OperatorName, c10::FunctionSchema>&& name_or_schema, CppFunction&& f, const std::vector<at::Tag>& tags) & {
   c10::FunctionSchema schema = [&] {
     if (std::holds_alternative<c10::FunctionSchema>(name_or_schema)){
@@ -218,6 +219,7 @@ at::OperatorName Library::_parseNameForLib(const char* name_str) const {
   return name;
 }

+// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
 Library& Library::_impl(const char* name_str, CppFunction&& f, _RegisterOrVerify rv) & {
   at::OperatorName name = _parseNameForLib(name_str);
   // See Note [Redundancy in registration code is OK]
@@ -257,6 +259,7 @@ c10::OperatorName Library::_resolve(const char* name_str) const {
 }
 #undef IMPL_PRELUDE

+// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
 Library& Library::_fallback(CppFunction&& f) & {
   TORCH_CHECK(kind_ == IMPL,
       "fallback(...): Cannot define an operator inside of a ", toString(kind_), " block. "
@@ -279,8 +282,8 @@ Library& Library::_fallback(CppFunction&& f) & {
     registrars_.emplace_back(
       c10::Dispatcher::singleton().registerFallback(
         k,
-        std::move(f.func_),
-        debugString(std::move(f.debug_), file_, line_)
+        f.func_,
+        debugString(f.debug_, file_, line_)
       )
     );
   }
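
Aside (not part of the diff): NOLINTNEXTLINE is clang-tidy's one-line suppression comment; naming the check keeps the suppression narrow and self-documenting. Here it silences cppcoreguidelines-rvalue-reference-param-not-moved for functions taking CppFunction&&, presumably because f must remain valid for reuse; note the last hunk also stops moving f.func_ and f.debug_, which appear to be passed to registerFallback once per dispatch key. A sketch of the suppression syntax, with an illustrative function:

    #include <string>

    // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
    void inspect(std::string&& s) {
      // Intentionally reads s without moving from it; the suppression
      // records that the unmoved && parameter is deliberate.
      (void)s.size();
    }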
