Skip to content

Commit b9d6f8c

Browse files
cyyever authored and pytorchmergebot committed
Fix clang-tidy warnings in aten/src/ATen/core/*.cpp (pytorch#122572)
This PR fixes clang-tidy warnings in aten/src/ATen/core/*.cpp. Pull Request resolved: pytorch#122572 Approved by: https://github.com/ezyang
1 parent 1e404c9 commit b9d6f8c

15 files changed

+49
-56
lines changed

aten/src/ATen/core/Dict.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#include <ATen/core/Dict.h>
22

3-
namespace c10 {
4-
namespace detail {
3+
4+
namespace c10::detail {
55
bool operator==(const DictImpl& lhs, const DictImpl& rhs) {
66
bool isEqualFastChecks =
77
*lhs.elementTypes.keyType == *rhs.elementTypes.keyType &&
@@ -25,5 +25,4 @@ bool operator==(const DictImpl& lhs, const DictImpl& rhs) {
2525

2626
return true;
2727
}
28-
} // namespace detail
29-
} // namespace c10
28+
} // namespace c10::detail

aten/src/ATen/core/Dimname.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ bool Dimname::isValidName(const std::string& name) {
2020
// letters A through Z, the underscore _ and, except for the first
2121
// character, the digits 0 through 9" (at least length 1)
2222
// https://docs.python.org/3/reference/lexical_analysis.html#identifiers
23-
if (name.length() == 0) {
23+
if (name.empty()) {
2424
return false;
2525
}
2626
for (auto it = name.begin(); it != name.end(); ++it) {

aten/src/ATen/core/Formatting.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ static void __printIndent(std::ostream &stream, int64_t indent)
160160

161161
static void printScale(std::ostream & stream, double scale) {
162162
FormatGuard guard(stream);
163-
stream << defaultfloat << scale << " *" << std::endl;
163+
stream << defaultfloat << scale << " *" << '\n';
164164
}
165165
static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t linesize, int64_t indent)
166166
{
@@ -178,7 +178,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
178178
}
179179
if(nColumnPerLine < self.size(1)) {
180180
if(firstColumn != 0) {
181-
stream << std::endl;
181+
stream << '\n';
182182
}
183183
stream << "Columns " << firstColumn+1 << " to " << lastColumn+1;
184184
__printIndent(stream, indent);
@@ -193,7 +193,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
193193
for (const auto c : c10::irange(firstColumn, lastColumn+1)) {
194194
stream << std::setw(sz) << row_ptr[c]/scale;
195195
if(c == lastColumn) {
196-
stream << std::endl;
196+
stream << '\n';
197197
if(l != self.size(0)-1) {
198198
if(scale != 1) {
199199
__printIndent(stream, indent);
@@ -239,15 +239,15 @@ static void __printTensor(std::ostream& stream, Tensor& self, int64_t linesize)
239239
if(start) {
240240
start = false;
241241
} else {
242-
stream << std::endl;
242+
stream << '\n';
243243
}
244244
stream << "(";
245245
Tensor tensor = self;
246246
for (const auto i : c10::irange(self.ndimension()-2)) {
247247
tensor = tensor.select(0, counter[i]);
248248
stream << counter[i]+1 << ",";
249249
}
250-
stream << ".,.) = " << std::endl;
250+
stream << ".,.) = " << '\n';
251251
__printMatrix(stream, tensor, linesize, 1);
252252
}
253253
}
@@ -279,7 +279,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
279279
tensor = tensor_.to(kCPU, kDouble).contiguous();
280280
}
281281
if(tensor.ndimension() == 0) {
282-
stream << defaultfloat << tensor.data_ptr<double>()[0] << std::endl;
282+
stream << defaultfloat << tensor.data_ptr<double>()[0] << '\n';
283283
stream << "[ " << tensor_.toString() << "{}";
284284
} else if(tensor.ndimension() == 1) {
285285
if (tensor.numel() > 0) {
@@ -289,7 +289,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
289289
}
290290
double* tensor_p = tensor.data_ptr<double>();
291291
for (const auto i : c10::irange(tensor.size(0))) {
292-
stream << std::setw(sz) << tensor_p[i]/scale << std::endl;
292+
stream << std::setw(sz) << tensor_p[i]/scale << '\n';
293293
}
294294
}
295295
stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "}";
@@ -329,7 +329,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
329329
if (tensor.getIntrusivePtr()->autograd_meta()) {
330330
auto& fw_grad = tensor._fw_grad(/* level */ 0);
331331
if (fw_grad.defined()) {
332-
stream << ", tangent:" << std::endl << fw_grad;
332+
stream << ", tangent:" << '\n' << fw_grad;
333333
}
334334
}
335335
stream << " ]";

aten/src/ATen/core/IListRef_test.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ TEST(ITensorListRefTest, Boxed_GetConstRefTensor) {
103103
const List<at::Tensor> boxed(vec);
104104
at::ITensorListRef list(boxed);
105105
static_assert(
106-
std::is_same<decltype(*list.begin()), const at::Tensor&>::value,
106+
std::is_same_v<decltype(*list.begin()), const at::Tensor&>,
107107
"Accessing elements from List<Tensor> through a ITensorListRef should be const references.");
108108
EXPECT_TRUE(boxed[0].is_same(*list.begin()));
109109
EXPECT_TRUE(boxed[1].is_same(*(++list.begin())));
@@ -113,7 +113,7 @@ TEST(ITensorListRefTest, Unboxed_GetConstRefTensor) {
113113
auto vec = get_tensor_vector();
114114
at::ITensorListRef list(vec);
115115
static_assert(
116-
std::is_same<decltype(*list.begin()), const at::Tensor&>::value,
116+
std::is_same_v<decltype(*list.begin()), const at::Tensor&>,
117117
"Accessing elements from ArrayRef<Tensor> through a ITensorListRef should be const references.");
118118
EXPECT_TRUE(vec[0].is_same(*list.begin()));
119119
EXPECT_TRUE(vec[1].is_same(*(++list.begin())));

aten/src/ATen/core/List.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#include <ATen/core/List.h>
22

3-
namespace c10 {
4-
namespace detail {
3+
4+
namespace c10::detail {
55
bool operator==(const ListImpl& lhs, const ListImpl& rhs) {
66
return *lhs.elementType == *rhs.elementType &&
77
lhs.list.size() == rhs.list.size() &&
@@ -16,5 +16,4 @@ bool operator==(const ListImpl& lhs, const ListImpl& rhs) {
1616
ListImpl::ListImpl(list_type list_, TypePtr elementType_)
1717
: list(std::move(list_))
1818
, elementType(std::move(elementType_)) {}
19-
} // namespace detail
20-
} // namespace c10
19+
} // namespace c10::detail

aten/src/ATen/core/List_test.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1118,7 +1118,7 @@ TEST(ListTestNonIValueBasedList, sameValueDifferentStorage_thenIsReturnsFalse) {
11181118
TEST(ListTest, canAccessStringByReference) {
11191119
List<std::string> list({"one", "two"});
11201120
const auto& listRef = list;
1121-
static_assert(std::is_same<decltype(listRef[1]), const std::string&>::value,
1121+
static_assert(std::is_same_v<decltype(listRef[1]), const std::string&>,
11221122
"const List<std::string> access should be by const reference");
11231123
std::string str = list[1];
11241124
const std::string& strRef = listRef[1];
@@ -1130,7 +1130,7 @@ TEST(ListTest, canAccessOptionalStringByReference) {
11301130
List<c10::optional<std::string>> list({"one", "two", c10::nullopt});
11311131
const auto& listRef = list;
11321132
static_assert(
1133-
std::is_same<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>::value,
1133+
std::is_same_v<decltype(listRef[1]), c10::optional<std::reference_wrapper<const std::string>>>,
11341134
"List<c10::optional<std::string>> access should be by const reference");
11351135
c10::optional<std::string> str1 = list[1];
11361136
c10::optional<std::string> str2 = list[2];
@@ -1148,7 +1148,7 @@ TEST(ListTest, canAccessTensorByReference) {
11481148
List<at::Tensor> list;
11491149
const auto& listRef = list;
11501150
static_assert(
1151-
std::is_same<decltype(listRef[0]), const at::Tensor&>::value,
1151+
std::is_same_v<decltype(listRef[0]), const at::Tensor&>,
11521152
"List<at::Tensor> access should be by const reference");
11531153
}
11541154

aten/src/ATen/core/NamedTensor.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,9 +121,9 @@ void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names,
121121
}
122122
auto* meta = get_named_tensor_meta(impl);
123123
if (meta == nullptr) {
124-
impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(NamedTensorMeta::HasNonWildcard, names));
124+
impl->set_named_tensor_meta(std::make_unique<NamedTensorMeta>(NamedTensorMeta::HasNonWildcard, std::move(names)));
125125
} else {
126-
meta->set_names(NamedTensorMeta::HasNonWildcard, names);
126+
meta->set_names(NamedTensorMeta::HasNonWildcard, std::move(names));
127127
}
128128
}
129129

aten/src/ATen/core/PythonFallbackKernel.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,8 @@ void preDispatchFallback(const c10::OperatorHandle& op, c10::DispatchKeySet disp
120120

121121
} // anonymous namespace
122122

123-
namespace at {
124-
namespace impl {
123+
124+
namespace at::impl {
125125

126126
RestorePythonTLSSnapshot::RestorePythonTLSSnapshot() : saved_(safe_get_tls_on_entry()), guard_(safe_get_tls_on_entry()) {
127127
tls_on_entry = c10::nullopt;
@@ -148,8 +148,7 @@ MaybeSetTLSOnEntryGuard::~MaybeSetTLSOnEntryGuard() {
148148
}
149149

150150

151-
} // namespace impl
152-
} // namespace at
151+
} // namespace at::impl
153152

154153
TORCH_LIBRARY_IMPL(_, Python, m) {
155154
m.fallback(torch::CppFunction::makeFromBoxedFunction<&pythonFallback>());
Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,22 @@
11
#include <ATen/core/PythonOpRegistrationTrampoline.h>
22

3-
namespace at {
4-
namespace impl {
3+
namespace at::impl {
54

65
// The strategy is that all python interpreters attempt to register themselves
76
// as the main interpreter, but only one wins. Only that interpreter is
87
// allowed to interact with the C++ dispatcher. Furthermore, when we execute
98
// logic on that interpreter, we do so hermetically, never setting pyobj field
109
// on Tensor.
1110

12-
std::atomic<c10::impl::PyInterpreter*> PythonOpRegistrationTrampoline::interpreter_{nullptr};
11+
std::atomic<c10::impl::PyInterpreter*>
12+
PythonOpRegistrationTrampoline::interpreter_{nullptr};
1313

1414
c10::impl::PyInterpreter* PythonOpRegistrationTrampoline::getInterpreter() {
1515
return PythonOpRegistrationTrampoline::interpreter_.load();
16-
1716
}
1817

19-
bool PythonOpRegistrationTrampoline::registerInterpreter(c10::impl::PyInterpreter* interp) {
18+
bool PythonOpRegistrationTrampoline::registerInterpreter(
19+
c10::impl::PyInterpreter* interp) {
2020
c10::impl::PyInterpreter* expected = nullptr;
2121
interpreter_.compare_exchange_strong(expected, interp);
2222
if (expected != nullptr) {
@@ -29,5 +29,4 @@ bool PythonOpRegistrationTrampoline::registerInterpreter(c10::impl::PyInterprete
2929
}
3030
}
3131

32-
} // namespace impl
33-
} // namespace at
32+
} // namespace at::impl

aten/src/ATen/core/Tensor.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,9 @@ void TensorBase::enforce_invariants() {
7272

7373
void TensorBase::print() const {
7474
if (defined()) {
75-
std::cerr << "[" << toString() << " " << sizes() << "]" << std::endl;
75+
std::cerr << "[" << toString() << " " << sizes() << "]" << '\n';
7676
} else {
77-
std::cerr << "[UndefinedTensor]" << std::endl;
77+
std::cerr << "[UndefinedTensor]" << '\n';
7878
}
7979
}
8080

0 commit comments

Comments (0)