Skip to content

Commit 20f7695

Browse files
cyyever authored and pytorchmergebot committed
[12/N] Apply clang-tidy and fix warnings in headers of torch/csrc (pytorch#116486)
This PR follows pytorch#116751. Pull Request resolved: pytorch#116486 Approved by: https://github.com/albanD
1 parent 90df7c0 commit 20f7695

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

50 files changed

+120
-244
lines changed

torch/csrc/Exceptions.h

+4-4
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ extern PyObject *THPException_FatalError, *THPException_LinAlgError,
145145
// Throwing this exception means that the python error flags have been already
146146
// set and control should be immediately returned to the interpreter.
147147
struct python_error : public std::exception {
148-
python_error() : type(nullptr), value(nullptr), traceback(nullptr) {}
148+
python_error() {}
149149

150150
python_error(const python_error& other)
151151
: type(other.type),
@@ -244,9 +244,9 @@ struct python_error : public std::exception {
244244
PyErr_Restore(type, value, traceback);
245245
}
246246

247-
PyObject* type;
248-
PyObject* value;
249-
PyObject* traceback;
247+
PyObject* type{nullptr};
248+
PyObject* value{nullptr};
249+
PyObject* traceback{nullptr};
250250

251251
// Message to return to the user when 'what()' is invoked.
252252
std::string message;

torch/csrc/autograd/FunctionsManual.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,7 @@
1212
#include <ATen/ATen.h>
1313
#include <torch/csrc/autograd/generated/Functions.h>
1414

15-
namespace torch {
16-
namespace autograd {
17-
namespace generated {
18-
namespace details {
15+
namespace torch::autograd::generated::details {
1916

2017
extern const char* kCudnnDoubleBackwardMsg;
2118

@@ -1101,7 +1098,4 @@ mkldnn_rnn_layer_differentiable_backward(
11011098

11021099
Tensor values_backward(const Tensor& grad, const Tensor& self);
11031100

1104-
} // namespace details
1105-
} // namespace generated
1106-
} // namespace autograd
1107-
} // namespace torch
1101+
} // namespace torch::autograd::generated::details

torch/csrc/autograd/InferenceMode.h

+1-3
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,8 @@
33
#include <c10/core/InferenceMode.h>
44
#include <torch/csrc/Export.h>
55

6-
namespace torch {
7-
namespace autograd {
6+
namespace torch::autograd {
87

98
using InferenceMode = c10::InferenceMode;
109

1110
}
12-
} // namespace torch

torch/csrc/autograd/VariableTypeUtils.h

+3-7
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,9 @@
1717
#include <torch/csrc/autograd/jit_decomp_interface.h>
1818
#include <torch/csrc/utils/variadic.h>
1919

20-
#include <array>
2120
#include <cstddef>
2221
#include <functional>
23-
#include <initializer_list>
2422
#include <memory>
25-
#include <stdexcept>
26-
#include <string>
27-
#include <tuple>
2823
#include <utility>
2924
#include <vector>
3025

@@ -117,8 +112,8 @@ inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
117112
}
118113

119114
inline void rebase_history(
120-
std::vector<Variable>&& vars,
121-
std::shared_ptr<Node> grad_fn) {
115+
const std::vector<Variable>& vars,
116+
const std::shared_ptr<Node>& grad_fn) {
122117
if (grad_fn) {
123118
for (auto& var : vars) {
124119
if (var.defined()) {
@@ -137,6 +132,7 @@ inline void increment_version(const at::Tensor& t) {
137132

138133
struct Flatten : IterArgs<Flatten> {
139134
Flatten(variable_list& out) : out(out) {}
135+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
140136
variable_list& out;
141137
void operator()(const at::Tensor& x) {
142138
out.emplace_back(x);

torch/csrc/autograd/anomaly_mode.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,7 @@
44
#include <memory>
55
#include <string>
66

7-
namespace torch {
8-
namespace autograd {
7+
namespace torch::autograd {
98

109
// forward declaration of Node from function.h
1110
struct Node;
@@ -69,5 +68,4 @@ struct TORCH_API AnomalyMetadata {
6968
std::shared_ptr<Node> parent_;
7069
};
7170

72-
} // namespace autograd
73-
} // namespace torch
71+
} // namespace torch::autograd

torch/csrc/autograd/autograd.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <torch/csrc/autograd/variable.h>
44

5-
namespace torch {
6-
namespace autograd {
5+
namespace torch::autograd {
76

87
/// Computes the sum of gradients of given tensors with respect to graph leaves.
98
///
@@ -102,5 +101,4 @@ TORCH_API uint64_t enter_dual_level();
102101
TORCH_API void exit_dual_level(uint64_t level);
103102

104103
} // namespace forward_ad
105-
} // namespace autograd
106-
} // namespace torch
104+
} // namespace torch::autograd

torch/csrc/autograd/autograd_not_implemented_fallback.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <torch/library.h>
44

5-
namespace torch {
6-
namespace autograd {
5+
namespace torch::autograd {
76

87
// Default DispatchKey::Autograd fallback for built-in operators.
98
// Can be registered for custom operators.
@@ -30,5 +29,4 @@ enum class AutogradFallbackMode {
3029
TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
3130
TORCH_API AutogradFallbackMode getAutogradFallbackMode();
3231

33-
} // namespace autograd
34-
} // namespace torch
32+
} // namespace torch::autograd

torch/csrc/autograd/cpp_hook.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33
#include <functional>
44
#include <memory>
55

6-
namespace torch {
7-
namespace autograd {
6+
namespace torch::autograd {
87

98
using hooks_list =
109
std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
@@ -27,5 +26,4 @@ struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
2726
size_t value_idx_;
2827
};
2928

30-
} // namespace autograd
31-
} // namespace torch
29+
} // namespace torch::autograd

torch/csrc/autograd/custom_function.h

+8-10
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,7 @@
88
#include <torch/csrc/autograd/variable.h>
99
#include <vector>
1010

11-
namespace torch {
12-
namespace autograd {
11+
namespace torch::autograd {
1312

1413
using optional_variable_list = std::vector<c10::optional<Variable>>;
1514
using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
@@ -97,7 +96,7 @@ struct TORCH_API Function {
9796
// the parameter X.
9897
template <typename X = T, typename... Args>
9998
static auto apply(Args&&... args)
100-
-> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>>;
99+
-> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>>;
101100
};
102101

103102
/// Context to save information during `forward` that can be accessed in
@@ -228,8 +227,8 @@ inline void extract_vars(
228227
}
229228

230229
template <typename T>
231-
typename std::enable_if<std::is_same<T, variable_list>::value, T>::type
232-
to_output_type(std::vector<c10::optional<Variable>>& output_list) {
230+
std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
231+
std::vector<c10::optional<Variable>>& output_list) {
233232
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
234233
variable_list result;
235234
std::transform(
@@ -241,8 +240,8 @@ to_output_type(std::vector<c10::optional<Variable>>& output_list) {
241240
}
242241

243242
template <typename T>
244-
typename std::enable_if<std::is_same<T, Variable>::value, T>::type
245-
to_output_type(std::vector<c10::optional<Variable>>& output_list) {
243+
std::enable_if_t<std::is_same_v<T, Variable>, T> to_output_type(
244+
std::vector<c10::optional<Variable>>& output_list) {
246245
return *output_list[0];
247246
}
248247

@@ -264,7 +263,7 @@ inline std::vector<c10::optional<Variable>> to_optional(variable_list& output) {
264263
template <class T>
265264
template <typename X, typename... Args>
266265
auto Function<T>::apply(Args&&... args)
267-
-> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>> {
266+
-> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>> {
268267
const auto& functorch_tls = at::functorch::functorchTLSAccessor();
269268
if (functorch_tls) {
270269
// Function support for functorch is handled in Python.
@@ -434,5 +433,4 @@ void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
434433
ctx_.grad_fn_ = node;
435434
}
436435

437-
} // namespace autograd
438-
} // namespace torch
436+
} // namespace torch::autograd

torch/csrc/autograd/edge.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66

77
#include <c10/util/hash.h>
88

9-
namespace torch {
10-
namespace autograd {
9+
namespace torch::autograd {
1110

1211
struct Node;
1312

@@ -38,8 +37,7 @@ struct Edge {
3837
/// The identifier of a particular input to the function.
3938
uint32_t input_nr;
4039
};
41-
} // namespace autograd
42-
} // namespace torch
40+
} // namespace torch::autograd
4341

4442
// The idiomatic way of enabling use of a custom type as the key of hash
4543
// containers in C++11. This method removes the requirement of having to pass

torch/csrc/autograd/engine.h

+3-7
Original file line numberDiff line numberDiff line change
@@ -27,14 +27,11 @@
2727
#include <utility>
2828
#include <vector>
2929

30-
namespace torch {
31-
namespace autograd {
30+
namespace torch::autograd {
3231
struct ReadyQueue;
3332
}
34-
} // namespace torch
3533

36-
namespace torch {
37-
namespace autograd {
34+
namespace torch::autograd {
3835

3936
// Maximum reentrant backward depth before switching to a new thread
4037
// This limit is based on the TSAN's deadlock detector, where it will
@@ -291,5 +288,4 @@ struct TORCH_API Engine {
291288
using EngineStub = Engine& (*)();
292289
TORCH_API void set_default_engine_stub(EngineStub stub);
293290

294-
} // namespace autograd
295-
} // namespace torch
291+
} // namespace torch::autograd

torch/csrc/autograd/forward_grad.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33
#include <ATen/core/Tensor.h>
44
#include <unordered_set>
55

6-
namespace torch {
7-
namespace autograd {
6+
namespace torch::autograd {
87

98
// [ Using ForwardGrad ]
109
// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
@@ -208,5 +207,4 @@ struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
208207
mutable std::mutex mutex_;
209208
};
210209

211-
} // namespace autograd
212-
} // namespace torch
210+
} // namespace torch::autograd

torch/csrc/autograd/function.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,7 @@
2424
#include <utility>
2525
#include <vector>
2626

27-
namespace torch {
28-
namespace autograd {
27+
namespace torch::autograd {
2928

3029
struct Edge;
3130
struct FunctionPostHook;
@@ -757,5 +756,4 @@ struct TypeAndSize {
757756
at::TensorOptions options;
758757
};
759758

760-
} // namespace autograd
761-
} // namespace torch
759+
} // namespace torch::autograd

torch/csrc/autograd/function_hook.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,7 @@ class SwapSavedVariables;
1212

1313
// A hook that's called on gradients
1414

15-
namespace torch {
16-
namespace autograd {
15+
namespace torch::autograd {
1716

1817
using Variable = at::Tensor;
1918
using variable_list = std::vector<Variable>;
@@ -62,5 +61,4 @@ struct TORCH_API PostAccumulateGradHook {
6261
}
6362
};
6463

65-
} // namespace autograd
66-
} // namespace torch
64+
} // namespace torch::autograd

torch/csrc/autograd/grad_mode.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,9 @@
33
#include <ATen/core/grad_mode.h>
44
#include <torch/csrc/Export.h>
55

6-
namespace torch {
7-
namespace autograd {
6+
namespace torch::autograd {
87

98
using GradMode = at::GradMode;
109
using AutoGradMode = at::AutoGradMode;
1110

12-
} // namespace autograd
13-
} // namespace torch
11+
} // namespace torch::autograd

torch/csrc/autograd/graph_task.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66
#include <torch/csrc/autograd/utils/warnings.h>
77
#include <vector>
88

9-
namespace torch {
10-
namespace autograd {
9+
namespace torch::autograd {
1110

1211
using edge_list = std::vector<Edge>;
1312
struct ReadyQueue;
@@ -239,5 +238,4 @@ TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
239238
TORCH_API int get_current_graph_task_id();
240239
void add_node_to_current_graph_task_exec_info(Node* fn);
241240

242-
} // namespace autograd
243-
} // namespace torch
241+
} // namespace torch::autograd

torch/csrc/autograd/input_buffer.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,7 @@
1313
#include <c10/util/Optional.h>
1414
#include <torch/csrc/autograd/variable.h>
1515

16-
namespace torch {
17-
namespace autograd {
16+
namespace torch::autograd {
1817

1918
struct InputBuffer {
2019
explicit InputBuffer(size_t size) : buffer(size) {}
@@ -44,5 +43,4 @@ struct InputBuffer {
4443
std::vector<Variable> buffer;
4544
};
4645

47-
} // namespace autograd
48-
} // namespace torch
46+
} // namespace torch::autograd

torch/csrc/autograd/input_metadata.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,7 @@
2222
#include <cstdint>
2323
#include <utility>
2424

25-
namespace torch {
26-
namespace autograd {
25+
namespace torch::autograd {
2726

2827
using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
2928
using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;
@@ -109,5 +108,4 @@ struct TORCH_API InputMetadata {
109108
bool is_nested_ = false;
110109
bool was_default_constructed_ = true;
111110
};
112-
} // namespace autograd
113-
} // namespace torch
111+
} // namespace torch::autograd

torch/csrc/autograd/jit_decomp_interface.h

+2-6
Original file line numberDiff line numberDiff line change
@@ -27,9 +27,7 @@
2727
// For extra context, see VariableHooksInterface.h, where a similar technique
2828
// is used
2929

30-
namespace torch {
31-
namespace autograd {
32-
namespace impl {
30+
namespace torch::autograd::impl {
3331

3432
struct TORCH_API JitDecompInterface {
3533
virtual ~JitDecompInterface() = default;
@@ -49,6 +47,4 @@ struct TORCH_API JitDecompRegisterer {
4947
}
5048
};
5149

52-
} // namespace impl
53-
} // namespace autograd
54-
} // namespace torch
50+
} // namespace torch::autograd::impl

0 commit comments

Comments (0)