
Commit 73604ee

cyyever authored and pytorchmergebot committed
[20/N] Fix clang-tidy warnings in jit (pytorch#133399)
Follows pytorch#133067

Pull Request resolved: pytorch#133399
Approved by: https://github.com/Skylion007
1 parent 019b808 commit 73604ee


42 files changed: 133 additions, 318 deletions
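
Nearly every hunk below applies the same modernization: collapsing the pre-C++17 one-namespace-per-line style into a single C++17 nested namespace definition, the pattern clang-tidy's modernize-concat-nested-namespaces check suggests. A minimal compilable sketch of the two equivalent forms (the hello function is a hypothetical placeholder, not from the patch):

#include <iostream>

// Pre-C++17: three declarations, three matching closing braces.
// namespace torch {
// namespace jit {
// namespace fuser {
// void hello();
// } // namespace fuser
// } // namespace jit
// } // namespace torch

// C++17 nested namespace definition: same entity, one declaration,
// one closing brace.
namespace torch::jit::fuser {
void hello() { // hypothetical placeholder function
  std::cout << "hello from torch::jit::fuser\n";
}
} // namespace torch::jit::fuser

int main() {
  torch::jit::fuser::hello();
}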

torch/csrc/jit/codegen/fuser/arg_spec.h (2 additions, 6 deletions)

@@ -8,9 +8,7 @@
 #include <cstdint>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Describes the (runtime) arguments to a kernel.
 // ArgSpecs are also used as keys to lookup instantiated kernels, so
@@ -55,6 +53,4 @@ struct TORCH_API ArgSpec {
   int device_;
 };
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/codegen.cpp (4 additions, 8 deletions)

@@ -18,9 +18,7 @@
 #include <tuple>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Template for computing the offset into the tensor to access a value
 static auto dim_calc = at::jit::CodeTemplate(R"(
@@ -538,7 +536,7 @@ std::string generateKernel(
   // places where the constant None node is used
   // Note: No need to iterate over reference as n is a pointer
   for (const auto n : graph.nodes()) {
-    static_assert(std::is_pointer<decltype(n)>::value, "n must be a pointer");
+    static_assert(std::is_pointer_v<decltype(n)>, "n must be a pointer");
     // Note: FusedConcat nodes work by narrowing the output Tensors before the
     // kernel runs
     if (n->kind() == prim::FusedConcat)
@@ -680,11 +678,9 @@ std::string generateKernel(
   }
 
   if (debugFuser()) {
-    std::cerr << "fusion code:" << code_string << std::endl;
+    std::cerr << "fusion code:" << code_string << '\n';
   }
   return code_string;
 }
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser
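
Two other fixes appear in codegen.cpp above: the C++17 variable template std::is_pointer_v<T> replaces the wordier std::is_pointer<T>::value (the shape of change clang-tidy's modernize-type-traits check suggests), and '\n' replaces std::endl so the debug print no longer forces a stream flush on every use. A self-contained sketch of both, with names of my own invention:

#include <iostream>
#include <type_traits>

template <typename T>
void describe(T value) {
  // Variable template: equivalent to std::is_pointer<T>::value, less noise.
  if constexpr (std::is_pointer_v<T>) {
    std::cerr << "pointer to: " << *value << '\n'; // '\n' does not flush
  } else {
    std::cerr << "value: " << value << '\n';
  }
}

int main() {
  int x = 42;
  describe(x);  // prints "value: 42"
  describe(&x); // prints "pointer to: 42"
}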

torch/csrc/jit/codegen/fuser/codegen.h (2 additions, 6 deletions)

@@ -9,9 +9,7 @@
 #include <string>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Creates a CPU or CUDA kernel for the given graph.
 // Returns the C++ or CUDA string implementing the kernel.
@@ -23,6 +21,4 @@ TORCH_API std::string generateKernel(
     const std::vector<std::pair<const Value*, const TensorDesc>>& outputs,
     const bool use_cuda);
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/compiler.cpp (2 additions, 6 deletions)

@@ -30,9 +30,7 @@ std::mutex& fusionBackendLock() {
 }
 } // namespace
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 static std::unordered_map<at::Device::Type, FusedKernelConstructor>&
 getFusionBackends() {
@@ -297,6 +295,4 @@ std::shared_ptr<FusedKernel> compileKernel(
       spec.hasRandom());
 }
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/compiler.h (2 additions, 6 deletions)

@@ -11,9 +11,7 @@
 #include <cstdint>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Performs device-independent "upfront" compilation of the given fusion_group,
 // if it has not been registered already.
@@ -55,6 +53,4 @@ struct TORCH_API RegisterFusionBackend {
   }
 };
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp (4 additions, 10 deletions)

@@ -115,14 +115,12 @@ FusedKernelCUDA::FusedKernelCUDA(
   // Acquires device and NVRTC properties (for compile arch and occupancy
   // calculations)
   prop_ = at::cuda::getCurrentDeviceProperties();
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  int major, minor;
+  int major = 0, minor = 0;
   bool compile_to_sass = false;
   codegenOutputQuery(prop_, major, minor, compile_to_sass);
 
   // Creates the NVRTC program
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  nvrtcProgram program;
+  nvrtcProgram program{};
   AT_CUDA_NVRTC_CHECK(nvrtc().nvrtcCreateProgram(
       &program, code_.c_str(), nullptr, 0, nullptr, nullptr));
 
@@ -144,17 +142,14 @@ FusedKernelCUDA::FusedKernelCUDA(
       "compute_" +
 #endif
       std::to_string(major) + std::to_string(minor);
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   const std::vector<const char*> args = {
       "--std=c++17", compute.c_str(), "-default-device"};
 #endif
   const auto result =
       nvrtc().nvrtcCompileProgram(program, args.size(), args.data());
   if (result != NVRTC_SUCCESS) {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t logsize;
+    size_t logsize = 0;
     AT_CUDA_NVRTC_CHECK(nvrtc().nvrtcGetProgramLogSize(program, &logsize));
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::vector<char> log(logsize);
     AT_CUDA_NVRTC_CHECK(nvrtc().nvrtcGetProgramLog(program, log.data()));
     std::stringstream cu;
@@ -164,8 +159,7 @@ FusedKernelCUDA::FusedKernelCUDA(
   ResourceGuard holdProgram(
       [&] { AT_CUDA_NVRTC_CHECK(nvrtc().nvrtcDestroyProgram(&program)); });
   AT_CUDA_NVRTC_CHECK(result);
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  size_t ptx_size;
+  size_t ptx_size = 0;
 #if defined(CUDA_VERSION) && CUDA_VERSION >= 11010
   // compile_to_sass determines whether we are generating SASS or PTX, hence
   // the different API.
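
The fused_kernel.cpp hunks above drop NOLINTNEXTLINE(cppcoreguidelines-init-variables) suppressions by initializing the variables instead: plain scalars get = 0 and the nvrtcProgram handle gets value initialization via {}. A standalone sketch of the pattern; Handle and getSize are hypothetical stand-ins for the NVRTC out-parameter style, not real API:

#include <cstddef>
#include <cstdio>

struct Handle { void* impl; }; // hypothetical stand-in for nvrtcProgram

// Out-parameter API in the style of nvrtcGetProgramLogSize (hypothetical).
void getSize(std::size_t* out) { *out = 128; }

int main() {
  // Before: `std::size_t logsize;` stays uninitialized until getSize() runs,
  // which clang-tidy only accepts behind a NOLINTNEXTLINE suppression.
  // After: initialize at the point of declaration instead.
  std::size_t logsize = 0;
  getSize(&logsize);

  Handle h{}; // value initialization: h.impl is a null pointer, not garbage
  std::printf("logsize=%zu impl=%p\n", logsize, h.impl);
}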

torch/csrc/jit/codegen/fuser/cuda/fused_kernel.h (2 additions, 8 deletions)

@@ -12,10 +12,7 @@
 #include <string>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
-namespace cuda {
+namespace torch::jit::fuser::cuda {
 
 // query codegen output arch and target
 TORCH_CUDA_CU_API void codegenOutputQuery(
@@ -60,7 +57,4 @@ struct TORCH_CUDA_CU_API FusedKernelCUDA
   CUfunction function_;
 };
 
-} // namespace cuda
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser::cuda

torch/csrc/jit/codegen/fuser/cuda/resource_strings.h (2 additions, 8 deletions)

@@ -3,10 +3,7 @@
 #include <ATen/code_template.h>
 #include <torch/csrc/Export.h>
 
-namespace torch {
-namespace jit {
-namespace fuser {
-namespace cuda {
+namespace torch::jit::fuser::cuda {
 
 /*with type_as not checking type of its input, a fusion group can have non-fp32
 tensor as input. Correct code for this case is generated, however, nvrtc does
@@ -405,7 +402,4 @@ __device__ float __bfloat162float(const __nv_bfloat16 a) {
 )";
 #endif
 
-} // namespace cuda
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser::cuda

torch/csrc/jit/codegen/fuser/executor.cpp (3 additions, 12 deletions)

@@ -14,15 +14,9 @@
 #include <optional>
 
 #include <algorithm>
-#include <iostream> // TODO: remove, debugging only
-#include <map>
-#include <stdexcept>
-#include <tuple>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Returns the "map size" for this run, which is the common size for all
 // intermediate tensors.
@@ -215,8 +209,7 @@ static void launchFusion(
 
   // Computes map_size, numel from the first input
   at::IntArrayRef map_size;
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  uint32_t numel;
+  uint32_t numel = 0;
   std::vector<int64_t> keep_alive_size;
   if (fusion.chunkDesc()[0].isNoop()) {
     map_size = inputs[0].sizes();
@@ -409,6 +402,4 @@ bool runFusion(const int64_t key, Stack& stack, std::string* code_out) {
   return true;
 }
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/executor.h (2 additions, 6 deletions)

@@ -7,9 +7,7 @@
 
 #include <cstdint>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 // Runs the fusion associated with the key (see registerFusion() in interface.h)
 // on the inputs taken from the given Stack.
@@ -18,6 +16,4 @@ TORCH_API bool runFusion(
     Stack& stack,
     std::string* code_out = nullptr);
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/fallback.cpp (2 additions, 6 deletions)

@@ -9,9 +9,7 @@
 
 #include <stdexcept>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 namespace {
 c10::AliasAnalysisKind aliasAnalysisIsSpecialCase() {
@@ -46,6 +44,4 @@ void runFallback(int64_t key, Stack& stack) {
   InterpreterState{(*maybe_spec)->code()}.run(stack);
 }
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/fallback.h (2 additions, 6 deletions)

@@ -4,12 +4,8 @@
 
 #include <cstdlib>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 void runFallback(int64_t key, Stack& stack);
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/fused_kernel.h (2 additions, 7 deletions)

@@ -9,14 +9,11 @@
 #include <string>
 #include <vector>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 struct FusedKernel {
   AT_DISALLOW_COPY_AND_ASSIGN(FusedKernel);
 
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   FusedKernel(
       std::string name,
       std::string code,
@@ -98,6 +95,4 @@ struct FusedKernel {
   const bool has_random_;
 };
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser

torch/csrc/jit/codegen/fuser/interface.cpp (2 additions, 4 deletions)

@@ -8,8 +8,7 @@
 #include <c10/util/Flags.h>
 #include <stdexcept>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 namespace detail {
 
@@ -105,5 +104,4 @@ size_t nCompiledKernels() {
   return fuser::nCompiledKernels();
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit

torch/csrc/jit/codegen/fuser/interface.h (2 additions, 4 deletions)

@@ -9,8 +9,7 @@
 #include <memory>
 #include <vector>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 constexpr int kCPUDevice = -1;
 
@@ -52,5 +51,4 @@ TORCH_API std::string debugGetFusedKernelCode(
 
 TORCH_API size_t nCompiledKernels();
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit

torch/csrc/jit/codegen/fuser/kernel_cache.cpp (3 additions, 7 deletions)

@@ -6,9 +6,7 @@
 #include <mutex>
 #include <unordered_map>
 
-namespace torch {
-namespace jit {
-namespace fuser {
+namespace torch::jit::fuser {
 
 struct KernelCacheImpl {
   // Note: std::unordered_map does not invalidate references even if rehashing
@@ -76,7 +74,7 @@ std::optional<KernelSpec*> retrieve(const int64_t key) {
 }
 
 // precondition: graph has been normalized via normalizeGraphForCache
-std::optional<KernelSpec*> lookupGraph(std::shared_ptr<Graph> graph) {
+std::optional<KernelSpec*> lookupGraph(const std::shared_ptr<Graph>& graph) {
   auto& cache = getKernelCache();
   std::string repr = graph->toString(false);
 
@@ -87,6 +85,4 @@ std::optional<KernelSpec*> lookupGraph(std::shared_ptr<Graph> graph) {
   return nolock_retrieve(cache, it->second);
 }
 
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser
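
The lookupGraph signature change above is the usual fix for clang-tidy's performance-unnecessary-value-param: taking a std::shared_ptr by value copies it and touches the atomic reference count on every call, while a const reference suffices when the callee only reads through the pointer and never stores it. A minimal sketch under that assumption, with a hypothetical Graph stand-in:

#include <iostream>
#include <memory>
#include <string>

struct Graph { std::string repr; }; // hypothetical stand-in

// By value: copies the shared_ptr, bumping the atomic refcount per call.
std::size_t lookupByValue(std::shared_ptr<Graph> g) { return g->repr.size(); }

// By const reference: no refcount traffic; safe because the callee
// neither stores the pointer nor needs to extend the object's lifetime.
std::size_t lookupByRef(const std::shared_ptr<Graph>& g) {
  return g->repr.size();
}

int main() {
  auto g = std::make_shared<Graph>(Graph{"graph(%x) -> %x"});
  std::cout << lookupByValue(g) << ' ' << lookupByRef(g) << '\n';
}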
