Commit 7624d62

cyyever authored and pytorchmergebot committed

[Reland][7/N] Fix Wextra-semi warning (pytorch#140342)

Reland of pytorch#140225 to fix a change in FBCODE_CAFFE2.

Pull Request resolved: pytorch#140342
Approved by: https://github.com/kit1980

1 parent e4195f8 · commit 7624d62

27 files changed: +469 -470
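For context on the warning being fixed: -Wextra-semi flags stray semicolons that form empty declarations, typically a ';' after a member-function body, or after a macro invocation whose expansion is already a complete definition ending in '}'. A minimal sketch of the two patterns this commit cleans up (hypothetical names, not code from the PR; warning wording follows clang and varies by compiler):

    // extra_semi_demo.cpp -- compile with: clang++ -std=c++17 -Wextra-semi -c extra_semi_demo.cpp
    struct Foo {
      void bar() {}  // OK: no semicolon after a member-function body
      void baz() {}; // warning: extra ';' after member function definition
    };               // OK: the class definition itself does end with ';'

    // A macro that expands to a complete function definition ending in '}':
    #define DEFINE_GETTER(name, value) \
      int name() {                     \
        return value;                  \
      }

    DEFINE_GETTER(answer, 42)  // OK: the expansion is already a full definition
    DEFINE_GETTER(zero, 0);    // warning: the trailing ';' is an empty declaration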

Diff for: .clang-format (+7)

@@ -101,9 +101,16 @@ SpacesInParentheses: false
 SpacesInSquareBrackets: false
 Standard: c++17
 StatementMacros:
+  - C10_DEFINE_bool
+  - C10_DEFINE_int
+  - C10_DEFINE_int32
+  - C10_DEFINE_int64
+  - C10_DEFINE_string
   - PyObject_HEAD
   - PyObject_VAR_HEAD
   - PyException_HEAD
+  - DEFINE_BINARY
 TabWidth: 8
 UseTab: Never
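The StatementMacros additions are what let the semicolon removals below format cleanly: clang-format treats an invocation of a listed macro as a complete statement, so it neither expects a trailing ';' nor mis-indents the line that follows. This is also why the // clang-format off/on guards around the DEFINE_BINARY calls could be dropped in the c10/core/SymInt.cpp diff below. A minimal, hypothetical stand-in for the pattern (the real DEFINE_BINARY is more involved, dispatching between hinted and symbolic values; the method parameter is unused in this sketch):

    #include <algorithm>
    #include <functional>

    // Hypothetical macro in the spirit of DEFINE_BINARY: it expands to a
    // complete function definition ending in '}', so invocations take no ';'.
    #define DEFINE_BINARY(name, op, method, ret) \
      ret name(int a, int b) {                   \
        return op(a, b);                         \
      }

    DEFINE_BINARY(sym_add, std::plus<>(), add, int)
    DEFINE_BINARY(sym_max, std::max, max, int)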

Diff for: aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h (+6 -6)

@@ -1594,8 +1594,8 @@ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(c
 inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
   return cvt_from_fp32<type>(__m512(a), __m512(b)); \
 }
-CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
-CONVERT_VECTORIZED_INIT(Half, half);
+CONVERT_VECTORIZED_INIT(BFloat16, bfloat16)
+CONVERT_VECTORIZED_INIT(Half, half)
 
 #else //defined(CPU_CAPABILITY_AVX512)
 
@@ -1624,8 +1624,8 @@ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const V
   } \
   return Vectorized<type>::loadu(arr2); \
 }
-CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
-CONVERT_NON_VECTORIZED_INIT(Half, half);
+CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16)
+CONVERT_NON_VECTORIZED_INIT(Half, half)
 
 #endif // defined(CPU_CAPABILITY_AVX512)
 
@@ -1663,8 +1663,8 @@ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vec
   data += Vectorized<float>::size(); \
   load_fp32_from_##name(data, out2); \
 }
-LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
-LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
+LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16)
+LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16)
 
 #endif
 }}}

Diff for: c10/core/SymInt.cpp (-2)

@@ -61,7 +61,6 @@ bool SymInt::has_hint() const {
   } \
 }
 
-// clang-format off
 DEFINE_BINARY(operator+, std::plus<>(), add, SymInt)
 DEFINE_BINARY(operator-, std::minus<>(), sub, SymInt)
 DEFINE_BINARY(operator*, std::multiplies<>(), mul, SymInt)
@@ -75,7 +74,6 @@ DEFINE_BINARY(sym_gt, std::greater<>(), gt, SymBool)
 DEFINE_BINARY(sym_ge, std::greater_equal<>(), ge, SymBool)
 DEFINE_BINARY(min, std::min, sym_min, SymInt)
 DEFINE_BINARY(max, std::max, sym_max, SymInt)
-// clang-format on
 
 SymInt::operator SymFloat() const {
   if (auto ma = maybe_as_int()) {

Diff for: tools/autograd/templates/VariableType.h (+2 -2)

@@ -18,7 +18,7 @@
 
 namespace at {
   struct Quantizer;
-};
+}
 
 namespace torch { namespace autograd {
 
@@ -54,6 +54,6 @@ namespace VariableType {
   const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
   at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
   std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
-};
+}
 
 }} // namespace torch::autograd

Diff for: torch/csrc/distributed/autograd/context/context.cpp (+1 -1)

@@ -21,7 +21,7 @@ std::unordered_set<rpc::worker_id_t> DistAutogradContext::getKnownWorkerIds()
     const {
   std::lock_guard<std::mutex> guard(lock_);
   return knownWorkerIds_;
-};
+}
 
 void DistAutogradContext::addKnownWorkerId(const rpc::worker_id_t workerId) {
   std::lock_guard<std::mutex> guard(lock_);

Diff for: torch/csrc/distributed/c10d/reducer.hpp (+1 -1)

@@ -103,7 +103,7 @@ class TORCH_API Reducer {
   // been applied.
   void set_optimizer_in_backward() {
     optim_in_backward_ = true;
-  };
+  }
 
   // Runs allreduce or installed communication hook given GradBucket instance.
   c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(

Diff for: torch/csrc/jit/api/function_impl.cpp (+3 -3)

@@ -16,7 +16,7 @@
 C10_DEFINE_bool(
     torch_jit_do_not_store_optimized_graph,
     false,
-    "Do not store the optimized graph.");
+    "Do not store the optimized graph.")
 
 namespace torch::jit {
 namespace {
@@ -133,8 +133,8 @@ GraphFunction::SpecializationKey GraphFunction::currentSpecialization() const {
 void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast) {
   Inline(*graph);
 
-  // Peephole Optimize cleans up many "is None" checks and creates constant prop
-  // opportunities
+  // Peephole Optimize cleans up many "is None" checks and creates constant
+  // prop opportunities
   PeepholeOptimize(graph, true);
 
   // AliasDb construction can be slow, so run it just on immutable types

Diff for: torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp (+1 -1)

@@ -6,7 +6,7 @@
 namespace torch::jit {
 
 struct ChunkOutput {
-  ChunkOutput(Value* v, size_t o) : val(v), offset(o){};
+  ChunkOutput(Value* v, size_t o) : val(v), offset(o) {}
   Value* val;
   size_t offset;
 };

Diff for: torch/csrc/jit/passes/tensorexpr_fuser.cpp (+3 -6)

@@ -29,12 +29,12 @@
 C10_DEFINE_bool(
     torch_jit_disable_cat,
     false,
-    "disable aten::cat in TE fusion groups");
+    "disable aten::cat in TE fusion groups")
 
 C10_DEFINE_bool(
     torch_jit_enable_dynamic_shape_fusion,
     false,
-    "enable TE fusion using dynamic shapes");
+    "enable TE fusion using dynamic shapes")
 
 namespace torch::jit {
 
@@ -82,9 +82,8 @@ static const OperatorSet& supported_non_eltwise_set() {
       "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
       "aten::matmul(Tensor self, Tensor other) -> Tensor",
   };
-  // clang-format on
   return supported_non_eltwise_set;
-};
+}
 
 bool isSupported(Node* node) {
   // For Block codegen we allow limited ops.
@@ -102,7 +101,6 @@ bool isSupported(Node* node) {
       "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
      "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)",
   };
-  // clang-format on
 
   if (get_tensorexpr_elementwise_set().contains(node) ||
       node->isMemberOf(supported_non_eltwise_set()) ||
@@ -903,7 +901,6 @@ class TensorExprFuser {
     static const OperatorSet pow{
         "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor",
     };
-    // clang-format on
 
     // Check types of input values.
     for (const Value* v : node->inputs()) {

Diff for: torch/csrc/jit/runtime/autodiff.cpp (+1 -1)

@@ -167,7 +167,7 @@ static std::optional<std::vector<Value*>> build_script_grad(
   auto grad_inputs = insertGraph(*graph, *bw_graph, grad);
   grad_inputs = unpackOutputs(grad_inputs);
   return grad_inputs;
-};
+}
 
 namespace {
 class GradientHelper {

Diff for: torch/csrc/jit/runtime/graph_executor.cpp (+13 -11)

@@ -56,10 +56,9 @@
 C10_DEFINE_bool(
     torch_jit_execution_plan_reuse_code_graph,
     false,
-    "Directly reuse the preprocessed graph in the CodeImpl to reduce the memory consumption. This is aggressive memory saving, and please be cautious!");
+    "Directly reuse the preprocessed graph in the CodeImpl to reduce the memory consumption. This is aggressive memory saving, and please be cautious!")
 
 namespace torch::jit {
-
 EnableProfilingGuard::EnableProfilingGuard() {
   auto& executor_mode = getExecutorMode();
   old_executor_mode = executor_mode;
@@ -432,8 +431,8 @@ struct DifferentiableGraphOp {
 
     {
       auto inputs = last(stack, num_inputs);
-      // hook up the outputs of df to the gradient functions of the inputs that
-      // require gradients
+      // hook up the outputs of df to the gradient functions of the inputs
+      // that require gradients
       for (auto idx : grad.df_output_vjps) {
        grad_fn->addOutputForIValue(inputs[idx]);
       }
@@ -455,8 +454,8 @@ struct DifferentiableGraphOp {
      // TODO - XXX - if any output is the same tensor multiple times, views
      // have to be setup here. We need to refactor autograd until it is safe
      // for tensors to be constructed without all the viewing infrastructure.
-      // this is currently intentionally not done here so we can get an idea of
-      // our perf before introducing overhead for correctness
+      // this is currently intentionally not done here so we can get an idea
+      // of our perf before introducing overhead for correctness
      for (auto idx : grad.df_input_vjps) {
        grad_fn->addInputIValue(outputs[idx]);
      }
@@ -501,7 +500,8 @@ struct DifferentiableGraphOp {
       detach(stack[i]);
     }
   }
-  // Capture (save) inputs that would be required to subsequently run backwards
+  // Capture (save) inputs that would be required to subsequently run
+  // backwards
   void captureInputs(
       DifferentiableGraphBackward& grad_fn,
       at::ArrayRef<IValue> inputs) const {
@@ -736,8 +736,10 @@ struct GraphExecutorImpl : public GraphExecutorImplBase {
     runOptimization(opt_graph);
 
     // Phase 4. If this graph will be differentiated, we need to slice out the
-    // symbolically differentiable subgraphs for further optimizations.
-    // Phase 5. Apply non-differentiable optimizations to the graphs we've found
+    // symbolically differentiable subgraphs for further
+    // optimizations.
+    // Phase 5. Apply non-differentiable optimizations to the graphs we've
+    // found
     // (or the whole graph if we know we won't need its derivative).
     if (needsGradient(opt_graph)) {
       auto diff_nodes = CreateAutodiffSubgraphs(
@@ -781,8 +783,8 @@ struct GraphExecutorImpl : public GraphExecutorImplBase {
 
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   ArgumentSpecCreator arg_spec_creator_;
-  // Populated only when optimize is false (and in that case plan_cache will be
-  // unused). The compiled version of graph.
+  // Populated only when optimize is false (and in that case plan_cache will
+  // be unused). The compiled version of graph.
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   ExecutionPlan fallback;
 

Diff for: torch/csrc/jit/runtime/interpreter.cpp (+2 -2)

@@ -49,12 +49,12 @@ using torch::distributed::autograd::DistAutogradContainer;
 C10_DEFINE_bool(
     torch_jit_enable_rethrow_caught_exception,
     false,
-    "enable rethrowing caught exception");
+    "enable rethrowing caught exception")
 
 C10_DEFINE_bool(
     torch_jit_enable_expanded_stacks,
     false,
-    "When true we will attemps to pre-expand node stacks and cache expanded stacks.");
+    "When true we will attemps to pre-expand node stacks and cache expanded stacks.")
 
 namespace torch::jit {
 

Diff for: torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp (+8 -8)

@@ -41,44 +41,44 @@
 C10_DEFINE_bool(
     torch_jit_enable_new_executor,
     true,
-    "If this flag is set to false TorchScript will be using the legacy/original executor");
+    "If this flag is set to false TorchScript will be using the legacy/original executor")
 
 C10_DEFINE_bool(
     torch_jit_disable_warning_prints,
     false,
-    "Disables warning.warn prints in TorchScript graph");
+    "Disables warning.warn prints in TorchScript graph")
 
 C10_DEFINE_bool(
     torch_jit_static_then_dynamic,
     false,
-    "fuse on two static compilations then 10 dynamic");
+    "fuse on two static compilations then 10 dynamic")
 
 C10_DEFINE_bool(
     torch_jit_always_dynamic,
     false,
-    "fuse on 12 dynamic compilations");
+    "fuse on 12 dynamic compilations")
 
 C10_DEFINE_bool(
     torch_jit_release_profiling_graph_after_optimization,
     false,
-    "After getOptimizedPlanFor release the optimization record for reduction of memory in inference. This is aggressive memory saving, and please be cautious!");
+    "After getOptimizedPlanFor release the optimization record for reduction of memory in inference. This is aggressive memory saving, and please be cautious!")
 
 C10_DEFINE_int32(
     torch_jit_release_profiling_graph_delay_in_seconds,
     60,
-    "How long to wait before releasing the profiling graph after optimizaiton is done. Only used if torch_jit_release_profiling_graph_after_optimization is set to true.");
+    "How long to wait before releasing the profiling graph after optimizaiton is done. Only used if torch_jit_release_profiling_graph_after_optimization is set to true.")
 
 constexpr size_t kDefaultNumProfiledRuns = 1;
 constexpr size_t kDefaultBailoutDepth = 20;
 
 C10_DEFINE_int64(
     torch_jit_num_profiled_runs,
     kDefaultNumProfiledRuns,
-    "Number of profiling runs");
+    "Number of profiling runs")
 C10_DEFINE_int64(
     torch_jit_bailout_depth,
     kDefaultBailoutDepth,
-    "Number of re-specializations");
+    "Number of re-specializations")
 
 namespace torch::jit {
 
