
Commit 7624d62

cyyever authored and pytorchmergebot committed
[Reland][7/N] Fix Wextra-semi warning (pytorch#140342)
Reland of pytorch#140225 to fix a change in FBCODE_CAFFE2

Pull Request resolved: pytorch#140342
Approved by: https://github.com/kit1980
1 parent e4195f8 commit 7624d62
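
For context, -Wextra-semi warns about a semicolon that follows something which is already a complete definition: a function body, a namespace's closing brace, or a macro whose expansion already ends in a closing brace. The stray `;` is an empty declaration. The snippet below is a simplified, hypothetical illustration of the patterns this commit removes, with names borrowed from the reducer.hpp and VariableType.h hunks further down; it is not code taken verbatim from the diff.

// Simplified illustration; compiles, but the marked semicolons are redundant.
struct Reducer {
  void set_optimizer_in_backward() {
    optim_in_backward_ = true;
  };  // warning: extra ';' after member function definition (-Wextra-semi)
  bool optim_in_backward_ = false;
};    // this ';' is required: it terminates the class definition

namespace at {
struct Quantizer;
};    // redundant ';' after the namespace's closing brace, also removed in this series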


27 files changed: 469 additions & 470 deletions


.clang-format

Lines changed: 7 additions & 0 deletions
@@ -101,9 +101,16 @@ SpacesInParentheses: false
 SpacesInSquareBrackets: false
 Standard: c++17
 StatementMacros:
+  - C10_DEFINE_bool
+  - C10_DEFINE_int
+  - C10_DEFINE_int32
+  - C10_DEFINE_int64
+  - C10_DEFINE_string
   - PyObject_HEAD
   - PyObject_VAR_HEAD
   - PyException_HEAD
+  - DEFINE_BINARY
 TabWidth: 8
 UseTab: Never
 ---
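
The new StatementMacros entries tell clang-format that an invocation of these macros is a complete statement on its own, so the formatter neither expects a trailing semicolon nor folds the following lines into the macro call once the semicolons are dropped elsewhere in this commit. A hedged sketch of the effect, reusing the C10_DEFINE_bool call from torch/csrc/jit/api/function_impl.cpp below:

// With "C10_DEFINE_bool" listed under StatementMacros, clang-format keeps
// this semicolon-free invocation formatted as one standalone statement:
C10_DEFINE_bool(
    torch_jit_do_not_store_optimized_graph,
    false,
    "Do not store the optimized graph.")

// Without the entry, clang-format may treat the macro as the start of the
// next declaration and re-indent or merge the lines that follow it.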

aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h

Lines changed: 6 additions & 6 deletions
@@ -1594,8 +1594,8 @@ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(c
 inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
   return cvt_from_fp32<type>(__m512(a), __m512(b)); \
 }
-CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
-CONVERT_VECTORIZED_INIT(Half, half);
+CONVERT_VECTORIZED_INIT(BFloat16, bfloat16)
+CONVERT_VECTORIZED_INIT(Half, half)

 #else //defined(CPU_CAPABILITY_AVX512)

@@ -1624,8 +1624,8 @@ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const V
   } \
   return Vectorized<type>::loadu(arr2); \
 }
-CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
-CONVERT_NON_VECTORIZED_INIT(Half, half);
+CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16)
+CONVERT_NON_VECTORIZED_INIT(Half, half)

 #endif // defined(CPU_CAPABILITY_AVX512)

@@ -1663,8 +1663,8 @@ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vec
   data += Vectorized<float>::size(); \
   load_fp32_from_##name(data, out2); \
 }
-LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
-LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
+LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16)
+LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16)

 #endif
 }}}
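
CONVERT_VECTORIZED_INIT, CONVERT_NON_VECTORIZED_INIT, and LOAD_FP32_NON_VECTORIZED_INIT each expand to full function definitions whose last token is a closing brace, as the context lines above show, so a semicolon after the invocation is an empty declaration of exactly the kind this commit removes. A minimal sketch of the same shape, with a hypothetical macro and names rather than the real vec512 code:

// Hypothetical reduction of the pattern: the macro body already ends in '}',
// so the call site needs no ';'.
#define DEFINE_CONVERT_INIT(type, name)            \
  inline type convert_float_##name(float value) {  \
    return static_cast<type>(value);               \
  }

DEFINE_CONVERT_INIT(double, double_precision)  // no trailing ';' needed
DEFINE_CONVERT_INIT(int, integral)             // a ';' here is what this commit cleans up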

c10/core/SymInt.cpp

Lines changed: 0 additions & 2 deletions
@@ -61,7 +61,6 @@ bool SymInt::has_hint() const {
   } \
 }

-// clang-format off
 DEFINE_BINARY(operator+, std::plus<>(), add, SymInt)
 DEFINE_BINARY(operator-, std::minus<>(), sub, SymInt)
 DEFINE_BINARY(operator*, std::multiplies<>(), mul, SymInt)
@@ -75,7 +74,6 @@ DEFINE_BINARY(sym_gt, std::greater<>(), gt, SymBool)
 DEFINE_BINARY(sym_ge, std::greater_equal<>(), ge, SymBool)
 DEFINE_BINARY(min, std::min, sym_min, SymInt)
 DEFINE_BINARY(max, std::max, sym_max, SymInt)
-// clang-format on

 SymInt::operator SymFloat() const {
   if (auto ma = maybe_as_int()) {

tools/autograd/templates/VariableType.h

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@

 namespace at {
   struct Quantizer;
-};
+}

 namespace torch { namespace autograd {

@@ -54,6 +54,6 @@ namespace VariableType {
   const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
   at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
   std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
-};
+}

 }} // namespace torch::autograd

torch/csrc/distributed/autograd/context/context.cpp

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ std::unordered_set<rpc::worker_id_t> DistAutogradContext::getKnownWorkerIds()
     const {
   std::lock_guard<std::mutex> guard(lock_);
   return knownWorkerIds_;
-};
+}

 void DistAutogradContext::addKnownWorkerId(const rpc::worker_id_t workerId) {
   std::lock_guard<std::mutex> guard(lock_);

torch/csrc/distributed/c10d/reducer.hpp

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@ class TORCH_API Reducer {
   // been applied.
   void set_optimizer_in_backward() {
     optim_in_backward_ = true;
-  };
+  }

   // Runs allreduce or installed communication hook given GradBucket instance.
   c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(

torch/csrc/jit/api/function_impl.cpp

Lines changed: 3 additions & 3 deletions
@@ -16,7 +16,7 @@
 C10_DEFINE_bool(
     torch_jit_do_not_store_optimized_graph,
     false,
-    "Do not store the optimized graph.");
+    "Do not store the optimized graph.")

 namespace torch::jit {
 namespace {
@@ -133,8 +133,8 @@ GraphFunction::SpecializationKey GraphFunction::currentSpecialization() const {
 void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast) {
   Inline(*graph);

-  // Peephole Optimize cleans up many "is None" checks and creates constant prop
-  // opportunities
+  // Peephole Optimize cleans up many "is None" checks and creates constant
+  // prop opportunities
   PeepholeOptimize(graph, true);

   // AliasDb construction can be slow, so run it just on immutable types

torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 namespace torch::jit {

 struct ChunkOutput {
-  ChunkOutput(Value* v, size_t o) : val(v), offset(o){};
+  ChunkOutput(Value* v, size_t o) : val(v), offset(o) {}
   Value* val;
   size_t offset;
 };

torch/csrc/jit/passes/tensorexpr_fuser.cpp

Lines changed: 3 additions & 6 deletions
@@ -29,12 +29,12 @@
 C10_DEFINE_bool(
     torch_jit_disable_cat,
     false,
-    "disable aten::cat in TE fusion groups");
+    "disable aten::cat in TE fusion groups")

 C10_DEFINE_bool(
     torch_jit_enable_dynamic_shape_fusion,
     false,
-    "enable TE fusion using dynamic shapes");
+    "enable TE fusion using dynamic shapes")

 namespace torch::jit {

@@ -82,9 +82,8 @@ static const OperatorSet& supported_non_eltwise_set() {
       "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
       "aten::matmul(Tensor self, Tensor other) -> Tensor",
   };
-  // clang-format on
   return supported_non_eltwise_set;
-};
+}

 bool isSupported(Node* node) {
   // For Block codegen we allow limited ops.
@@ -102,7 +101,6 @@ bool isSupported(Node* node) {
       "aten::cat(Tensor[] tensors, int dim=0) -> Tensor",
       "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)",
   };
-  // clang-format on

   if (get_tensorexpr_elementwise_set().contains(node) ||
       node->isMemberOf(supported_non_eltwise_set()) ||
@@ -903,7 +901,6 @@ class TensorExprFuser {
     static const OperatorSet pow{
         "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor",
     };
-    // clang-format on

     // Check types of input values.
     for (const Value* v : node->inputs()) {

torch/csrc/jit/runtime/autodiff.cpp

Lines changed: 1 addition & 1 deletion
@@ -167,7 +167,7 @@ static std::optional<std::vector<Value*>> build_script_grad(
   auto grad_inputs = insertGraph(*graph, *bw_graph, grad);
   grad_inputs = unpackOutputs(grad_inputs);
   return grad_inputs;
-};
+}

 namespace {
 class GradientHelper {
