Commit f96d96a

Michael Andreas Dagitses authored and pytorchmergebot committed
turn on -Werror=type-limits in our Bazel CPU build
Summary: We also fix any existing issues.

Test Plan: Built locally; rely on CI to confirm.

Reviewers: malfet

Pull Request resolved: pytorch#79139

Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
1 parent 28c5417 commit f96d96a
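
For context, the kind of code -Werror=type-limits turns into a hard error is a comparison whose outcome is already fixed by a type's range. A minimal sketch of ours (not from the commit), assuming GCC or Clang:

#include <cstddef>

// With -Werror=type-limits this fails to compile:
// "comparison of unsigned expression in '>= 0' is always true"
bool in_range(std::size_t op_id, std::size_t size) {
  return op_id >= 0 && op_id < size;
}

Most of the files below fix exactly this pattern, usually by dropping the tautological half of a bounds check on an unsigned value.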

File tree

13 files changed: +50 -35 lines

.bazelrc

+21

@@ -30,3 +30,24 @@ build:cpu-only --platform_suffix=-cpu-only
 build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
 # rules_cuda configuration
 build:cpu-only --@rules_cuda//cuda:enable_cuda=False
+
+# Set additional warnings to error level.
+#
+# Implementation notes:
+#  * we use file extensions to determine if we are using the C++
+#    compiler or the cuda compiler
+#  * we use ^// at the start of the regex to only permit matching
+#    PyTorch files. This excludes external repos.
+#
+# Note that because this is logically a command-line flag, it is
+# considered the word on what warnings are enabled. This has the
+# unfortunate consequence of preventing us from disabling an error at
+# the target level because those flags will come before these flags in
+# the action invocation. Instead we provide per-file exceptions after
+# this.
+#
+# On the bright side, this means we don't have to more broadly apply
+# the exceptions to an entire target.
+build \
+    --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
+    --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
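
The comment above defers per-file exceptions to later entries. As a hedged sketch of what such an exception could look like (the file path is hypothetical, not part of this commit):

build --per_file_copt='^//torch/csrc/some_legacy_file\.cpp$'@-Wno-error=type-limits

Because per_file_copt options are appended after the flags above in the compile action, a later -Wno-error=type-limits wins for the files its regex matches.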

aten/src/ATen/native/cpu/BinaryOpsKernel.cpp

+3 -2

@@ -12,6 +12,7 @@
 #include <ATen/native/cpu/Loops.h>
 #include <ATen/native/Math.h>
 #include <c10/macros/Macros.h>
+#include <c10/util/TypeSafeSignMath.h>
 #include <c10/util/copysign.h>

 namespace at {
@@ -133,7 +134,7 @@ void div_floor_kernel(TensorIteratorBase& iter) {
   AT_DISPATCH_INTEGRAL_TYPES(dtype, "div_floor_cpu", [&]() {
     cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
       TORCH_CHECK(b != 0, "ZeroDivisionError");
-      if ((a < 0) != (b < 0)) {
+      if (c10::is_negative(a) != c10::is_negative(b)) {
         // Subtracts one from the results of truncation division if the
         // divisor and dividend have different sign(bit)s and the remainder of
         // the division is nonzero
@@ -198,7 +199,7 @@ void remainder_kernel(TensorIteratorBase& iter) {
     cpu_kernel(iter, [](scalar_t a, scalar_t b) -> scalar_t {
       TORCH_CHECK(b != 0, "ZeroDivisionError");
       scalar_t r = a % b;
-      if ((r != 0) && ((r < 0) != (b < 0))) {
+      if ((r != 0) && (c10::is_negative(r) != c10::is_negative(b))) {
         r += b;
       }
       return r;
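
c10::is_negative, pulled in from c10/util/TypeSafeSignMath.h, asks "is this value negative?" for any dispatched integral type without instantiating an always-false comparison for the unsigned ones. A minimal sketch of the idea (ours; not necessarily c10's exact implementation):

#include <type_traits>

// Tag dispatch: the x < 0 comparison is only instantiated for signed
// types, so unsigned instantiations never contain the tautological
// comparison that -Wtype-limits flags.
template <typename T>
constexpr bool is_negative_impl(T x, std::true_type /*is_signed*/) {
  return x < T(0);
}

template <typename T>
constexpr bool is_negative_impl(T /*x*/, std::false_type /*is_signed*/) {
  return false;
}

template <typename T>
constexpr bool is_negative(T x) {
  return is_negative_impl(x, std::is_signed<T>{});
}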

aten/src/ATen/native/cpu/UnaryOpsKernel.cpp

+3 -2

@@ -20,6 +20,7 @@
 #include <c10/util/math_compat.h>
 #include <c10/util/MathConstants.h>
 #include <c10/core/Scalar.h>
+#include <c10/util/TypeSafeSignMath.h>
 #include <c10/util/irange.h>

 #if AT_MKL_ENABLED()
@@ -284,7 +285,7 @@ void sign_kernel(TensorIteratorBase& iter){

   cpu_kernel_vec(
       iter,
-      [=](scalar_t a) -> scalar_t { return (0 < a) - (a < 0); },
+      [=](scalar_t a) -> scalar_t { return (0 < a) - c10::is_negative(a); },
       [=](Vectorized<scalar_t> self_vec){

         // Comparison operators returns bitmask.
@@ -301,7 +302,7 @@ static void signbit_kernel(TensorIteratorBase& iter){
   // NOTE: signbit does not always support integral arguments.
   if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
     AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cpu", [&]() {
-      cpu_kernel(iter, [](scalar_t a) -> bool { return a < 0; }); });
+      cpu_kernel(iter, [](scalar_t a) -> bool { return c10::is_negative(a); }); });
   } else {
     AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cpu", [&]() {
       using opmath_t = at::opmath_type<scalar_t>;

caffe2/core/prof_dag_counters.cc

+3 -3

@@ -51,7 +51,7 @@ void ProfDAGCounters::AddPerOpStartTime(size_t op_id) {
     return;
   }

-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_start_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_start_times_run_.size());
   op_start_times_run_[op_id] = timer_.MilliSeconds();
 }

@@ -60,7 +60,7 @@ void ProfDAGCounters::AddPerOpEndTime(size_t op_id) {
     return;
   }

-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_end_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_end_times_run_.size());
   op_end_times_run_[op_id] = timer_.MilliSeconds();
 }

@@ -69,7 +69,7 @@ void ProfDAGCounters::AddPerOpAsyncEndTime(size_t op_id) {
     return;
   }

-  CAFFE_ENFORCE(op_id >= 0 && op_id < op_async_end_times_run_.size());
+  CAFFE_ENFORCE(op_id < op_async_end_times_run_.size());
   op_async_end_times_run_[op_id] = timer_.MilliSeconds();
 }

test/cpp/jit/test_lite_interpreter.cpp

+1 -2

@@ -563,8 +563,7 @@ TEST(LiteInterpreterTest, GetContainTypes) {
   std::stringstream ss;
   m._save_for_mobile(ss, {}, true);

-  auto contained_types = _get_mobile_model_contained_types(ss);
-  AT_ASSERT(contained_types.size() >= 0);
+  _get_mobile_model_contained_types(ss);
 }

 namespace {

test/cpp/jit/test_lite_trainer.cpp

-3

@@ -384,19 +384,16 @@ TEST(LiteTrainerTest, RandomSamplerReturnsIndicesInCorrectRange) {

   std::vector<size_t> indices = sampler.next(3).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }

   indices = sampler.next(5).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }

   indices = sampler.next(2).value();
   for (auto i : indices) {
-    AT_ASSERT(i >= 0);
     AT_ASSERT(i < 10);
   }

torch/csrc/distributed/rpc/utils.cpp

+1 -1

@@ -492,8 +492,8 @@ std::vector<at::IValue> readWrappedPayload(
   // Read the additional payload remove it from the payload.
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   int64_t additionalPayloadSize;
+  TORCH_INTERNAL_ASSERT(payload.size() >= sizeof(int64_t));
   size_t indexToRead = payload.size() - sizeof(int64_t);
-  TORCH_INTERNAL_ASSERT(indexToRead >= 0);
   torch::utils::THP_decodeInt64Buffer(
       &additionalPayloadSize,
       reinterpret_cast<uint8_t*>(payload.data()) + indexToRead,
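
The deleted assert ran after the subtraction, but indexToRead is a size_t, so it could never be negative; an undersized payload would already have wrapped around. A small illustration of ours (not from the commit):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  std::size_t payload_size = 4;  // smaller than sizeof(int64_t)
  // Unsigned subtraction wraps instead of going negative, so the old
  // `indexToRead >= 0` assert was vacuously true even here.
  std::size_t indexToRead = payload_size - sizeof(int64_t);
  std::printf("%zu\n", indexToRead);  // huge value, e.g. 18446744073709551612
}

Checking payload.size() >= sizeof(int64_t) before subtracting catches the malformed case the old assert was presumably after.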

torch/csrc/jit/codegen/cuda/compute_at.cpp

+1 -1

@@ -905,7 +905,7 @@ ComputeAt::ComputeAt(
       " producer: ",
       producer_);
   TORCH_INTERNAL_ASSERT(
-      reference_position_ >= 0 && reference_position_ <= reference_->nDims(),
+      reference_position_ <= reference_->nDims(),
       "Invalid computeAt axis, received ",
       reference_position_,
       " but should be > -",

torch/csrc/jit/codegen/cuda/scheduler/mma_utils.cpp

+1 -1

@@ -186,7 +186,7 @@ void checkDimSize(
   ExpressionEvaluator const_eval(tv->fusion());
   for (auto axis_index : c10::irange(axis.size())) {
     TORCH_INTERNAL_ASSERT(
-        ((axis[axis_index] + tv->nDims()) >= 0) &&
+        ((axis[axis_index] + static_cast<int>(tv->nDims())) >= 0) &&
            (axis[axis_index] < (int)tv->nDims()),
        "CheckDimSize: axis position out of bound ",
        axis[axis_index],
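
This fix goes the other way from most in the commit: the >= 0 check is meaningful, but axis[axis_index] + tv->nDims() mixes a signed int with an unsigned size, so the usual arithmetic conversions make the whole sum unsigned and the check tautological. Casting nDims() to int keeps the arithmetic signed. An illustration of ours (not from the commit):

#include <cstddef>
#include <iostream>

int main() {
  int axis = -5;
  std::size_t ndims = 3;
  // int + size_t converts the int to unsigned, so -5 + 3 wraps to a
  // huge value and `(axis + ndims) >= 0` is always true.
  std::cout << axis + ndims << '\n';
  // Signed arithmetic preserves the sign; the bound check now means something.
  std::cout << axis + static_cast<int>(ndims) << '\n';  // prints -2
}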

torch/csrc/jit/codegen/cuda/transform_view.cpp

+3 -3

@@ -115,7 +115,7 @@ class MergeTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && (index_ + 1) < new_root_domain.size(),
+        (index_ + 1) < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",
@@ -174,7 +174,7 @@ class SplitTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && index_ < new_root_domain.size(),
+        index_ < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",
@@ -237,7 +237,7 @@ class KeepTransform final : public ViewTransform {
       const std::vector<IterDomain*>& new_root_domain,
       std::vector<IterDomain*>& rfactor_domain) override {
     TORCH_INTERNAL_ASSERT(
-        index_ >= 0 && index_ < new_root_domain.size(),
+        index_ < new_root_domain.size(),
         "Index: \t",
         index_,
         "\t Domain Size:\t",

torch/csrc/jit/mobile/function.cpp

+2 -2

@@ -211,13 +211,13 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
       out_args.push_back(stack.back());
       stack.pop_back();
     }
-    size_t start_index = num_specified_args.value() - out_args.size();
     TORCH_CHECK(
-        start_index >= 0,
+        num_specified_args.value() >= out_args.size(),
         "The number of output arguments is: ",
         out_args.size(),
         ", which is more then the number of specified arguments: ",
         num_specified_args.value());
+    size_t start_index = num_specified_args.value() - out_args.size();
     for (size_t i = start_index; i < (args.size() - out_args.size()); ++i) {
       TORCH_CHECK(
           args[i].default_value().has_value(),

torch/csrc/jit/serialization/pickler.cpp

+11 -9

@@ -8,6 +8,7 @@
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/serialization/pickler.h>
 #include <string>
+#include <type_traits>

 namespace torch {
 namespace jit {
@@ -595,18 +596,19 @@ void Pickler::pushDict(const IValue& ivalue) {

   push<PickleOpCode>(PickleOpCode::EMPTY_DICT);

-  if (dict.size() >= 0) {
-    push<PickleOpCode>(PickleOpCode::MARK);
-
-    // Sort the dict for deterministic keys
-    for (const auto& entry : dict) {
-      pushIValue(entry.key());
-      pushIValue(entry.value());
-    }
+  static_assert(
+      std::is_unsigned<decltype(dict.size())>::value,
+      "Expected size to be non-negative.");
+  push<PickleOpCode>(PickleOpCode::MARK);

-    push<PickleOpCode>(PickleOpCode::SETITEMS);
+  // Sort the dict for deterministic keys
+  for (const auto& entry : dict) {
+    pushIValue(entry.key());
+    pushIValue(entry.value());
   }

+  push<PickleOpCode>(PickleOpCode::SETITEMS);
+
   endTypeTag(ivalue);
 }
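
Rather than silently deleting the tautological if (dict.size() >= 0), this file records the invariant at compile time. The same pattern in isolation (our sketch, using std::vector as a stand-in):

#include <type_traits>
#include <vector>

void push_all(const std::vector<int>& v) {
  // Compile-time record of why no runtime `>= 0` check is needed:
  // size() returns an unsigned type, so it can never be negative.
  static_assert(
      std::is_unsigned<decltype(v.size())>::value,
      "Expected size to be non-negative.");
}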

torch/csrc/jit/tensorexpr/loopnest_randomization.cpp

-6

@@ -574,9 +574,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {

     case COMPRESS_BUFFER: {
       auto buffers = NodeFinder<Buf>::find(l.root_stmt());
-      if (buffers.size() < 0) {
-        break;
-      }
       int buffer_n = std::rand() % (int)buffers.size();
       auto buffer = buffers[buffer_n];

@@ -589,9 +586,6 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {

     case COMPRESS_ALL_BUFFERS: {
       auto buffers = BufFinder::find(l.root_stmt());
-      if (buffers.size() < 0) {
-        break;
-      }

       message = "compressAllBuffers(l.root_stmt());\n";
       randomization_helper::printHistory(n_transform, message);
