
Commit 7bb36ad

Hector Yuen authored and facebook-github-bot committed
fix -Wsign-compare warnings for some files inside c2 (pytorch#18123)
Summary: Pull Request resolved: pytorch#18123

The motivation of this fix is to resolve patterns like for (auto i = 0; i < N; i++) where N is bigger than int32. These instances of comparison were found by enabling -Wsign-compare. There are far too many to fix at once, so this is being issued as a series of fixes. The plan is to fix all of these issues and then enable the flag in Caffe2 to catch future instances.

Reviewed By: ZolotukhinM

Differential Revision: D14497094

fbshipit-source-id: bca3927a2188bd33a508fa503ba221c220cdaefe
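As a brief, hedged illustration (the function and variable names below are hypothetical, not taken from the patch): -Wsign-compare fires when a signed loop index is compared against an unsigned length such as std::vector::size() or a size_t count, and the fix applied throughout this commit is to give the index the matching unsigned type.

// Hypothetical sketch of the pattern this series of fixes targets.
#include <cstddef>
#include <vector>

int sum_all(const std::vector<int>& values) {
  int total = 0;
  // Warns under -Wsign-compare: i is a signed int while values.size() is
  // size_t, so the comparison mixes signed and unsigned operands.
  // for (int i = 0; i < values.size(); ++i) { total += values[i]; }

  // The form adopted in this commit: make the index size_t as well.
  for (size_t i = 0; i < values.size(); ++i) {
    total += values[i];
  }
  return total;
}

Compiling the commented-out variant with the warning enabled (for example, g++ -Wsign-compare -c example.cc) reports the signed/unsigned comparison; the size_t version compiles cleanly.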
1 parent 1c76746 commit 7bb36ad

16 files changed, +33 −33 lines changed

caffe2/core/blob_serialization.h

+3 −3

@@ -162,7 +162,7 @@ inline void CopyToProtoAsIs(
       "The source type and dest type cannot be copied as-is. Did "
       "you mean CopyToProtoWithCast?");
   field->Reserve(size);
-  for (int i = 0; i < size; ++i) {
+  for (size_t i = 0; i < size; ++i) {
     field->Add(0);
   }
   context->template CopyToCPU<SrcType>(
@@ -183,7 +183,7 @@ inline void CopyToProtoWithCast(
   context->template CopyToCPU<SrcType>(size, src, buffer.get());
   context->FinishDeviceComputation();
   field->Reserve(size);
-  for (int i = 0; i < size; ++i) {
+  for (size_t i = 0; i < size; ++i) {
     field->Add(static_cast<DstType>(buffer[i]));
   }
 }
@@ -214,7 +214,7 @@ inline void CopyFromProtoWithCast(
   // CPUContext. Remove it if it is performance critical.
   unique_ptr<DstType[]> buffer(new DstType[size]);
   const SrcType* src = field.data();
-  for (int i = 0; i < size; ++i) {
+  for (size_t i = 0; i < size; ++i) {
     buffer[i] = static_cast<DstType>(src[i]);
   }
   context->template CopyFromCPU<DstType>(size, buffer.get(), dst);

caffe2/ideep/utils/ideep_context.h

+1 −1

@@ -83,7 +83,7 @@ class IDEEPContext final : public BaseContext {
          static_cast<const void*>(src),
          static_cast<void*>(dst));
     } else {
-      for (int i = 0; i < n; ++i) {
+      for (size_t i = 0; i < n; ++i) {
         dst[i] = src[i];
       }
     }

caffe2/operators/conv_op_impl.h

+1 −1

@@ -738,7 +738,7 @@ bool ConvGradientOp<T, Context>::RunOnDeviceWithOrderNHWC() {
   CAFFE_ENFORCE_EQ(C, filter.dim32(filter.dim() - 1) * group_);

   int kernel_dims_size = 1;
-  for (int i = 0; i < kernel_.size(); ++i) {
+  for (size_t i = 0; i < kernel_.size(); ++i) {
     CAFFE_ENFORCE_EQ(filter.dim32(i + 1), kernel_[i]);
     kernel_dims_size *= kernel_[i];
   }

caffe2/operators/conv_pool_op_base.h

+4 −4

@@ -138,7 +138,7 @@ class ConvPoolOpBase : public Operator<Context> {
     }

     if (global_pooling_) {
-      for (int dim = 0; dim < kernel_.size(); ++dim) {
+      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
         CAFFE_ENFORCE(
             pads_[2 * dim] == 0 && pads_[2 * dim + 1] == 0 &&
                 dilation_[dim] == 1 && stride_[dim] == 1,
@@ -151,7 +151,7 @@ class ConvPoolOpBase : public Operator<Context> {
     // need to clean this up.
     if (operator_def.name().find("Conv") == 0 ||
         operator_def.name().find("Pool") != std::string::npos) {
-      for (int dim = 0; dim < kernel_.size(); ++dim) {
+      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
         CAFFE_ENFORCE_GE(pads_[dim], 0);
         CAFFE_ENFORCE_GE(pads_[kernel_.size() + dim], 0);
         CAFFE_ENFORCE(
@@ -161,7 +161,7 @@ class ConvPoolOpBase : public Operator<Context> {
       }
     }

-    for (int dim = 0; dim < kernel_.size(); ++dim) {
+    for (size_t dim = 0; dim < kernel_.size(); ++dim) {
       CAFFE_ENFORCE_GE(kernel_[dim], 0);
       CAFFE_ENFORCE_GE(dilation_[dim], 0);
       CAFFE_ENFORCE_GE(stride_[dim], 0);
@@ -408,7 +408,7 @@ class ConvPoolOpBase : public Operator<Context> {

   bool RunOnDevice() override {
     if (!global_pooling_) {
-      for (int dim = 0; dim < kernel_.size(); ++dim) {
+      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
         CAFFE_ENFORCE_GT(kernel_[dim], 0);
       }
     }

caffe2/operators/ctc_greedy_decoder_op.cc

+1 −1

@@ -57,7 +57,7 @@ bool CTCGreedyDecoderOp<CPUContext>::RunOnDevice() {
   auto* values =
       Output(VALUES, vector<int64_t>{values_cach_size}, at::dtype<int>());
   int* values_data = values->mutable_data<int>();
-  for (int i = 0; i < values_cach.size(); ++i) {
+  for (size_t i = 0; i < values_cach.size(); ++i) {
     values_data[i] = values_cach.at(i);
   }
   values_cach.clear();

caffe2/operators/dataset_ops.cc

+5 −5

@@ -30,7 +30,7 @@ TreeIterator::TreeIterator(const std::vector<std::string>& fields) {
   // populate field vector and split field names
   fields_.resize(fields.size());
   std::vector<std::vector<std::string>> nameParts(fields_.size());
-  for (int i = 0; i < fields.size(); ++i) {
+  for (size_t i = 0; i < fields.size(); ++i) {
     auto& field = fields_.at(i);
     field.name = fields[i];
     field.id = i;
@@ -49,7 +49,7 @@ TreeIterator::TreeIterator(const std::vector<std::string>& fields) {
   // find length-field with maximum prefix matching for each field
   for (auto& field : fields_) {
     // by default, we are matching against the root domain
-    int maxMatchLevel = 1;
+    size_t maxMatchLevel = 1;
     int maxMatchLengthFieldId = -1;
     for (int j = 0; j < numLengthFields(); ++j) {
       const auto& lenField = lengthField(j);
@@ -260,12 +260,12 @@ class CheckDatasetConsistencyOp : public Operator<CPUContext> {
     sizes.resize(iterator_.numOffsetFields());
     // gather length data
     lengths.resize(iterator_.numLengthFields());
-    for (int i = 0; i < lengths.size(); ++i) {
+    for (size_t i = 0; i < lengths.size(); ++i) {
      lengths[i] = Input(iterator_.lengthField(i).id).data<TLength>();
     }
     // gather size limits
     limits.assign(sizes.size(), std::numeric_limits<TOffset>::max());
-    for (int i = 0; i < iterator_.fields().size(); ++i) {
+    for (size_t i = 0; i < iterator_.fields().size(); ++i) {
       int lengthIdx = iterator_.fields()[i].lengthFieldId + 1;
       CAFFE_ENFORCE_GT(Input(i).dim(), 0);
       TOffset size = (TOffset)Input(i).sizes()[0];
@@ -290,7 +290,7 @@ class CheckDatasetConsistencyOp : public Operator<CPUContext> {
     // advance to the end
     offsets.assign(sizes.size(), 0);
     iterator_.advance(lengths, offsets, sizes, limits, limits[0]);
-    for (int i = 0; i < limits.size(); ++i) {
+    for (size_t i = 0; i < limits.size(); ++i) {
       CAFFE_ENFORCE(limits[i] == offsets[i]);
     }
     return true;

caffe2/operators/expand_squeeze_dims_op.h

+2 −2

@@ -89,9 +89,9 @@ class SqueezeOp : public Operator<Context> {
   static std::vector<int> ComputeDims(
       at::IntArrayRef inputDims,
       std::vector<int> dims) {
-    int j = 0;
+    size_t j = 0;
     std::vector<int> newDims;
-    for (int i = 0; i < inputDims.size(); ++i) {
+    for (size_t i = 0; i < inputDims.size(); ++i) {
       if (j < dims.size() && dims[j] == i) {
         CAFFE_ENFORCE_EQ(
             inputDims[i],

caffe2/operators/experimental/c10/cpu/concat_cpu.cc

+4 −4

@@ -27,7 +27,7 @@ void concat_op_cpu_impl(
   int adj_size = Tensor(inputs[0]).dim() + (add_axis ? 1 : 0);
   int canonical_axis = caffe2::canonical_axis_index_(axis, adj_size);
   CAFFE_ENFORCE_LT(canonical_axis, adj_size, "Axis not in input ndim range.");
-  for (int i = 1; i < inputs.size(); ++i) {
+  for (size_t i = 1; i < inputs.size(); ++i) {
     CAFFE_ENFORCE(
         Tensor(inputs[i]).dtype() == Tensor(inputs[0]).dtype(),
         "All inputs must have the same type, expected: ",
@@ -51,7 +51,7 @@ void concat_op_cpu_impl(
       after *= dim;
     }
     // check the input dims are compatible.
-    for (int j = 1; j < inputs.size(); ++j) {
+    for (size_t j = 1; j < inputs.size(); ++j) {
       int dim_j = Tensor(inputs[j]).dim32(i);
       CAFFE_ENFORCE(
           dim == dim_j,
@@ -75,7 +75,7 @@ void concat_op_cpu_impl(
   }

   int output_channels = 0;
-  for (int i = 0; i < inputs.size(); ++i) {
+  for (size_t i = 0; i < inputs.size(); ++i) {
     axis_data[i] = add_axis ? 1 : Tensor(inputs[i]).dim32(canonical_axis);
     output_channels += axis_data[i];
   }
@@ -86,7 +86,7 @@ void concat_op_cpu_impl(
   }
   output.Resize(output_dims);
   size_t output_offset = 0;
-  for (int i = 0; i < inputs.size(); ++i) {
+  for (size_t i = 0; i < inputs.size(); ++i) {
     Tensor input(inputs[i]);
     auto axis_dim = add_axis ? 1 : input.dim32(canonical_axis);
     caffe2::math::CopyMatrix<Context>(

caffe2/operators/find_duplicate_elements_op.h

+1 −1

@@ -43,7 +43,7 @@ class FindDuplicateElementsOp final : public Operator<Context> {

     auto* output = Output(0, {static_cast<int64_t>(dupSize)}, at::dtype<int64_t>());
     auto* out_ptr = output->template mutable_data<int64_t>();
-    for (int64_t i = 0; i < dupSize; ++i) {
+    for (size_t i = 0; i < dupSize; ++i) {
       out_ptr[i] = dupIndices[i];
     }

caffe2/operators/onnxifi_op.h

+1 −1

@@ -56,7 +56,7 @@ class OnnxifiOp final : public Operator<Context> {
       if (!output_shape_hint.empty()) {
         TensorInfo info;
         info.onnxifi_type = output_shape_hint.front();
-        for (int i = 1; i < output_shape_hint.size(); ++i) {
+        for (size_t i = 1; i < output_shape_hint.size(); ++i) {
           info.dims.push_back(output_shape_hint[i]);
         }
         output_shape_hints_.emplace(output_idx, std::move(info));

caffe2/operators/reshape_op.h

+1 −1

@@ -59,7 +59,7 @@ class ReshapeOp : public Operator<Context> {
     }

     // Copy over the dimensions for those that are specified zero.
-    for (int i = 0; i < actual_new_shape.size() && i < input.dim(); ++i) {
+    for (size_t i = 0; i < actual_new_shape.size() && i < input.dim(); ++i) {
       if (actual_new_shape[i] == 0) {
         actual_new_shape[i] = input.size(i);
       }

caffe2/operators/slice_op.h

+2 −2

@@ -129,7 +129,7 @@ bool SliceImpl(

   char* src_offset_bytes = src_bytes + itemsize * src_offset;
   char* dst_offset_bytes = dst_bytes;
-  for (int i = 0; i < num_blocks; ++i) {
+  for (size_t i = 0; i < num_blocks; ++i) {
     char* local_src_offset_bytes =
         src_offset_bytes + i * src_block_size_bytes;
     char* local_dst_offset_bytes =
@@ -175,7 +175,7 @@ bool SliceImpl(
     return true;
   }

-  for (int i = 0; i < num_blocks; ++i) {
+  for (size_t i = 0; i < num_blocks; ++i) {
     char* local_src_offset_bytes =
         src_offset_bytes + i * src_block_size_bytes;
     char* local_dst_offset_bytes =

caffe2/operators/utility_ops.h

+1 −1

@@ -1107,7 +1107,7 @@ class GatherRangesOp : public Operator<Context> {
   template <typename Index>
   size_t accumulate(Index* ranges, size_t start, size_t end) {
     size_t result = 0;
-    for (int i = start + 1; i < end; i += 2) {
+    for (size_t i = start + 1; i < end; i += 2) {
       result += ranges[i];
     }
     return result;

caffe2/queue/blobs_queue.cc

+2 −2

@@ -36,10 +36,10 @@ BlobsQueue::BlobsQueue(
     stats_.queue_dequeued_bytes.setDetails(fieldNames);
   }
   queue_.reserve(capacity);
-  for (auto i = 0; i < capacity; ++i) {
+  for (size_t i = 0; i < capacity; ++i) {
     std::vector<Blob*> blobs;
     blobs.reserve(numBlobs);
-    for (auto j = 0; j < numBlobs; ++j) {
+    for (size_t j = 0; j < numBlobs; ++j) {
       const auto blobName = queueName + "_" + to_string(i) + "_" + to_string(j);
       if (enforceUniqueName) {
         CAFFE_ENFORCE(

caffe2/queue/rebatching_queue.cc

+3 −3

@@ -19,20 +19,20 @@ void concat(
   // Precompute the output sizes to avoid resizing
   std::vector<std::vector<int64_t>> outputDims(numTensors);

-  for (int i = 0; i < numTensors; ++i) {
+  for (size_t i = 0; i < numTensors; ++i) {
     SmartTensorPrinter::PrintTensor(inputZero.at(i));
     outputDims[i] = inputZero.at(i).sizes().vec();
     outputDims[i].insert(outputDims[i].begin(), numRows);
   }

   // Resize to the final output size
   std::vector<void*> destinations(numTensors);
-  for (int i = 0; i < numTensors; ++i) {
+  for (size_t i = 0; i < numTensors; ++i) {
     outputs[i]->Resize(outputDims[i]);
     destinations[i] = outputs[i]->raw_mutable_data(inputZero[i].meta());
   }

-  for (int i = 0; i < numRows; ++i) {
+  for (size_t i = 0; i < numRows; ++i) {
     CAFFE_ENFORCE_EQ(inputs[i].size(), numTensors);

     for (int j = 0; j < numTensors; ++j) {

caffe2/transforms/pattern_net_transform.h

+1 −1

@@ -28,7 +28,7 @@ class CAFFE2_API PatternNetTransform : public Transform {
         "External outputs do not match!");
     ordered_ops_ = GetPatternTraversalOrder(p_);
     inverse_ops_.resize(ordered_ops_.size());
-    for (int i = 0; i < ordered_ops_.size(); i++) {
+    for (size_t i = 0; i < ordered_ops_.size(); i++) {
       inverse_ops_[ordered_ops_[i]] = i;
     }
   }
