Skip to content

Commit 4404762

Browse files
ezyang
authored and facebook-github-bot committed
Rename IntList to IntArrayRef. (pytorch#16751)
Summary: Pull Request resolved: pytorch#16751 This was made more complicated by the fact that ivalue::IntList is a thing. So I had to fix all of the sites where we were referring to IValue post facto. The following codemods were run, in this order: ``` codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntList IntArrayRef codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in IntArrayRef::create IntList::create codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in ivalue::IntArrayRef ivalue::IntList codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in Tag::IntArrayRef Tag::IntList codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in isIntArrayRef isIntList codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in toIntArrayRef toIntList codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'Shared<IntArrayRef>' 'Shared<IntList>' codemod -m -d . --extensions cc,cpp,cu,cuh,h,hpp,py,cwrap,yaml,in 'intrusive_ptr<IntArrayRef>' 'intrusive_ptr<IntList>' ``` Some manual fixups were done afterwards; they can be reviewed separately at pytorch#16752 Reviewed By: dzhulgakov Differential Revision: D13954363 fbshipit-source-id: b5c40aacba042402155a2f5a229fa6db7992ac64
1 parent e2d3a3f commit 4404762

File tree

169 files changed

+1195
-1190
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

169 files changed

+1195
-1190
lines changed

aten/src/ATen/CPUApplyUtils.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ inline std::pair<int64_t, int64_t> collapse_dims(
127127
*/
128128

129129
inline Tensor sort_strides(Tensor& tensor_) {
130-
IntList strides = tensor_.strides();
130+
IntArrayRef strides = tensor_.strides();
131131
std::vector<int64_t> indices;
132132
indices.reserve(tensor_.ndimension());
133133
for (int64_t i = 0; i < tensor_.ndimension(); i++) {

aten/src/ATen/DLConvertor.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -172,8 +172,8 @@ Tensor fromDLPack(const DLManagedTensor* src) {
172172
src->deleter(const_cast<DLManagedTensor*>(src));
173173
};
174174
return at::from_blob(src->dl_tensor.data,
175-
IntList(src->dl_tensor.shape, src->dl_tensor.ndim),
176-
IntList(src->dl_tensor.strides, src->dl_tensor.ndim),
175+
IntArrayRef(src->dl_tensor.shape, src->dl_tensor.ndim),
176+
IntArrayRef(src->dl_tensor.strides, src->dl_tensor.ndim),
177177
deleter,
178178
at::device(device_type).dtype(stype));
179179
}

aten/src/ATen/Declarations.cwrap

+3-3
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,8 @@
2929
- THTensor* self
3030
- THStorage* source
3131
- long storage_offset
32-
- IntListSize size
33-
- arg: IntList stride
32+
- IntArrayRefSize size
33+
- arg: IntArrayRef stride
3434
default: {}
3535
]]
3636
[[
@@ -131,7 +131,7 @@
131131
return: THTensor*
132132
arguments:
133133
- THTensor* self
134-
- arg: IntListSize size
134+
- arg: IntArrayRefSize size
135135
long_args: True
136136
]]
137137
[[

aten/src/ATen/ExpandUtils.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
namespace at {
44

5-
std::vector<int64_t> infer_size(IntList a, IntList b) {
5+
std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b) {
66
auto dimsA = a.size();
77
auto dimsB = b.size();
88
ptrdiff_t ndim = dimsA > dimsB ? dimsA : dimsB;
@@ -29,9 +29,9 @@ std::vector<int64_t> infer_size(IntList a, IntList b) {
2929
}
3030

3131
std::tuple<std::vector<int64_t>, std::vector<int64_t>> inferExpandGeometry(
32-
IntList tensor_sizes,
33-
IntList tensor_strides,
34-
IntList sizes) {
32+
IntArrayRef tensor_sizes,
33+
IntArrayRef tensor_strides,
34+
IntArrayRef sizes) {
3535
int64_t ndim = sizes.size();
3636
int64_t tensor_dim = tensor_sizes.size();
3737

aten/src/ATen/ExpandUtils.h

+9-9
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,12 @@
99

1010
namespace at {
1111

12-
CAFFE2_API std::vector<int64_t> infer_size(IntList a, IntList b);
12+
CAFFE2_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
1313
CAFFE2_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
1414
inferExpandGeometry(
15-
IntList tensor_sizes,
16-
IntList tensor_strides,
17-
IntList sizes);
15+
IntArrayRef tensor_sizes,
16+
IntArrayRef tensor_strides,
17+
IntArrayRef sizes);
1818

1919
// avoid copy-construction of Tensor by using a reference_wrapper.
2020
inline void check_defined(std::initializer_list<std::reference_wrapper<const Tensor>> tensors, const char *api_name) {
@@ -93,15 +93,15 @@ inline std::tuple<Tensor, Tensor, Tensor> expand_outplace(const Tensor &to_expan
9393
return expand_outplace(to_expand1, to_expand2, to_expand3);
9494
}
9595

96-
inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntList sizes) {
96+
inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntArrayRef sizes) {
9797
if(to_expand.sizes().equals(sizes)) {
9898
return std::make_tuple(to_expand);
9999
}
100100

101101
return std::make_tuple(to_expand.expand(sizes, /*implicit=*/true)); // see [expand implicit]
102102
}
103103

104-
inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntList sizes, const char *api_name) {
104+
inline std::tuple<Tensor> expand_size(const Tensor &to_expand, IntArrayRef sizes, const char *api_name) {
105105
check_defined({to_expand}, api_name);
106106
return expand_size(to_expand, sizes);
107107
}
@@ -136,12 +136,12 @@ inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
136136

137137
// Sums `tensor` repeatedly to produce a tensor of shape `shape`.
138138
// Precondition: is_expandable_to(shape, tensor.sizes()) must be true
139-
static inline Tensor sum_to(Tensor tensor, const IntList shape) {
139+
static inline Tensor sum_to(Tensor tensor, const IntArrayRef shape) {
140140
if (shape.size() == 0) {
141141
return tensor.sum();
142142
}
143143
c10::SmallVector<int64_t, 8> reduce_dims;
144-
const at::IntList sizes = tensor.sizes();
144+
const at::IntArrayRef sizes = tensor.sizes();
145145
const int64_t leading_dims = sizes.size() - shape.size();
146146
for (int64_t i = 0; i < leading_dims; ++i) {
147147
reduce_dims.push_back(i);
@@ -158,7 +158,7 @@ static inline Tensor sum_to(Tensor tensor, const IntList shape) {
158158
}
159159

160160
// True if `shape` can be broadcasted to `desired`
161-
static inline bool is_expandable_to(IntList shape, IntList desired) {
161+
static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
162162
int ndim = shape.size();
163163
int target_dim = desired.size();
164164
if (ndim > target_dim) {

aten/src/ATen/InferSize.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ namespace at {
99

1010
// Infers the size of a dim with size -1, if it exists. Also checks that new
1111
// shape is compatible with the number of elements.
12-
static std::vector<int64_t> infer_size(IntList shape, int64_t numel) {
12+
static std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
1313
auto res = shape.vec();
1414
int64_t newsize = 1;
1515
auto infer_dim = c10::optional<int64_t>();

aten/src/ATen/SparseTensorImpl.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,10 @@ SparseTensorImpl::SparseTensorImpl(at::TensorTypeId type_id, const caffe2::TypeM
3737
, indices_(at::empty({1, 0}, at::initialTensorOptions().device(sparseTensorIdToDeviceType(type_id)).dtype(ScalarType::Long)))
3838
, values_(at::empty({0}, at::initialTensorOptions().device(sparseTensorIdToDeviceType(type_id)).dtype(data_type))) {}
3939

40-
IntList SparseTensorImpl::sizes() const {
40+
IntArrayRef SparseTensorImpl::sizes() const {
4141
return sizes_;
4242
}
43-
IntList SparseTensorImpl::strides() const {
43+
IntArrayRef SparseTensorImpl::strides() const {
4444
AT_ERROR("sparse tensors do not have strides");
4545
}
4646
bool SparseTensorImpl::is_contiguous() const {
@@ -98,7 +98,7 @@ void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, cons
9898
auto dense_size_original = sizes().slice(sparse_dim_);
9999
std::vector<int64_t> expected_values_size_vec = {values.size(0)};
100100
expected_values_size_vec.insert(expected_values_size_vec.end(), dense_size_original.begin(), dense_size_original.end());
101-
IntList expected_values_size(expected_values_size_vec);
101+
IntArrayRef expected_values_size(expected_values_size_vec);
102102
auto new_values_size = values.sizes();
103103
AT_CHECK(
104104
std::equal(expected_values_size.begin(), expected_values_size.end(), new_values_size.begin()),

aten/src/ATen/SparseTensorImpl.h

+5-5
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
4040
Tensor indices() const { return indices_; }
4141
Tensor values() const { return values_; }
4242

43-
IntList sizes() const override;
44-
IntList strides() const override;
43+
IntArrayRef sizes() const override;
44+
IntArrayRef strides() const override;
4545
bool is_contiguous() const override;
4646
int64_t stride(int64_t d) const override;
4747
void resize_dim(int64_t ndim) override;
@@ -56,7 +56,7 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
5656

5757
// WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim with
5858
// respect to indices and values
59-
void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
59+
void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
6060
AT_CHECK(allow_tensor_metadata_change(), "raw_resize_ is not allowed on Tensor created from .data or .detach()");
6161
sizes_ = size.vec();
6262
sparse_dim_ = sparse_dim;
@@ -86,7 +86,7 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
8686
// and for API consistency we don't support it).
8787
// 4. When we attempt to shrink the size of any of the sparse dimensions on a non-empty sparse tensor
8888
// (this could make some of the stored indices out-of-bound and thus unsafe).
89-
void resize_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
89+
void resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
9090
AT_CHECK(allow_tensor_metadata_change(), "resize_ is not allowed on Tensor created from .data or .detach()");
9191
AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
9292
if (nnz() > 0) {
@@ -144,7 +144,7 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
144144
}
145145

146146
// NOTE: this function will resize the sparse tensor and also set `indices` and `values` to empty.
147-
void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntList size) {
147+
void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
148148
AT_CHECK(allow_tensor_metadata_change(), "resize_and_clear_ is not allowed on Tensor created from .data or .detach()");
149149
AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
150150

aten/src/ATen/SparseTensorUtils.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) {
6666
// the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`.
6767
// if forceClone is true, the result will forced to be a clone of self.
6868
// if force_clone is true, the result will forced to be a clone of self.
69-
inline LongTensor flatten_indices(const Tensor& indices, IntList full_size, bool force_clone = false) {
69+
inline LongTensor flatten_indices(const Tensor& indices, IntArrayRef full_size, bool force_clone = false) {
7070
int64_t sparse_dim = indices.size(0);
7171
if (sparse_dim == 1) {
7272
if (force_clone) {
@@ -113,7 +113,7 @@ inline LongTensor flatten_indices(const Tensor& indices, IntList full_size, bool
113113
// Ex2:
114114
// dims_to_flatten = [1]
115115
// new_indices = [ 3, 1, 3 ] # uncoalesced
116-
inline LongTensor flatten_indices_by_dims(const LongTensor& indices, const IntList& sizes, const IntList& dims_to_flatten){
116+
inline LongTensor flatten_indices_by_dims(const LongTensor& indices, const IntArrayRef& sizes, const IntArrayRef& dims_to_flatten){
117117
LongTensor new_indices = at::zeros({indices.size(1)}, indices.options());
118118
for (auto d : dims_to_flatten) {
119119
new_indices.mul_(sizes[d]);

aten/src/ATen/TensorGeometry.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ namespace at {
88
struct CAFFE2_API TensorGeometry {
99
TensorGeometry() : storage_offset_(0) {}
1010

11-
explicit TensorGeometry(IntList sizes)
11+
explicit TensorGeometry(IntArrayRef sizes)
1212
: sizes_(sizes.vec())
1313
, strides_(sizes.size())
1414
, storage_offset_(0) {
@@ -35,12 +35,12 @@ struct CAFFE2_API TensorGeometry {
3535
dim = maybe_wrap_dim(dim, this->dim());
3636
return sizes_.at(static_cast<size_t>(dim));
3737
}
38-
IntList sizes() const { return IntList{ sizes_ }; }
38+
IntArrayRef sizes() const { return IntArrayRef{ sizes_ }; }
3939
int64_t stride(int64_t dim) const {
4040
dim = maybe_wrap_dim(dim, this->dim());
4141
return strides_.at(static_cast<size_t>(dim));
4242
}
43-
IntList strides() const { return IntList{ strides_ }; }
43+
IntArrayRef strides() const { return IntArrayRef{ strides_ }; }
4444
int64_t storage_offset() const { return storage_offset_; }
4545
int64_t numel() const { return numel_; }
4646

aten/src/ATen/TensorUtils.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {
4747
}
4848
}
4949

50-
void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntList sizes) {
50+
void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntArrayRef sizes) {
5151
checkDim(c, t, sizes.size());
5252
AT_CHECK(
5353
t->sizes().equals(sizes),
@@ -217,7 +217,7 @@ void * maybe_data_ptr(const TensorArg& tensor) {
217217
}
218218

219219
// See TensorUtils.h on why this is useful now that we cache is_contiguous.
220-
bool geometry_is_contiguous(IntList sizes, IntList strides) {
220+
bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides) {
221221
int64_t dim = sizes.size();
222222
int64_t expected_stride = 1;
223223
bool contig_if_nonempty = true;

aten/src/ATen/TensorUtils.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ CAFFE2_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
6969
CAFFE2_API void checkSize(
7070
CheckedFrom c,
7171
const TensorGeometryArg& t,
72-
IntList sizes);
72+
IntArrayRef sizes);
7373
CAFFE2_API void checkSize(
7474
CheckedFrom c,
7575
const TensorGeometryArg& t,
@@ -124,5 +124,5 @@ CAFFE2_API void* maybe_data_ptr(const TensorArg& tensor);
124124
// allows checking if a particular geometry is contiguous without explicitly
125125
// constructing a tensor, e.g., when you want to choose a kernel strategy based
126126
// on whether a subgeometry is contiguous.
127-
CAFFE2_API bool geometry_is_contiguous(IntList sizes, IntList strides);
127+
CAFFE2_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);
128128
}

aten/src/ATen/WrapDimUtilsMulti.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ namespace at {
1212

1313
constexpr size_t dim_bitset_size = 64;
1414

15-
static inline std::bitset<dim_bitset_size> dim_list_to_bitset(IntList dims, int64_t ndims) {
15+
static inline std::bitset<dim_bitset_size> dim_list_to_bitset(IntArrayRef dims, int64_t ndims) {
1616
AT_CHECK(ndims <= (int64_t) dim_bitset_size, "only tensors with up to ", dim_bitset_size, " dims are supported");
1717
std::bitset<dim_bitset_size> seen;
1818
for (size_t i = 0; i < dims.size(); i++) {

0 commit comments

Comments
 (0)