Commit 7aae51c

Roy Li authored and facebook-github-bot committed
Replace tensor.type().scalarType() calls with tensor.scalar_type()
Summary: Pull Request resolved: pytorch#17515

Reviewed By: ezyang

Differential Revision: D14233250

fbshipit-source-id: 6c7af8d2291c0c2b148001b30cf03834f34366c0
1 parent efed875 commit 7aae51c
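
Not part of the commit itself: the sketch below is a minimal, hypothetical standalone example (it assumes an ATen/libtorch build; the file name is illustrative) showing the call-pattern migration this commit applies mechanically across the files listed further down.

// scalar_type_example.cpp -- illustrative only, not a file touched by this commit.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::ones({2, 3}, at::kFloat);

  // Old pattern removed by this commit: query the dtype via the Type object.
  //   at::ScalarType st = t.type().scalarType();

  // New pattern: ask the Tensor for its dtype directly.
  at::ScalarType st = t.scalar_type();

  if (st == at::kFloat) {
    std::cout << "float tensor\n";
  }
  return 0;
}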

66 files changed (+215, -215 lines)

aten/src/ATen/SparseTensorImpl.cpp (+1 -1)

@@ -89,7 +89,7 @@ void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, cons
   AT_CHECK(!values.is_sparse(), "expected values to be a dense tensor, but got values of layout ", values.layout());
 
   AT_CHECK(values.type().toSparse() == legacyTensorType(*this), "values type must match sparse tensor type");
-  AT_CHECK(indices.type().scalarType() == kLong, "indices must be an int64 tensor");
+  AT_CHECK(indices.scalar_type() == kLong, "indices must be an int64 tensor");
   AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
   AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");

aten/src/ATen/TensorUtils.cpp (+2 -2)

@@ -142,15 +142,15 @@ void checkSameType(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
 
 void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType ty) {
   AT_CHECK(
-    t->type().scalarType() == ty,
+    t->scalar_type() == ty,
     "Expected tensor for ", t, " to have scalar type ", toString(ty),
     "; but got ", t->toString(), " instead (while checking arguments for ", c,
     ")");
 }
 
 void checkScalarTypes(CheckedFrom c, const TensorArg& t,
                       at::ArrayRef<ScalarType> l) {
-    if (std::find(l.begin(), l.end(), t->type().scalarType()) == l.end()) {
+    if (std::find(l.begin(), l.end(), t->scalar_type()) == l.end()) {
       std::ostringstream oss;
       oss << "Expected tensor for " << t << " to have one of the following "
           << "scalar types: ";

aten/src/ATen/core/TensorMethods.h (+2 -2)

@@ -1342,11 +1342,11 @@ inline bool is_sparse(Tensor self) {
 template <> \
 inline T* Tensor::data() const { \
   AT_CHECK( \
-    type().scalarType() == ScalarType::name, \
+    scalar_type() == ScalarType::name, \
     "expected scalar type ", \
     #name, \
     " but found ", \
-    c10::toString(type().scalarType())); \
+    c10::toString(scalar_type())); \
   return static_cast<T*>(this->data_ptr()); \
 }

aten/src/ATen/core/jit_type.h (+1 -1)

@@ -368,7 +368,7 @@ struct CAFFE2_API DimensionedTensorType : public TensorType {
 
  protected:
   DimensionedTensorType(const at::Tensor& tensor, TypeKind kind=TypeKind::DimensionedTensorType)
-    : DimensionedTensorType(tensor.type().scalarType(),
+    : DimensionedTensorType(tensor.scalar_type(),
                             tensor.device(),
                             tensor.dim(),
                             tensor.is_variable() && tensor.requires_grad(),

aten/src/ATen/cudnn/Types.cpp (+4 -4)

@@ -5,15 +5,15 @@
 namespace at { namespace native {
 
 cudnnDataType_t getCudnnDataType(const at::Tensor& tensor) {
-  if (tensor.type().scalarType() == at::kFloat) {
+  if (tensor.scalar_type() == at::kFloat) {
     return CUDNN_DATA_FLOAT;
-  } else if (tensor.type().scalarType() == at::kDouble) {
+  } else if (tensor.scalar_type() == at::kDouble) {
     return CUDNN_DATA_DOUBLE;
-  } else if (tensor.type().scalarType() == at::kHalf) {
+  } else if (tensor.scalar_type() == at::kHalf) {
     return CUDNN_DATA_HALF;
   }
   std::string msg("getCudnnDataType() not supported for ");
-  msg += toString(tensor.type().scalarType());
+  msg += toString(tensor.scalar_type());
   throw std::runtime_error(msg);
 }

aten/src/ATen/miopen/Types.cpp (+3 -3)

@@ -6,13 +6,13 @@
 namespace at { namespace native {
 
 miopenDataType_t getMiopenDataType(const at::Tensor& tensor) {
-  if (tensor.type().scalarType() == at::kFloat) {
+  if (tensor.scalar_type() == at::kFloat) {
     return miopenFloat;
-  } else if (tensor.type().scalarType() == at::kHalf) {
+  } else if (tensor.scalar_type() == at::kHalf) {
     return miopenHalf;
   }
   std::string msg("getMiopenDataType() not supported for ");
-  msg += toString(tensor.type().scalarType());
+  msg += toString(tensor.scalar_type());
   throw std::runtime_error(msg);
 }

aten/src/ATen/native/Convolution.cpp (+3 -3)

@@ -128,7 +128,7 @@ auto ConvParams::use_cudnn(const at::Tensor& input) const -> bool {
 
 auto ConvParams::use_miopen(const at::Tensor& input) const -> bool {
 
-  return ((input.type().scalarType() == at::kFloat) || (input.type().scalarType() == at::kHalf))
+  return ((input.scalar_type() == at::kFloat) || (input.scalar_type() == at::kHalf))
          && detail::getCUDAHooks().compiledWithMIOpen()
          && input.is_cuda()
          && input.dim() <= MIOPEN_DIM_MAX
@@ -140,7 +140,7 @@ auto ConvParams::use_miopen(const at::Tensor& input) const -> bool {
 auto ConvParams::use_mkldnn(const at::Tensor& input) const -> bool {
 #if AT_MKLDNN_ENABLED()
   return input.type().backend() == at::Backend::CPU &&
-         input.type().scalarType() == kFloat && // only on CPU Float Tensors
+         input.scalar_type() == kFloat && // only on CPU Float Tensors
          !is_dilated() && // doesn't support dilation
          !transposed && // or transposed tensors
          input.ndimension() == 4; // must be in NCHW format
@@ -151,7 +151,7 @@ auto ConvParams::use_nnpack(const at::Tensor& input) const -> bool {
 #if AT_NNPACK_ENABLED()
   return at::_nnpack_available() &&
          input.type().backend() == at::Backend::CPU &&
-         input.type().scalarType() == kFloat && // only on CPU Float Tensors
+         input.scalar_type() == kFloat && // only on CPU Float Tensors
          !is_strided() && // doesn't support strides
         !is_dilated() && // or dilation
         !transposed && // or transposed tensors

aten/src/ATen/native/Distance.cpp (+3 -3)

@@ -18,18 +18,18 @@ Tensor pairwise_distance(const Tensor& x1, const Tensor& x2, double p, double ep
 Tensor pdist(const Tensor& self, const double p) {
   AT_CHECK(self.dim() == 2,
            "pdist only supports 2D tensors, got: ", self.dim(), "D");
-  AT_CHECK(at::isFloatingType(self.type().scalarType()), "pdist only supports floating-point dtypes");
+  AT_CHECK(at::isFloatingType(self.scalar_type()), "pdist only supports floating-point dtypes");
   AT_CHECK(p >= 0, "pdist only supports non-negative p values");
   return at::_pdist_forward(self.contiguous(), p);
 }
 
 Tensor cdist(const Tensor& x1, const Tensor& x2, const double p) {
   AT_CHECK(x1.dim() == 2, "cdist only supports 2D tensors, X1 got: ", x1.dim(), "D");
-  AT_CHECK(at::isFloatingType(x1.type().scalarType()), "cdist only supports floating-point dtypes, X1 got: ", x1.type().scalarType());
+  AT_CHECK(at::isFloatingType(x1.scalar_type()), "cdist only supports floating-point dtypes, X1 got: ", x1.scalar_type());
   auto device1 = x1.type().device_type();
   AT_CHECK(device1 == kCPU || device1 == kCUDA, "cdist only supports CPU and CUDA devices, X1 got: ", device1);
   AT_CHECK(x2.dim() == 2, "cdist only supports 2D tensors, X2 got: ", x2.dim(), "D");
-  AT_CHECK(at::isFloatingType(x1.type().scalarType()), "cdist only supports floating-point dtypes, X2 got: ", x2.type().scalarType());
+  AT_CHECK(at::isFloatingType(x1.scalar_type()), "cdist only supports floating-point dtypes, X2 got: ", x2.scalar_type());
   auto device2 = x2.type().device_type();
   AT_CHECK(device2 == kCPU || device2 == kCUDA, "cdist only supports CPU and CUDA devices, X2 got: ", device2);
   AT_CHECK(p >= 0, "cdist only supports non-negative p values");

aten/src/ATen/native/Indexing.cpp (+2 -2)

@@ -91,7 +91,7 @@ static std::vector<Tensor> expandByteTensors(const Tensor & self, TensorList ind
   // Expands byte tensors (masks) into the equivalent indexing by LongTensors
   std::vector<Tensor> result;
   for (auto & index : indices) {
-    if (index.type().scalarType() == kByte) {
+    if (index.scalar_type() == kByte) {
       // The sizes of the ByteTensor mask must match the sizes of the
       // corresponding dimensions in self
       for (int64_t j = 0; j < index.dim(); j++) {
@@ -475,7 +475,7 @@ Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Ten
   if (source.dim() == 0 && numIndices != 1) {
     AT_INDEX_ERROR("index_copy_(): When source is scalar, index should have one element (got ", numIndices, ")");
   }
-  if (index.type().scalarType() != ScalarType::Long) {
+  if (index.scalar_type() != ScalarType::Long) {
     AT_INDEX_ERROR("index_copy_(): Expected LongTensor for index");
   }

aten/src/ATen/native/LinearAlgebra.cpp (+10 -10)

@@ -35,7 +35,7 @@ static inline std::tuple<double, Tensor, int> _lu_det_P_diag_U_info(const Tensor
 }
 
 Tensor det(const Tensor& self) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) &&
+  AT_CHECK(at::isFloatingType(self.scalar_type()) &&
            self.dim() == 2 && self.size(0) == self.size(1),
           "det(", self.type(), "{", self.sizes(), "}): expected a 2D square tensor "
           "of floating types");
@@ -51,7 +51,7 @@ Tensor det(const Tensor& self) {
 }
 
 Tensor logdet(const Tensor& self) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) &&
+  AT_CHECK(at::isFloatingType(self.scalar_type()) &&
            self.dim() == 2 && self.size(0) == self.size(1),
           "logdet(", self.type(), "{", self.sizes(), "}): expected a 2D square tensor "
           "of floating types");
@@ -72,7 +72,7 @@ Tensor logdet(const Tensor& self) {
 }
 
 std::tuple<Tensor, Tensor> slogdet(const Tensor& self) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) &&
+  AT_CHECK(at::isFloatingType(self.scalar_type()) &&
            self.dim() == 2 && self.size(0) == self.size(1),
           "slogdet(", self.type(), "{", self.sizes(), "}): expected a 2D square tensor "
           "of floating types");
@@ -89,7 +89,7 @@ std::tuple<Tensor, Tensor> slogdet(const Tensor& self) {
 }
 
 Tensor pinverse(const Tensor& self, double rcond) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) && self.dim() == 2,
+  AT_CHECK(at::isFloatingType(self.scalar_type()) && self.dim() == 2,
           "pinverse(", self.type(), "{", self.sizes(), "}): expected a 2D tensor "
           "of floating types");
   if (self.numel() == 0) {
@@ -117,7 +117,7 @@ static inline Tensor _matrix_rank_helper(const Tensor& self, bool symmetric) {
 }
 
 Tensor matrix_rank(const Tensor& self, double tol, bool symmetric) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) && self.dim() == 2,
+  AT_CHECK(at::isFloatingType(self.scalar_type()) && self.dim() == 2,
           "matrix_rank(", self.type(), "{", self.sizes(), "}): expected a 2D tensor "
           "of floating types");
 
@@ -126,12 +126,12 @@ Tensor matrix_rank(const Tensor& self, double tol, bool symmetric) {
 }
 
 Tensor matrix_rank(const Tensor& self, bool symmetric) {
-  AT_CHECK(at::isFloatingType(self.type().scalarType()) && self.dim() == 2,
+  AT_CHECK(at::isFloatingType(self.scalar_type()) && self.dim() == 2,
           "matrix_rank(", self.type(), "{", self.sizes(), "}): expected a 2D tensor "
          "of floating types");
 
   Tensor S = _matrix_rank_helper(self, symmetric);
-  double tol = _get_epsilon(self.type().scalarType()) * std::max(self.size(0), self.size(1));
+  double tol = _get_epsilon(self.scalar_type()) * std::max(self.size(0), self.size(1));
   return (S > S.max().mul_(tol)).sum();
 }
 
@@ -484,7 +484,7 @@ Tensor& matmul_out(Tensor &result, const Tensor & tensor1, const Tensor & tensor
 }
 
 Tensor matrix_power(const Tensor& a, int64_t n) {
-  AT_CHECK(a.dim() >= 2 && at::isFloatingType(a.type().scalarType()),
+  AT_CHECK(a.dim() >= 2 && at::isFloatingType(a.scalar_type()),
           "matrix_power(", a.type(), "{", a.sizes(), "}): expected a tensor "
          "of floating types with dim at least 2");
   if (n == 0) {
@@ -532,7 +532,7 @@ Tensor frobenius_norm(const Tensor& self, IntArrayRef dim, bool keepdim) {
     dim.size(),
     " dimensions instead.");
   if (dim.size() == 1) {
-    return at::norm(self, 2, dim, keepdim, self.type().scalarType());
+    return at::norm(self, 2, dim, keepdim, self.scalar_type());
   }
   return at::sqrt(at::sum(self * self, dim, keepdim));
 }
@@ -548,7 +548,7 @@ Tensor &frobenius_norm_out(
     dim.size(),
     " dimensions instead.");
   if (dim.size() == 1) {
-    return at::norm_out(result, self, 2, dim, keepdim, self.type().scalarType());
+    return at::norm_out(result, self, 2, dim, keepdim, self.scalar_type());
   }
   return at::sqrt_out(result, at::sum(self * self, dim, keepdim));
 }

aten/src/ATen/native/LossCTC.cpp (+6 -6)

@@ -308,7 +308,7 @@ Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_
 std::tuple<Tensor, Tensor> ctc_loss_cpu(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK, bool zero_infinity) {
   (void)zero_infinity; // only used for backwards
   return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss", [&] {
-    if (targets.type().scalarType() == kLong) {
+    if (targets.scalar_type() == kLong) {
       return ctc_loss_cpu_template<scalar_t, kLong>(log_probs, targets, input_lengths, target_lengths, BLANK);
     } else {
       return ctc_loss_cpu_template<scalar_t, kInt>(log_probs, targets, input_lengths, target_lengths, BLANK);
@@ -319,7 +319,7 @@ std::tuple<Tensor, Tensor> ctc_loss_cpu(const Tensor& log_probs, const Tensor& t
 Tensor ctc_loss_backward_cpu(const Tensor& grad, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
                              const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
   return AT_DISPATCH_FLOATING_TYPES(log_probs.type(), "ctc_loss_backward", [&] {
-    if (targets.type().scalarType() == kLong) {
+    if (targets.scalar_type() == kLong) {
       return ctc_loss_backward_cpu_template<scalar_t,kLong>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
     } else {
       return ctc_loss_backward_cpu_template<scalar_t,kInt>(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, BLANK, zero_infinity);
@@ -338,8 +338,8 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef inpu
                     (detail::getCUDAHooks().versionCuDNN() >= 7000) &&
                     ctx.userEnabledCuDNN() &&
                     (BLANK == 0) && (targets.dim()==1) &&
-                    (log_probs.type().scalarType() == at::kFloat) &&
-                    (targets.type().scalarType() == at::kInt) &&
+                    (log_probs.scalar_type() == at::kFloat) &&
+                    (targets.scalar_type() == at::kInt) &&
                     (log_probs.type().backend() == Backend::CUDA);
 
   if (use_cudnn) {
@@ -374,8 +374,8 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef inpu
 
 // Convenience function accepting Tensors
 Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, const Tensor& input_lengths, const Tensor& target_lengths, int64_t BLANK, int64_t reduction, bool zero_infinity) {
-  AT_CHECK(isIntegralType(input_lengths.type().scalarType()), "input_lenghts must be integral");
-  AT_CHECK(isIntegralType(target_lengths.type().scalarType()), "target_lenghts must be integral");
+  AT_CHECK(isIntegralType(input_lengths.scalar_type()), "input_lenghts must be integral");
+  AT_CHECK(isIntegralType(target_lengths.scalar_type()), "target_lenghts must be integral");
 
   Tensor ilc = input_lengths.toType(kLong).toBackend(Backend::CPU).contiguous();
   Tensor tlc = target_lengths.toType(kLong).toBackend(Backend::CPU).contiguous();

aten/src/ATen/native/Normalization.cpp (+4 -4)

@@ -279,8 +279,8 @@ std::tuple<Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
 
   bool use_cudnn = false;
   use_cudnn = (input.is_cuda()
-               && (input.type().scalarType() != at::kHalf
-                 || weight.type().scalarType() == at::kFloat)
+               && (input.scalar_type() != at::kHalf
+                 || weight.scalar_type() == at::kFloat)
               && weight.defined() && bias.defined()
              && ((running_mean.defined() && running_var.defined())
                || (!running_mean.defined() && !running_var.defined() && training))
@@ -301,8 +301,8 @@ std::tuple<Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
 
   bool use_miopen = (input.is_cuda()
                     && input.dim() <= MIOPEN_DIM_MAX
-                    && input.type().scalarType() != at::kDouble
-                    && (weight.type().scalarType() != at::kHalf)
+                    && input.scalar_type() != at::kDouble
+                    && (weight.scalar_type() != at::kHalf)
                     && weight.defined() && bias.defined()
                     && ((running_mean.defined() && running_var.defined())
                       || (!running_mean.defined() && !running_var.defined() && training))
