op_addmm.cpp
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/cpu/scalar_utils.h>
#include <executorch/kernels/portable/cpu/util/elementwise_util.h>
#include <executorch/kernels/portable/cpu/util/matmul_ops_util.h>
#include <executorch/kernels/portable/cpu/vec_ops.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = executorch::aten::Tensor;
using Scalar = executorch::aten::Scalar;
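
// addmm.out: out = beta * in + alpha * (mat1 @ mat2), where `in` may be
// broadcast to the shape of the matmul result.
//
// Worked example: with in = [[1, 1], [1, 1]], mat1 = [[1, 2], [3, 4]],
// mat2 = [[1, 0], [0, 1]], beta = 1, and alpha = 2:
//   out = 1 * in + 2 * (mat1 @ mat2) = [[3, 5], [7, 9]].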
Tensor& addmm_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    const Tensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha,
    Tensor& out) {
  ET_KERNEL_CHECK(
      ctx,
      check_addmm_args(in, mat1, mat2, beta, alpha, out),
      InvalidArgument,
      out);
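
  // The matmul result has shape (mat1.size(0), mat2.size(1)); resize out to
  // match before writing into it.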
  size_t output_ndim = 0;
  executorch::aten::SizesType output_sizes[kTensorDimensionLimit];
  get_mm_out_target_size(mat1, mat2, output_sizes, &output_ndim);
  ET_KERNEL_CHECK(
      ctx,
      resize_tensor(out, {output_sizes, output_ndim}) == Error::Ok,
      InvalidArgument,
      out);
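
  // addmm broadcasts `in` against the matmul result, so `in` must be
  // broadcastable to the resized out.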
  ET_KERNEL_CHECK(
      ctx, tensor_is_broadcastable_to(in, out), InvalidArgument, out);

  ET_KERNEL_CHECK(
      ctx,
      tensors_have_same_dim_order(in, mat1, mat2, out),
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);

  // @lint-ignore CLANGTIDY facebook-hte-CArray
  static constexpr const char op_name[] = "addmm.out";
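
  // ET_SWITCH_REALHBF16_TYPES dispatches on in.scalar_type(): the lambda
  // below is instantiated for each supported real, half, and bfloat16 dtype,
  // and the instantiation matching the runtime dtype runs with CTYPE bound
  // to the corresponding C++ type.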
  ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&]() {
    CTYPE alpha_val = utils::scalar_to<CTYPE>(alpha);
    CTYPE beta_val = utils::scalar_to<CTYPE>(beta);
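
    // mat1 is (m, n) and mat2 is (n, p); the matmul result and out are (m, p).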
    size_t m = mat1.size(0);
    size_t n = mat1.size(1);
    size_t p = mat2.size(1);

    if (out.sizes() == in.sizes()) {
      // vec_addmm assumes that no broadcasting is required.
      vec_addmm<CTYPE, CTYPE>(
          out.mutable_data_ptr<CTYPE>(),
          in.const_data_ptr<CTYPE>(),
          mat1.const_data_ptr<CTYPE>(),
          mat2.const_data_ptr<CTYPE>(),
          m,
          n,
          p,
          beta_val,
          alpha_val);
    } else {
      // If broadcasting is required, then compute the matmul and the
      // addition separately, using apply_bitensor_elementwise_fn to
      // perform the addition while applying broadcasting.
      vec_matmul<CTYPE, CTYPE>(
          out.mutable_data_ptr<CTYPE>(),
          mat1.const_data_ptr<CTYPE>(),
          mat2.const_data_ptr<CTYPE>(),
          m,
          n,
          p);
      utils::apply_bitensor_elementwise_fn<
          CTYPE,
          op_name,
          utils::SupportedTensorDtypes::REALHBF16>(
          [alpha_val, beta_val](const auto val_a, const auto val_b) {
            return val_a * alpha_val + val_b * beta_val;
          },
          ctx,
          out,
          utils::SupportedTensorDtypes::REALHBF16,
          in,
          utils::SupportedTensorDtypes::REALHBF16,
          out);
    }
  });

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch
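
For reference, a minimal sketch of exercising this kernel directly, assuming the test-only TensorFactory helper from executorch/runtime/core/exec_aten/testing_util/tensor_factory.h; exact namespaces and helper names vary across ExecuTorch releases, so treat it as illustrative rather than canonical. It reuses the values from the worked example in the doc comment above.

```cpp
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/kernel/kernel_includes.h>

using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using torch::executor::KernelRuntimeContext;
using torch::executor::testing::TensorFactory;

// Declaration matching the definition in op_addmm.cpp; in practice this
// would come from the generated kernel headers.
namespace torch {
namespace executor {
namespace native {
Tensor& addmm_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    const Tensor& mat1,
    const Tensor& mat2,
    const Scalar& beta,
    const Scalar& alpha,
    Tensor& out);
} // namespace native
} // namespace executor
} // namespace torch

int main() {
  TensorFactory<ScalarType::Float> tf;

  Tensor in = tf.ones({2, 2});                  // [[1, 1], [1, 1]]
  Tensor mat1 = tf.make({2, 2}, {1, 2, 3, 4});  // [[1, 2], [3, 4]]
  Tensor mat2 = tf.make({2, 2}, {1, 0, 0, 1});  // identity
  Tensor out = tf.zeros({2, 2});

  KernelRuntimeContext ctx;
  // beta = 1, alpha = 2: out = 1 * in + 2 * (mat1 @ mat2) = [[3, 5], [7, 9]]
  torch::executor::native::addmm_out(
      ctx, in, mat1, mat2, Scalar(1.0), Scalar(2.0), out);
  return 0;
}
```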