@@ -32,6 +32,7 @@
     'aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
     'aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
     'aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor',
+    # 'aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor',
     'aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)',
     'aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)',
     'aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor',
@@ -75,7 +76,7 @@
     'aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor',
     'aten::gelu(Tensor self) -> Tensor',
     'aten::gelu_backward(Tensor grad, Tensor self) -> Tensor',
-    'aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=0, int? end=9223372036854775807, int step=1) -> Tensor(a)',
+    'aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor(a)',
     'aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)',
     'aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)',
     'aten::unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[]',
@@ -112,6 +113,10 @@
     'aten::div.Scalar(Tensor self, Scalar other) -> Tensor',
     'aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
     'aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)',
+    'aten::to.dtype_layout(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor',
+    'aten::to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor',
+    'aten::to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor',
+    'aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor',
 ]

 _FN_IPEX_FUNCS_WITH_SIMPLE_ATEN_SIG = [
@@ -126,6 +131,41 @@
     'aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
 ]

+_FN_EXCLUDE_FUNCS_WITH_SIMPLE_ATEN_SIG = [
+    "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor",
+    "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor",
+    "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor",
+    "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding=\"valid\", int[1] dilation=1, int groups=1) -> Tensor",
+    "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding=\"valid\", int[2] dilation=1, int groups=1) -> Tensor",
+    "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding=\"valid\", int[3] dilation=1, int groups=1) -> Tensor",
+    "aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor",
+    "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
+    "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
+    "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor",
+    "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor",
+    "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor",
+    "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
+    "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor",
+    "aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor",
+    "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor",
+    "aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor",
+    "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)",
+    "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)",
+    "aten::dropout(Tensor input, float p, bool train) -> Tensor",
+    "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)",
+    "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor",
+    "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor",
+    "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)",
+    "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor",
+    "aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)",
+    "aten::reshape(Tensor(a) self, int[] shape) -> Tensor(a)",
+    "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor",
+    "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor",
+    "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor",
+    "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor",
+    "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor",
+]
+
 _SHALLOW_FALLBACK_TO_CPU_TENSOR_LIST = 'shallowFallbackToCPUTensorList'
 _SHALLOW_FALLBACK_TO_CPU_TENSOR = 'shallowFallbackToCPUTensor'
 _SHALLOW_UPGRADE_TO_DPCPP_TENSOR = 'shallowUpgradeToDPCPPTensor'
@@ -221,6 +261,13 @@ def is_dnnl_func(self, simple_aten_sig):
                 return True
         return False

+    def is_exclude_func(self, simple_aten_sig):
+        stripped_str = simple_aten_sig.replace(' ', '')
+        for item in _FN_EXCLUDE_FUNCS_WITH_SIMPLE_ATEN_SIG:
+            if stripped_str == item.replace(' ', ''):
+                return True
+        return False
+
     def is_ipex_func(self, simple_aten_sig):
         stripped_str = simple_aten_sig.replace(' ', '')
         for item in _FN_IPEX_FUNCS_WITH_SIMPLE_ATEN_SIG:
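The new is_exclude_func follows the same matching rule as the existing is_dnnl_func / is_ipex_func checks: schemas are compared as exact strings after removing spaces, so an entry in _FN_EXCLUDE_FUNCS_WITH_SIMPLE_ATEN_SIG only matches if it reproduces the ATen schema verbatim (annotations, defaults, and overload names included). A minimal standalone sketch of that rule, with a one-entry list used purely for illustration:

    _EXCLUDE = [
        "aten::reshape(Tensor(a) self, int[] shape) -> Tensor(a)",
    ]

    def is_excluded(simple_aten_sig):
        stripped = simple_aten_sig.replace(' ', '')
        return any(stripped == item.replace(' ', '') for item in _EXCLUDE)

    # Whitespace differences do not matter...
    assert is_excluded("aten::reshape( Tensor(a) self ,  int[] shape ) -> Tensor(a)")
    # ...but any other difference (here, missing alias annotations) is a miss.
    assert not is_excluded("aten::reshape(Tensor self, int[] shape) -> Tensor")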
@@ -580,6 +627,9 @@ def is_conv_overrideable_func(fname):

         func_defs = []
         for cpp_sig, aten_sig, native_cpp_sig, cpp_func_sig_str, aten_func_sig_str in self._sigs:
+            if self.is_exclude_func(aten_func_sig_str):
+                continue
+
             # The operator name should be unique because the new registration mechanism of PyTorch 1.7
             new_cpp_func_name = aten_sig.def_name.replace('.', '_')
             cpp_func_str_h, cpp_func_str_cpp = self.gen_func_signature(cpp_func_sig_str, cpp_sig.def_name, new_cpp_func_name)