# High Level Operations Status

|    | Operations                     | Input Variations | Converted | Removed | Fallback | Completed | Score |
|---:|:-------------------------------|-----------------:|----------:|--------:|---------:|:---------:|------:|
|  0 | aten._softmax.default          |                1 |         1 |       0 |        0 | ✅ | 1    |
|  1 | aten._to_copy.default          |                6 |         0 |       4 |        0 | 🚧 | 0.67 |
|  2 | aten._unsafe_view.default      |                1 |         0 |       0 |        0 | ✘ | 0    |
|  3 | aten.add.Tensor                |                3 |         3 |       0 |        0 | ✅ | 1    |
|  4 | aten.addmm.default             |                4 |         4 |       0 |        0 | ✅ | 1    |
|  5 | aten.arange.start              |                1 |         0 |       0 |        0 | ✘ | 0    |
|  6 | aten.baddbmm.default           |                1 |         1 |       0 |        0 | ✅ | 1    |
|  7 | aten.bmm.default               |                1 |         1 |       0 |        0 | ✅ | 1    |
|  8 | aten.clone.default             |                3 |         3 |       0 |        0 | ✅ | 1    |
|  9 | aten.cumsum.default            |                1 |         0 |       0 |        0 | ✘ | 0    |
| 10 | aten.embedding.default         |                1 |         1 |       0 |        0 | ✅ | 1    |
| 11 | aten.expand.default            |                2 |         1 |       1 |        0 | ✅ | 1    |
| 12 | aten.full.default              |                1 |         1 |       0 |        0 | ✅ | 1    |
| 13 | aten.lift_fresh_copy.default   |                1 |         0 |       0 |        0 | ✘ | 0    |
| 14 | aten.masked_fill.Scalar        |                2 |         0 |       0 |        0 | ✘ | 0    |
| 15 | aten.mm.default                |                1 |         1 |       0 |        0 | ✅ | 1    |
| 16 | aten.mul.Tensor                |                6 |         6 |       0 |        0 | ✅ | 1    |
| 17 | aten.native_layer_norm.default |                1 |         1 |       0 |        0 | ✅ | 1    |
| 18 | aten.permute.default           |                2 |         2 |       0 |        0 | ✅ | 1    |
| 19 | aten.pow.Tensor_Tensor         |                1 |         0 |       0 |        0 | ✘ | 0    |
| 20 | aten.rsub.Scalar               |                1 |         1 |       0 |        0 | ✅ | 1    |
| 21 | aten.select.int                |                3 |         0 |       0 |        0 | ✘ | 0    |
| 22 | aten.slice.Tensor              |                6 |         0 |       6 |        0 | ✅ | 1    |
| 23 | aten.sub.Tensor                |                1 |         1 |       0 |        0 | ✅ | 1    |
| 24 | aten.t.default                 |                5 |         5 |       0 |        0 | ✅ | 1    |
| 25 | aten.tanh.default              |                1 |         1 |       0 |        0 | ✅ | 1    |
| 26 | aten.transpose.int             |                1 |         1 |       0 |        0 | ✅ | 1    |
| 27 | aten.unsqueeze.default         |                5 |         4 |       0 |        0 | 🚧 | 0.8  |
| 28 | aten.view.default              |               13 |        11 |       0 |        2 | 🚧 | 0.85 |
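Each per-operation table below lists one row per distinct call signature (tensor shapes, dtypes, and scalar arguments) observed at the ATen level. As an illustration only (the shapes come from the tables; the snippet is not part of this generated report), the single `aten._softmax.default` variation can be reproduced directly through the `torch.ops.aten` namespace:

```python
import torch

# Reproduce the aten._softmax.default variation listed below:
# Tensor<[1, 16, 32, 32]> self, int dim = -1, bool half_to_float = False
x = torch.randn(1, 16, 32, 32)
y = torch.ops.aten._softmax.default(x, -1, False)

# The high-level call lowers to the same ATen op, so the results match.
assert torch.allclose(y, torch.softmax(x, dim=-1))
```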

### aten._softmax.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 16, 32, 32]> self = ?,<br>int dim = -1,<br>bool half_to_float = False | Done |

### aten._to_copy.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 32, 32]> self = ?,<br>Optional[int] dtype = torch.bfloat16 | Removed |
|  1 | Tensor<[1, 1, 32, 32]> self = ?,<br>Optional[int] dtype = torch.bool | None |
|  2 | Tensor<[1, 16, 32, 32]> self = ?,<br>Optional[int] dtype = torch.bfloat16 | Removed |
|  3 | Tensor<[1, 16, 32, 32]> self = ?,<br>Optional[int] dtype = torch.float32,<br>Optional[int] layout = torch.strided,<br>Optional[Device] device = cpu | None |
|  4 | Tensor<[16, 1, 32]> self = ?,<br>Optional[int] dtype = torch.bfloat16 | Removed |
|  5 | Tensor<[32, 32]> self = ?,<br>Optional[int] dtype = torch.bfloat16 | Removed |

### aten._unsafe_view.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 16, 96]> self = ?,<br>List[int] size = [1, 32, 1536] | None |

### aten.add.Tensor

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 1536]> self = ?,<br>Tensor<[1, 32, 1536]> other = ? | Done |
|  1 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor other = 1 | Done |
|  2 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor other = 1.0 | Done |

### aten.addmm.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1536]> self = ?,<br>Tensor<[32, 1536]> mat1 = ?,<br>Tensor<[1536, 1536]> mat2 = ? | Done |
|  1 | Tensor<[1536]> self = ?,<br>Tensor<[32, 6144]> mat1 = ?,<br>Tensor<[6144, 1536]> mat2 = ? | Done |
|  2 | Tensor<[4608]> self = ?,<br>Tensor<[32, 1536]> mat1 = ?,<br>Tensor<[1536, 4608]> mat2 = ? | Done |
|  3 | Tensor<[6144]> self = ?,<br>Tensor<[32, 1536]> mat1 = ?,<br>Tensor<[1536, 6144]> mat2 = ? | Done |

### aten.arange.start

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | number start = 1,<br>number end = 17,<br>Optional[int] dtype = torch.int32,<br>Optional[Device] device = cpu,<br>Optional[bool] pin_memory = False | None |

### aten.baddbmm.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[16, 1, 32]> self = ?,<br>Tensor<[16, 32, 96]> batch1 = ?,<br>Tensor<[16, 96, 32]> batch2 = ?,<br>number beta = 1.0,<br>number alpha = 0.10206207261596577 | Done |
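The `alpha` above is not arbitrary: 0.10206207261596577 is 1/sqrt(96), the usual attention scaling factor for a head dimension of 96, so this call computes `beta * self + alpha * (batch1 @ batch2)` as scaled attention scores. A minimal check with random data (shapes taken from the table; otherwise illustrative only):

```python
import math
import torch

assert math.isclose(1 / math.sqrt(96), 0.10206207261596577)

bias = torch.randn(16, 1, 32)   # self, broadcast over the 32 query rows
q = torch.randn(16, 32, 96)     # batch1
k_t = torch.randn(16, 96, 32)   # batch2
scores = torch.ops.aten.baddbmm.default(
    bias, q, k_t, beta=1.0, alpha=1 / math.sqrt(96)
)

# Same result as the explicit formulation beta * bias + alpha * (q @ k_t).
assert torch.allclose(scores, bias + (q @ k_t) / math.sqrt(96), atol=1e-5)
```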

### aten.bmm.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[16, 32, 32]> self = ?,<br>Tensor<[16, 32, 96]> mat2 = ? | Done |

### aten.clone.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 16, 32, 32]> self = ? | Done |
|  1 | Tensor<[1, 32, 1536]> self = ? | Done |
|  2 | Tensor<[1, 32, 16, 96]> self = ?,<br>Optional[int] memory_format = torch.contiguous_format | Done |

### aten.cumsum.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32]> self = ?,<br>int dim = -1 | None |

### aten.embedding.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[250880, 1536]> weight = ?,<br>Tensor<[1, 32]> indices = ? | Done |

### aten.expand.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 1, 32]> self = ?,<br>List[int] size = [1, 1, 32, 32] | Done |
|  1 | Tensor<[1, 1, 32, 32]> self = ?,<br>List[int] size = [1, 1, 32, 32] | Removed |

### aten.full.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | List[int] size = [32, 32],<br>number fill_value = -3.3895313892515355e+38,<br>Optional[Device] device = cpu,<br>Optional[bool] pin_memory = False | Done |

### aten.lift_fresh_copy.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor self = ? | None |

### aten.masked_fill.Scalar

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 32, 32]> self = ?,<br>Tensor<[1, 1, 32, 32]> mask = ?,<br>number value = -3.3895313892515355e+38 | None |
|  1 | Tensor<[1, 16, 32, 32]> self = ?,<br>Tensor<[1, 1, 32, 32]> mask = ?,<br>number value = -3.3895313892515355e+38 | None |
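The fill value -3.3895313892515355e+38 used here (and by `aten.full.default` above) is the most negative finite bfloat16 number, i.e. `torch.finfo(torch.bfloat16).min`, standing in for -inf in the attention mask. A small illustrative check (shapes from the table; the rest is an assumption about intent, not part of this report):

```python
import torch

# Masked positions are filled with the lowest finite bfloat16 value rather
# than -inf, so downstream bfloat16 arithmetic and softmax stay finite.
fill = torch.finfo(torch.bfloat16).min
assert fill == -3.3895313892515355e+38

scores = torch.randn(1, 16, 32, 32)
mask = torch.rand(1, 1, 32, 32) > 0.5      # bool mask, broadcast over heads
masked = torch.ops.aten.masked_fill.Scalar(scores, mask, fill)
```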

### aten.mm.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[32, 1536]> self = ?,<br>Tensor<[1536, 250880]> mat2 = ? | Done |

### aten.mul.Tensor

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor other = 0.044715 | Done |
|  1 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor other = 0.5 | Done |
|  2 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor other = 0.79788456 | Done |
|  3 | Tensor<[1, 32, 6144]> self = ?,<br>Tensor<[1, 32, 6144]> other = ? | Done |
|  4 | Tensor<[1, 32]> self = ?,<br>Tensor<[1, 32]> other = ? | Done |
|  5 | Tensor<[16, 1]> self = ?,<br>Tensor<[1, 1, 32]> other = ? | Done |

### aten.native_layer_norm.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 1536]> input = ?,<br>List[int] normalized_shape = [1536],<br>Optional[Tensor]<[1536]> weight = ?,<br>Optional[Tensor]<[1536]> bias = ?,<br>float eps = 1e-05 | Done |

### aten.permute.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 16, 32, 96]> self = ?,<br>List[int] dims = [0, 2, 1, 3] | Done |
|  1 | Tensor<[1, 32, 16, 96]> self = ?,<br>List[int] dims = [0, 2, 3, 1] | Done |

### aten.pow.Tensor_Tensor

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[]> self = ?,<br>Tensor<[16]> exponent = ? | None |

### aten.rsub.Scalar

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 32, 32]> self = ?,<br>number other = 1.0 | Done |

### aten.select.int

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 16, 3, 96]> self = ?,<br>int dim = 3,<br>int index = 0 | None |
|  1 | Tensor<[1, 32, 16, 3, 96]> self = ?,<br>int dim = 3,<br>int index = 1 | None |
|  2 | Tensor<[1, 32, 16, 3, 96]> self = ?,<br>int dim = 3,<br>int index = 2 | None |

### aten.slice.Tensor

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 1, 32]> self = ?,<br>int dim = 3,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |
|  1 | Tensor<[1, 1, 32, 32]> self = ?,<br>int dim = 2,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |
|  2 | Tensor<[1, 1, 32, 32]> self = ?,<br>int dim = 3,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |
|  3 | Tensor<[1, 1, 32]> self = ?,<br>int dim = 2,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |
|  4 | Tensor<[1, 32, 16, 96]> self = ?,<br>int dim = 3,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |
|  5 | Tensor<[1, 32]> self = ?,<br>int dim = 0,<br>Optional[int] start = 0,<br>Optional[int] end = 9223372036854775807 | Removed |

### aten.sub.Tensor

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32]> self = ?,<br>Tensor other = 1 | Done |

### aten.t.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1536, 1536]> self = ? | Done |
|  1 | Tensor<[1536, 6144]> self = ? | Done |
|  2 | Tensor<[250880, 1536]> self = ? | Done |
|  3 | Tensor<[4608, 1536]> self = ? | Done |
|  4 | Tensor<[6144, 1536]> self = ? | Done |

### aten.tanh.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 6144]> self = ? | Done |

### aten.transpose.int

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 32, 16, 96]> self = ?,<br>int dim0 = 1,<br>int dim1 = 2 | Done |

### aten.unsqueeze.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 1, 32]> self = ?,<br>int dim = 2 | Done |
|  1 | Tensor<[1, 32, 32]> self = ?,<br>int dim = 1 | Done |
|  2 | Tensor<[1, 32]> self = ?,<br>int dim = 1 | Done |
|  3 | Tensor<[16]> self = ?,<br>int dim = 1 | None |
|  4 | Tensor<[32, 32]> self = ?,<br>int dim = 0 | Done |

### aten.view.default

|    | ATen Input Variations | Status |
|---:|:----------------------|:-------|
|  0 | Tensor<[1, 16, 32, 32]> self = ?,<br>List[int] size = [16, 32, 32] | Done |
|  1 | Tensor<[1, 16, 32, 96]> self = ?,<br>List[int] size = [16, 32, 96] | Done |
|  2 | Tensor<[1, 16, 32]> self = ?,<br>List[int] size = [16, 1, 32] | Fallback |
|  3 | Tensor<[1, 16, 96, 32]> self = ?,<br>List[int] size = [16, 96, 32] | Done |
|  4 | Tensor<[1, 32, 1536]> self = ?,<br>List[int] size = [32, 1536] | Done |
|  5 | Tensor<[1, 32, 4608]> self = ?,<br>List[int] size = [1, 32, 16, 3, 96] | Fallback |
|  6 | Tensor<[1, 32, 6144]> self = ?,<br>List[int] size = [32, 6144] | Done |
|  7 | Tensor<[16, 32, 32]> self = ?,<br>List[int] size = [1, 16, 32, 32] | Done |
|  8 | Tensor<[16, 32, 96]> self = ?,<br>List[int] size = [1, 16, 32, 96] | Done |
|  9 | Tensor<[32, 1536]> self = ?,<br>List[int] size = [1, 32, 1536] | Done |
| 10 | Tensor<[32, 250880]> self = ?,<br>List[int] size = [1, 32, 250880] | Done |
| 11 | Tensor<[32, 4608]> self = ?,<br>List[int] size = [1, 32, 4608] | Done |
| 12 | Tensor<[32, 6144]> self = ?,<br>List[int] size = [1, 32, 6144] | Done |
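A report like this one can be approximated by exporting the model to an ATen-level graph and tallying the operator calls. The sketch below is not the generator that produced this file, just a rough outline of the idea using `torch.export`; `MyModel` and the example token ids are hypothetical placeholders.

```python
from collections import Counter

import torch

def summarize_aten_ops(model: torch.nn.Module, example_inputs: tuple) -> Counter:
    """Count ATen op call sites in the exported graph (coarser than the
    per-variation tally above, which also groups by argument shapes/values)."""
    exported = torch.export.export(model.eval(), example_inputs)
    counts: Counter = Counter()
    for node in exported.graph_module.graph.nodes:
        # Only count ATen operator calls, e.g. "aten.view.default".
        if node.op == "call_function" and isinstance(node.target, torch._ops.OpOverload):
            counts[str(node.target)] += 1
    return counts

# Usage (MyModel and the token ids are hypothetical placeholders):
# print(summarize_aten_ops(MyModel(), (torch.randint(0, 250880, (1, 32)),)))
```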