Add atleast3d op #4067

Open · wants to merge 1 commit into main
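This PR adds end-to-end support for `aten::atleast_3d`: the generated ODS definition, shape and dtype functions for the abstract interpretation library, a decomposition to `aten.atleast_2d` followed by an unsqueeze, legality marking in the backend contract, the op emission in the ODS generator, and e2e tests covering 0-d through 3-d inputs.

For reference, `torch.atleast_3d` returns rank >= 3 inputs unchanged and otherwise promotes the shape to rank 3. A minimal sketch of the expected shapes (standard PyTorch behavior, shown here for illustration only):

```python
import torch

# Shape behavior the new shape function and decomposition should reproduce:
#   ()      -> (1, 1, 1)
#   (N,)    -> (1, N, 1)
#   (M, N)  -> (M, N, 1)
#   rank >= 3 inputs are returned unchanged
for shape in [(), (10,), (3, 4), (2, 3, 4)]:
    x = torch.rand(shape)
    print(shape, "->", tuple(torch.atleast_3d(x).shape))
```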
23 changes: 23 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -10979,6 +10979,29 @@ def Torch_AtenAtleast2dOp : Torch_Op<"aten.atleast_2d", [
}];
}

def Torch_AtenAtleast3dOp : Torch_Op<"aten.atleast_3d", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::atleast_3d : (Tensor) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenAtleast3dOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 1, 1);
}
void AtenAtleast3dOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 1, 1);
}
}];
}

def Torch_AtenEinsumOp : Torch_Op<"aten.einsum", [
AllowsTypeRefinement,
HasValueSemantics,
36 changes: 36 additions & 0 deletions lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
@@ -10966,6 +10966,38 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" }\n"
" return %2 : !torch.list<int>\n"
" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.atleast_3d\"(%arg0: !torch.list<int>) -> !torch.list<int> {\n"
" %int0 = torch.constant.int 0\n"
" %int1 = torch.constant.int 1\n"
" %int2 = torch.constant.int 2\n"
" %0 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
" %1 = torch.aten.eq.int %0, %int0 : !torch.int, !torch.int -> !torch.bool\n"
" %2 = torch.prim.If %1 -> (!torch.list<int>) {\n"
" %3 = torch.prim.ListConstruct %int1, %int1, %int1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>\n"
" torch.prim.If.yield %3 : !torch.list<int>\n"
" } else {\n"
" %3 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
" %4 = torch.aten.eq.int %3, %int1 : !torch.int, !torch.int -> !torch.bool\n"
" %5 = torch.prim.If %4 -> (!torch.list<int>) {\n"
" %6 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int\n"
" %7 = torch.prim.ListConstruct %int1, %6, %int1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>\n"
" torch.prim.If.yield %7 : !torch.list<int>\n"
" } else {\n"
" %6 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
" %7 = torch.aten.eq.int %6, %int2 : !torch.int, !torch.int -> !torch.bool\n"
" %8 = torch.prim.If %7 -> (!torch.list<int>) {\n"
" %9:2 = torch.prim.ListUnpack %arg0 : !torch.list<int> -> !torch.int, !torch.int\n"
" %10 = torch.prim.ListConstruct %9#0, %9#1, %int1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>\n"
" torch.prim.If.yield %10 : !torch.list<int>\n"
" } else {\n"
" torch.prim.If.yield %arg0 : !torch.list<int>\n"
" }\n"
" torch.prim.If.yield %8 : !torch.list<int>\n"
" }\n"
" torch.prim.If.yield %5 : !torch.list<int>\n"
" }\n"
" return %2 : !torch.list<int>\n"
" }\n"
" func.func @\"__torch_mlir_shape_fn.aten.stack\"(%arg0: !torch.list<list<int>>, %arg1: !torch.int) -> !torch.list<int> {\n"
" %0 = call @__torch__.torch.jit._shape_functions.stack(%arg0, %arg1) : (!torch.list<list<int>>, !torch.int) -> !torch.list<int>\n"
" return %0 : !torch.list<int>\n"
@@ -15858,6 +15890,10 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
" return %0#1 : !torch.int\n"
" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.atleast_3d\"(%arg0: !torch.tuple<int, int>) -> !torch.int {\n"
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
" return %0#1 : !torch.int\n"
" }\n"
" func.func @\"__torch_mlir_dtype_fn.aten.hstack\"(%arg0: !torch.list<tuple<int, int>>) -> !torch.int {\n"
" %true = torch.constant.bool true\n"
" %none = torch.constant.none\n"
36 changes: 36 additions & 0 deletions lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
@@ -1941,6 +1941,41 @@ class DecomposeAtenAtleast2dOp : public OpRewritePattern<AtenAtleast2dOp> {
};
} // namespace

namespace {
class DecomposeAtenAtleast3dOp : public OpRewritePattern<AtenAtleast3dOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(AtenAtleast3dOp op,
PatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value input = op.getSelf();
Type opType = op.getType();

auto inputType = cast<BaseTensorType>(input.getType());
SmallVector<int64_t> inputShape(inputType.getSizes());

if (inputShape.size() >= 3) {
rewriter.replaceOp(op, input);
return success();
}

auto atleast2dResShape =
inputShape.empty()
? SmallVector<int64_t>{1, 1}
: (inputShape.size() == 1 ? SmallVector<int64_t>{1, inputShape[0]}
: inputShape);
auto atleast2dResType = rewriter.getType<ValueTensorType>(
atleast2dResShape, inputType.getOptionalDtype());
auto atleast2dRes =
rewriter.create<AtenAtleast2dOp>(loc, atleast2dResType, input);

    // Unsqueeze the trailing dimension of the rank-2 atleast_2d result so the
    // final shapes are (1, 1, 1), (1, N, 1), or (M, N, 1), matching
    // torch.atleast_3d semantics.
    Value dimTwo = rewriter.create<Torch::ConstantIntOp>(
        loc, rewriter.getI64IntegerAttr(2));
    rewriter.replaceOpWithNewOp<AtenUnsqueezeOp>(op, opType, atleast2dRes,
                                                 dimTwo);
return success();
}
};
} // namespace

namespace {
// Decompose AtenEinsumOp to AtenMatmulOp, and supports possible reduce
// operation and permute operation. Currently, this pass doesn't support
@@ -11722,6 +11757,7 @@ class DecomposeComplexOpsPass
addPatternIfTargetOpIsIllegal<DecomposeAtenCeluOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenAtleast1dOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenAtleast2dOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenAtleast3dOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenEinsumOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAten_TrilinearOp>(patterns);
addPatternIfTargetOpIsIllegal<DecomposeAtenTraceOp>(patterns);
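The decomposition above reuses `aten.atleast_2d` and then unsqueezes the trailing dimension, so 0-d, 1-d, and 2-d inputs become (1, 1, 1), (1, N, 1), and (M, N, 1) respectively. A rough eager-mode equivalent, using the hypothetical helper name `atleast_3d_decomposed` (illustration only, not part of this change):

```python
import torch

def atleast_3d_decomposed(x: torch.Tensor) -> torch.Tensor:
    # Mirrors the decomposition: rank >= 3 passes through unchanged;
    # otherwise atleast_2d followed by an unsqueeze of the last dimension.
    if x.dim() >= 3:
        return x
    return torch.atleast_2d(x).unsqueeze(-1)

# The decomposed shapes should match torch.atleast_3d for every rank.
for shape in [(), (10,), (3, 4), (2, 3, 4)]:
    x = torch.rand(shape)
    assert atleast_3d_decomposed(x).shape == torch.atleast_3d(x).shape
```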
1 change: 1 addition & 0 deletions lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
@@ -400,6 +400,7 @@ static void markDecomposedOpsAsIllegal(MLIRContext *context,
target.addIllegalOp<AtenTanhBackwardOp>();
target.addIllegalOp<AtenAtleast1dOp>();
target.addIllegalOp<AtenAtleast2dOp>();
target.addIllegalOp<AtenAtleast3dOp>();
target.addIllegalOp<AtenEinsumOp>();
target.addIllegalOp<Aten_TrilinearOp>();
target.addIllegalOp<AtenTraceOp>();
10 changes: 10 additions & 0 deletions projects/pt1/e2e_testing/xfail_sets.py
@@ -1004,6 +1004,11 @@
"Atleast2dModule0dInput_basic",
"Atleast2dModule1dInput_basic",
"Atleast2dModule2dInput_basic",
"Atleast2dModule3dInput_basic",
"Atleast3dModule0dInput_basic",
"Atleast3dModule1dInput_basic",
"Atleast3dModule2dInput_basic",
"Atleast3dModule3dInput_basic",
"AtenLinear1D_basic",
"AtenLinear2D_basic",
"AtenLinear3DBias_basic",
@@ -2006,6 +2011,11 @@
"Atleast2dModule0dInput_basic",
"Atleast2dModule1dInput_basic",
"Atleast2dModule2dInput_basic",
"Atleast2dModule3dInput_basic",
"Atleast3dModule0dInput_basic",
"Atleast3dModule1dInput_basic",
"Atleast3dModule2dInput_basic",
"Atleast3dModule3dInput_basic",
"AtenLinear2D_basic",
"AtenLinear3DBias_basic",
"ElementwiseAddScalar_NumToTensorFloat_Module_basic",
@@ -2318,6 +2318,18 @@ def aten〇atleast_2d〡shape(self: List[int]) -> List[int]:
else:
return self

def aten〇atleast_3d〡shape(self: List[int]) -> List[int]:
    if len(self) == 0:
        return [1, 1, 1]
    elif len(self) == 1:
        x = self[0]
        return [1, x, 1]
    elif len(self) == 2:
        x, y = self
        return [x, y, 1]
    else:
        return self

def aten〇stack〡shape(tensors: List[List[int]], dim: int = 0) -> List[int]:
return upstream_shape_functions.stack(tensors, dim)

@@ -5676,6 +5688,11 @@ def aten〇atleast_2d〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
self_rank, self_dtype = self_rank_dtype
return self_dtype

@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1))
def aten〇atleast_3d〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
self_rank, self_dtype = self_rank_dtype
return self_dtype

@check_dtype_function(
[Invocation([NonZeroDTensorWithDtype(torch.bool), NonZeroDTensorWithDtype(torch.int32), NonZeroDTensorWithDtype(torch.int64)]),
Invocation([NonZeroDTensorWithDtype(torch.float32), NonZeroDTensorWithDtype(torch.int32)]),
@@ -844,6 +844,7 @@ def emit_with_mutating_variants(key, **kwargs):
emit("aten::one_hot : (Tensor, int) -> (Tensor)")
emit("aten::atleast_1d : (Tensor) -> (Tensor)")
emit("aten::atleast_2d : (Tensor) -> (Tensor)")
emit("aten::atleast_3d : (Tensor) -> (Tensor)")
emit("aten::einsum : (str, Tensor[], int[]?) -> (Tensor)")
emit("aten::trace : (Tensor) -> (Tensor)")
emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")
96 changes: 96 additions & 0 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/reshape_like.py
@@ -1551,6 +1551,102 @@ def Atleast1dModule1dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(4))


class Atleast2dModule0dInput(torch.nn.Module):
@export
@annotate_args([None, [(), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule0dInput())
def Atleast2dModule0dInput_basic(module, tu: TestUtils):
module.forward(tu.rand())


class Atleast2dModule1dInput(torch.nn.Module):
@export
@annotate_args([None, [(10,), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule1dInput())
def Atleast2dModule1dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(10))


class Atleast2dModule2dInput(torch.nn.Module):
@export
@annotate_args([None, [(3, 4), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule2dInput())
def Atleast2dModule2dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 4))


class Atleast2dModule3dInput(torch.nn.Module):
@export
@annotate_args([None, [(2, 3, 4), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_2d(x)


@register_test_case(module_factory=lambda: Atleast2dModule3dInput())
def Atleast2dModule3dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(2, 3, 4))


class Atleast3dModule0dInput(torch.nn.Module):
@export
@annotate_args([None, [(), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_3d(x)


@register_test_case(module_factory=lambda: Atleast3dModule0dInput())
def Atleast3dModule0dInput_basic(module, tu: TestUtils):
module.forward(tu.rand())


class Atleast3dModule1dInput(torch.nn.Module):
@export
@annotate_args([None, [(10,), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_3d(x)


@register_test_case(module_factory=lambda: Atleast3dModule1dInput())
def Atleast3dModule1dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(10))


class Atleast3dModule2dInput(torch.nn.Module):
@export
@annotate_args([None, [(4, 5), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_3d(x)


@register_test_case(module_factory=lambda: Atleast3dModule2dInput())
def Atleast3dModule2dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(4, 5))


class Atleast3dModule3dInput(torch.nn.Module):
@export
@annotate_args([None, [(2, 3, 4), torch.float32, True]])
def forward(self, x):
return torch.ops.aten.atleast_3d(x)


@register_test_case(module_factory=lambda: Atleast3dModule3dInput())
def Atleast3dModule3dInput_basic(module, tu: TestUtils):
module.forward(tu.rand(2, 3, 4))


# ==============================================================================

