
Commit 7dbb08a

Merge pull request #545 from Xilinx/bump_to_62eb38bc
[AutoBump] Merge with 62eb38b (Jan 14) (152)
2 parents 8b465e3 + b8d39ab

11 files changed: +14 −174 lines

build_tools/python_deploy/build_linux_packages.sh

Lines changed: 0 additions & 3 deletions
@@ -324,9 +324,6 @@ function test_in_tree() {
       ;;
   esac
 
-  echo ":::: Run make_fx + TOSA e2e integration tests"
-  python -m e2e_testing.main --config=make_fx_tosa -v
-
   echo ":::: Run TOSA e2e integration tests"
   python -m e2e_testing.main --config=tosa -v
 }

projects/pt1/e2e_testing/main.py

Lines changed: 0 additions & 7 deletions
@@ -42,8 +42,6 @@
 from .xfail_sets import (
     LINALG_XFAIL_SET,
     LINALG_CRASHING_SET,
-    MAKE_FX_TOSA_PASS_SET,
-    MAKE_FX_TOSA_CRASHING_SET,
     STABLEHLO_PASS_SET,
     STABLEHLO_CRASHING_SET,
     TOSA_PASS_SET,
@@ -76,7 +74,6 @@ def _get_argparse():
             "torchscript",
             "linalg",
             "stablehlo",
-            "make_fx_tosa",
             "tosa",
             "lazy_tensor_core",
             "torchdynamo",
@@ -166,10 +163,6 @@ def main():
         config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
         xfail_set = all_test_unique_names - TOSA_PASS_SET
         crashing_set = TOSA_CRASHING_SET
-    elif args.config == "make_fx_tosa":
-        config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend(), use_make_fx=True)
-        xfail_set = all_test_unique_names - MAKE_FX_TOSA_PASS_SET
-        crashing_set = MAKE_FX_TOSA_CRASHING_SET
     elif args.config == "native_torch":
         config = NativeTorchTestConfig()
         xfail_set = set()

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 0 additions & 142 deletions
@@ -2587,148 +2587,6 @@
     "IndexTensorStaticNonContiguousWithNoneModule_basic",
 }
 
-MAKE_FX_TOSA_CRASHING_SET = TOSA_CRASHING_SET | {
-    # Runtime op verification: static result dims in reassoc group do not divide src dim evenly
-    "FlattenDynamicModule_basic",
-    "ReshapeDynamicModule_basic",
-    "ViewFlattenAndExpandModule_basic",
-    "ViewSizeDimLedAndFollowedByExpandedOnesModule_basic",
-    "ViewSizeDimLedByExpandedOnesModule_basic",
-}
-
-MAKE_FX_TOSA_PASS_SET = (
-    TOSA_PASS_SET
-    | {
-        ### Tests additionally passing in make_fx_tosa
-        "AdaptiveAvgPool1dStaticEvenMultiple_basic",
-        "IsInfiniteModule_basic",
-        "AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule_basic",
-        "AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic",
-        "ResNet18StaticModule_basic",
-        "AdaptiveAvgPool1dStaticLargerOutput_basic",
-        "ScaledDotProductAttentionBoolMaskModule_basic",
-        "ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
-        "ArgminIntModule_basic",
-        "ArgminIntModule_multiple_mins",
-        "ArgminModule_basic",
-        "ArgminModule_keepDim",
-        "ReduceAllDimBool_basic",
-        "ReduceAllDimFloat_basic",
-        "ReduceAllDimInt_basic",
-        "ReduceAllFloatModule_basic",
-        "ReduceAllIntModule_basic",
-        "ReduceAnyFloatModule_basic",
-        "ReduceAnyIntModule_basic",
-        "ReduceMaxAllDims_basic",
-        "ReduceMaxFloatModule_basic",
-        "ReduceMaxSignedIntModule_basic",
-        "ReduceMaxUnsignedIntModule_basic",
-        "ReduceMinFloatModule_basic",
-        "ReduceMinSignedIntModule_basic",
-        "ReduceMinUnsignedIntModule_basic",
-        "ReduceProdDtypeFloatModule_basic",
-        "ReduceProdDtypeIntModule_basic",
-        "ReduceProdElementTypeBoolModule_basic",
-        "ReduceProdFloatModule_basic",
-        "ReduceProdSignedIntModule_basic",
-        "ReduceProdUnsignedIntModule_basic",
-        "ReduceSumDimIntListDtypeFloatModule_basic",
-        "ReduceSumDimIntListDtypeIntModule_basic",
-        "ReduceSumDimIntListElementTypeBoolModule_basic",
-        "ReduceSumDtypeFloatModule_basic",
-        "ReduceSumDtypeIntModule_basic",
-        "ReduceSumElementTypeBoolModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
-        "ScaledDotProductAttentionMaskModule_basic",
-        "ScaledDotProductAttentionSameModule_basic",
-        "AvgPool2dCountIncludePadFalseStaticModule_basic",
-        "AtenLinear1D_basic",
-        "AtenLinearMatVec_basic",
-        "AtenLinearVecMatBias_basic",
-        "Atleast1dModule0dInput_basic",
-        "Atleast1dModule1dInput_basic",
-        "Atleast2dModule0dInput_basic",
-        "Atleast2dModule1dInput_basic",
-        "Atleast2dModule2dInput_basic",
-        "MaxPool1dEmptyStrideStaticModule_basic",
-        "MaxPool1dStaticCeilModeTrueModule_basic",
-        "MaxPool1dStaticModule_basic",
-        "AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic",
-        "CosineSimilarityModule_basic",
-        "NativeGroupNormBackwardModule_basic",
-        "ReduceFrobeniusNormKeepDimModule_basic",
-        "ReduceFrobeniusNormModule_basic",
-        "SliceWholeTensorModule_basic",
-        "TensorFloatModule_basic",
-        "TensorIntModule_basic",
-        "RepeatInterleaveSelfIntModule_basic",
-        "TorchPrimLoopForLikeTensorArgModule_basic",
-        "ViewSizeDimFollowedByCollapsedOnesModule_basic",
-        "ViewSizeDimFollowedByExpandedOnesModule_basic",
-        "ViewSizeDimLedAndFollowedByCollapsedOnesModule_basic",
-        "ViewSizeDimLedByCollapsedOnesModule_basic",
-        "ViewSizeFromOtherTensor_basic",
-        "RenormModuleFloat32NegativeDim_basic",
-        "RenormModuleFloat32_basic",
-        "RreluWithNoiseBackwardEvalModule_basic",
-        "RreluWithNoiseBackwardEvalStaticModule_basic",
-        "RreluWithNoiseBackwardTrainModule_basic",
-        "RreluWithNoiseBackwardTrainStaticModule_basic",
-    }
-) - {
-    ### Test failing in make_fx_tosa but not in tosa
-    "ElementwiseRreluEvalStaticModule_basic",
-    "ElementwiseRreluTrainStaticModule_basic",
-    "AdaptiveMaxPool1dDimOneStatic_basic",
-    "FloatPowerTensorTensorStaticModule_basic",
-    # Dynamic shape, has extra unsupported broadcast ops
-    "Matmul_3d",
-    # Unimplemented operator 'aten._index_put_impl_.hacked_twin'
-    "IndexPutImpl1DFloatNonAccumulateModule_basic",
-    "IndexPutImpl1DIntNonAccumulateModule_basic",
-    # RuntimeError: The size of tensor a (7) must match the size of tensor b (3) at non-singleton dimension 1
-    "Add_Module_basic",
-    # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
-    "AtenEyeModuleInt2D_basic",
-    "AtenEyeMModuleInt2D_basic",
-    "Conv2dBiasNoPaddingModule_basic",
-    "Conv2dNoPaddingModule_basic",
-    "Conv2dWithPaddingDilationStrideModule_basic",
-    "Conv2dWithPaddingModule_basic",
-    "Conv2dWithSamePaddingModule_basic",
-    "Conv2dWithValidPaddingModule_basic",
-    # failed to legalize operation 'torch.operator'
-    "ElementwisePreluModule_basic",
-    "ElementwisePreluStaticModule_basic",
-    "ElementwiseLogSigmoidModule_basic",
-    # failed to legalize operation 'torch.aten.rrelu_with_noise'
-    "ElementwiseRreluEvalModule_basic",
-    # incompatible return type failure for tosa.concat.
-    "HstackBasicComplexModule_basic",
-    "HstackBasicFloatModule_basic",
-    "HstackBasicIntFloatModule_basic",
-    "HstackBasicIntModule_basic",
-    # Shape Related failures
-    "PrimListUnpackNumMismatchModule_basic",
-    "ReshapeExpandModule_basic",
-    "UnsafeViewCollapseModule_basic",
-    "UnsafeViewDynamicExpandModule_basic",
-    "ViewCollapseModule_basic",
-    "ViewDynamicExpandCollapseModule_basic",
-    "ViewDynamicExpandModule_basic",
-    "ViewExpandDynamicDimModule_basic",
-    "ViewNoChange1dModule_basic",
-    "ViewNoChange2dModule_basic",
-    "ViewNoChange3dModule_basic",
-}
-
-if torch_version_for_comparison() < version.parse("2.5.0.dev"):
-    MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET | {
-        "ScaledDotProductAttentionDifferentModule_basic",
-        "ScaledDotProductAttentionMaskModule_basic",
-        "ScaledDotProductAttentionSameModule_basic",
-    }
-
 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
     # Check failed: lazy_tensor Input tensor is not a lazy tensor: CPUBoolType.

projects/pt1/python/torch_mlir/torchscript.py

Lines changed: 0 additions & 9 deletions
@@ -14,8 +14,6 @@
 from torch._functorch.compile_utils import strip_overloads
 import torch
 import torch.fx
-from torch_mlir.dynamo import _get_decomposition_table
-from torch.fx.experimental.proxy_tensor import make_fx
 
 from torch_mlir.compiler_utils import (
     run_pipeline_with_repro_report,
@@ -203,7 +201,6 @@ def compile(
     backend_legal_ops: Optional[Sequence[str]] = None,
     extra_library: Iterable[Callable] = [],
     verbose: bool = False,
-    use_make_fx: bool = False,
     enable_ir_printing: bool = False,
 ):
     """Convert a PyTorch model to MLIR.
@@ -266,12 +263,6 @@ def compile(
     else:
         backend_legal_ops = BACKEND_LEGAL_OPS.get(output_type, [])
 
-    if use_make_fx:
-        args = example_args._get_for_tracing(
-            use_tracing=True, ignore_traced_shapes=True
-        )["forward"]
-        model = make_fx(model, decomposition_table=_get_decomposition_table())(*args)
-
     # For FX-based models, automatically strip overloads.
     if isinstance(model, torch.fx.GraphModule):
         strip_overloads(model)
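
With the make_fx path deleted, torchscript.compile no longer accepts a use_make_fx argument. A minimal sketch of a call after this change, assuming the pt1 torch_mlir package is importable; the AddOne module and example input are invented for illustration:

    import torch
    from torch_mlir import torchscript

    class AddOne(torch.nn.Module):
        def forward(self, x):
            return x + 1.0

    # Compile straight to the TOSA abstraction level; passing use_make_fx=True
    # here would now fail, since the keyword was removed from the signature.
    module = torchscript.compile(AddOne(), torch.ones(2, 3), output_type="tosa")
    print(module)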

projects/pt1/python/torch_mlir_e2e_test/configs/onnx_backend.py

Lines changed: 0 additions & 2 deletions
@@ -132,12 +132,10 @@ class OnnxBackendTestConfig(TestConfig):
     def __init__(
         self,
         backend,
-        use_make_fx: bool = False,
         output_type="linalg-on-tensors",
     ):
         super().__init__()
         self.backend = backend
-        self.use_make_fx = use_make_fx
         self.output_type = output_type
 
     def compile(self, program: torch.nn.Module, verbose: bool = False) -> Any:

projects/pt1/python/torch_mlir_e2e_test/configs/tosa_backend.py

Lines changed: 1 addition & 3 deletions
@@ -24,18 +24,16 @@ class TosaBackendTestConfig(TestConfig):
     reaching the TOSA abstraction level.
     """
 
-    def __init__(self, backend: TosaBackend, use_make_fx: bool = False):
+    def __init__(self, backend: TosaBackend):
         super().__init__()
         self.backend = backend
-        self.use_make_fx = use_make_fx
 
     def compile(self, program: torch.nn.Module, verbose: bool = False) -> Any:
         example_args = convert_annotations_to_placeholders(program.forward)
         module = torchscript.compile(
             program,
             example_args,
             output_type="tosa",
-            use_make_fx=self.use_make_fx,
             verbose=verbose,
         )

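On the harness side, TosaBackendTestConfig is now constructed without the flag. A short sketch mirroring the remaining TOSA branch in projects/pt1/e2e_testing/main.py; the import paths are the ones used in the pt1 tree and are an assumption for any other setup:

    from torch_mlir_e2e_test.configs import TosaBackendTestConfig
    from torch_mlir_e2e_test.tosa_backends.linalg_on_tensors import (
        LinalgOnTensorsTosaBackend,
    )

    # use_make_fx=True is no longer a valid keyword here.
    config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
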
python/torch_mlir/compiler_utils.py

Lines changed: 2 additions & 2 deletions
@@ -10,8 +10,8 @@
 from typing import Union, List
 
 import torch
-from torch_mlir.passmanager import PassManager
-from torch_mlir.ir import StringAttr
+from .passmanager import PassManager
+from .ir import StringAttr
 
 
 class TensorPlaceholder:

python/torch_mlir/extras/onnx_importer.py

Lines changed: 1 addition & 1 deletion
@@ -742,7 +742,7 @@ def _sanitize_name(self, name):
 
         # Remove characters that are invalid in MLIR identifier names.
         # https://mlir.llvm.org/docs/LangRef/#identifiers-and-keywords
-        return re.sub("[:/-]", "_", name)
+        return re.sub("[^\w\.]", "_", name)
 
     def tensor_proto_to_attr(self, tp: onnx.TensorProto) -> Attribute:
         tensor_type = self.tensor_proto_to_builtin_type(tp)
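
The old pattern replaced only ':', '/', and '-'; the new one inverts the match, so every character that is not a word character or a dot becomes '_' when ONNX value names are turned into MLIR identifiers. A small sketch of the difference; the sample name is made up, and raw-string regexes are used here, which behave the same as the forms in the diff:

    import re

    name = "block.0/attn:qkv weight#1"       # hypothetical ONNX value name
    print(re.sub(r"[:/-]", "_", name))       # old rule:  block.0_attn_qkv weight#1
    print(re.sub(r"[^\w.]", "_", name))      # new rule:  block.0_attn_qkv_weight_1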

python/torch_mlir/fx.py

Lines changed: 5 additions & 5 deletions
@@ -13,11 +13,11 @@
 import torch.nn as nn
 from torch.export import ExportedProgram
 
-from torch_mlir.extras.fx_importer import FxImporter, FxImporterHooks
-from torch_mlir import ir
-from torch_mlir.dialects import torch as torch_d
-from torch_mlir.extras.fx_decomp_util import get_decomposition_table
-from torch_mlir.compiler_utils import (
+from .extras.fx_importer import FxImporter, FxImporterHooks
+from . import ir
+from .dialects import torch as torch_d
+from .extras.fx_decomp_util import get_decomposition_table
+from .compiler_utils import (
     OutputType,
     run_pipeline_with_repro_report,
     lower_mlir_module,
Binary file (124 Bytes) not shown.
