@@ -2587,148 +2587,6 @@
     "IndexTensorStaticNonContiguousWithNoneModule_basic",
 }
 
-MAKE_FX_TOSA_CRASHING_SET = TOSA_CRASHING_SET | {
-    # Runtime op verification: static result dims in reassoc group do not divide src dim evenly
-    "FlattenDynamicModule_basic",
-    "ReshapeDynamicModule_basic",
-    "ViewFlattenAndExpandModule_basic",
-    "ViewSizeDimLedAndFollowedByExpandedOnesModule_basic",
-    "ViewSizeDimLedByExpandedOnesModule_basic",
-}
-
-MAKE_FX_TOSA_PASS_SET = (
-    TOSA_PASS_SET
-    | {
-        ### Tests additionally passing in make_fx_tosa
-        "AdaptiveAvgPool1dStaticEvenMultiple_basic",
-        "IsInfiniteModule_basic",
-        "AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule_basic",
-        "AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic",
-        "ResNet18StaticModule_basic",
-        "AdaptiveAvgPool1dStaticLargerOutput_basic",
-        "ScaledDotProductAttentionBoolMaskModule_basic",
-        "ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
-        "ArgminIntModule_basic",
-        "ArgminIntModule_multiple_mins",
-        "ArgminModule_basic",
-        "ArgminModule_keepDim",
-        "ReduceAllDimBool_basic",
-        "ReduceAllDimFloat_basic",
-        "ReduceAllDimInt_basic",
-        "ReduceAllFloatModule_basic",
-        "ReduceAllIntModule_basic",
-        "ReduceAnyFloatModule_basic",
-        "ReduceAnyIntModule_basic",
-        "ReduceMaxAllDims_basic",
-        "ReduceMaxFloatModule_basic",
-        "ReduceMaxSignedIntModule_basic",
-        "ReduceMaxUnsignedIntModule_basic",
-        "ReduceMinFloatModule_basic",
-        "ReduceMinSignedIntModule_basic",
-        "ReduceMinUnsignedIntModule_basic",
-        "ReduceProdDtypeFloatModule_basic",
-        "ReduceProdDtypeIntModule_basic",
-        "ReduceProdElementTypeBoolModule_basic",
-        "ReduceProdFloatModule_basic",
-        "ReduceProdSignedIntModule_basic",
-        "ReduceProdUnsignedIntModule_basic",
-        "ReduceSumDimIntListDtypeFloatModule_basic",
-        "ReduceSumDimIntListDtypeIntModule_basic",
-        "ReduceSumDimIntListElementTypeBoolModule_basic",
-        "ReduceSumDtypeFloatModule_basic",
-        "ReduceSumDtypeIntModule_basic",
-        "ReduceSumElementTypeBoolModule_basic",
-        "ScaledDotProductAttentionDifferentModule_basic",
-        "ScaledDotProductAttentionMaskModule_basic",
-        "ScaledDotProductAttentionSameModule_basic",
-        "AvgPool2dCountIncludePadFalseStaticModule_basic",
-        "AtenLinear1D_basic",
-        "AtenLinearMatVec_basic",
-        "AtenLinearVecMatBias_basic",
-        "Atleast1dModule0dInput_basic",
-        "Atleast1dModule1dInput_basic",
-        "Atleast2dModule0dInput_basic",
-        "Atleast2dModule1dInput_basic",
-        "Atleast2dModule2dInput_basic",
-        "MaxPool1dEmptyStrideStaticModule_basic",
-        "MaxPool1dStaticCeilModeTrueModule_basic",
-        "MaxPool1dStaticModule_basic",
-        "AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic",
-        "CosineSimilarityModule_basic",
-        "NativeGroupNormBackwardModule_basic",
-        "ReduceFrobeniusNormKeepDimModule_basic",
-        "ReduceFrobeniusNormModule_basic",
-        "SliceWholeTensorModule_basic",
-        "TensorFloatModule_basic",
-        "TensorIntModule_basic",
-        "RepeatInterleaveSelfIntModule_basic",
-        "TorchPrimLoopForLikeTensorArgModule_basic",
-        "ViewSizeDimFollowedByCollapsedOnesModule_basic",
-        "ViewSizeDimFollowedByExpandedOnesModule_basic",
-        "ViewSizeDimLedAndFollowedByCollapsedOnesModule_basic",
-        "ViewSizeDimLedByCollapsedOnesModule_basic",
-        "ViewSizeFromOtherTensor_basic",
-        "RenormModuleFloat32NegativeDim_basic",
-        "RenormModuleFloat32_basic",
-        "RreluWithNoiseBackwardEvalModule_basic",
-        "RreluWithNoiseBackwardEvalStaticModule_basic",
-        "RreluWithNoiseBackwardTrainModule_basic",
-        "RreluWithNoiseBackwardTrainStaticModule_basic",
-    }
-) - {
-    ### Test failing in make_fx_tosa but not in tosa
-    "ElementwiseRreluEvalStaticModule_basic",
-    "ElementwiseRreluTrainStaticModule_basic",
-    "AdaptiveMaxPool1dDimOneStatic_basic",
-    "FloatPowerTensorTensorStaticModule_basic",
-    # Dynamic shape, has extra unsupported broadcast ops
-    "Matmul_3d",
-    # Unimplemented operator 'aten._index_put_impl_.hacked_twin'
-    "IndexPutImpl1DFloatNonAccumulateModule_basic",
-    "IndexPutImpl1DIntNonAccumulateModule_basic",
-    # RuntimeError: The size of tensor a (7) must match the size of tensor b (3) at non-singleton dimension 1
-    "Add_Module_basic",
-    # failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
-    "AtenEyeModuleInt2D_basic",
-    "AtenEyeMModuleInt2D_basic",
-    "Conv2dBiasNoPaddingModule_basic",
-    "Conv2dNoPaddingModule_basic",
-    "Conv2dWithPaddingDilationStrideModule_basic",
-    "Conv2dWithPaddingModule_basic",
-    "Conv2dWithSamePaddingModule_basic",
-    "Conv2dWithValidPaddingModule_basic",
-    # failed to legalize operation 'torch.operator'
-    "ElementwisePreluModule_basic",
-    "ElementwisePreluStaticModule_basic",
-    "ElementwiseLogSigmoidModule_basic",
-    # failed to legalize operation 'torch.aten.rrelu_with_noise'
-    "ElementwiseRreluEvalModule_basic",
-    # incompatible return type failure for tosa.concat.
-    "HstackBasicComplexModule_basic",
-    "HstackBasicFloatModule_basic",
-    "HstackBasicIntFloatModule_basic",
-    "HstackBasicIntModule_basic",
-    # Shape Related failures
-    "PrimListUnpackNumMismatchModule_basic",
-    "ReshapeExpandModule_basic",
-    "UnsafeViewCollapseModule_basic",
-    "UnsafeViewDynamicExpandModule_basic",
-    "ViewCollapseModule_basic",
-    "ViewDynamicExpandCollapseModule_basic",
-    "ViewDynamicExpandModule_basic",
-    "ViewExpandDynamicDimModule_basic",
-    "ViewNoChange1dModule_basic",
-    "ViewNoChange2dModule_basic",
-    "ViewNoChange3dModule_basic",
-}
-
-if torch_version_for_comparison() < version.parse("2.5.0.dev"):
-    MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET | {
-        "ScaledDotProductAttentionDifferentModule_basic",
-        "ScaledDotProductAttentionMaskModule_basic",
-        "ScaledDotProductAttentionSameModule_basic",
-    }
-
 LTC_CRASHING_SET = {
     # TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
     # Check failed: lazy_tensor Input tensor is not a lazy tensor: CPUBoolType.
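The deleted block built its test lists with ordinary Python set algebra: a base pass set is unioned with tests that only pass under make_fx, known make_fx-only failures are subtracted, and a few entries are gated on the installed torch version. The sketch below illustrates that pattern with placeholder set contents and a literal version string standing in for the project's torch_version_for_comparison() helper; it is not the file's actual code.

from packaging import version

# Placeholder base list of tests the TOSA backend already passes.
TOSA_PASS_SET = {"AtenLinear1D_basic", "Matmul_3d"}

# Placeholder deltas for the make_fx_tosa configuration.
EXTRA_MAKE_FX_PASSES = {"ArgminModule_basic"}  # pass only under make_fx
MAKE_FX_FAILURES = {"Matmul_3d"}               # fail only under make_fx

# Union in the extra passes, then subtract the known failures.
MAKE_FX_TOSA_PASS_SET = (TOSA_PASS_SET | EXTRA_MAKE_FX_PASSES) - MAKE_FX_FAILURES

# Version-gated additions, analogous to the removed torch_version_for_comparison()
# check; here a hard-coded version stands in for the installed torch version.
installed_torch = version.parse("2.4.1")
if installed_torch < version.parse("2.5.0.dev"):
    MAKE_FX_TOSA_PASS_SET |= {"ScaledDotProductAttentionSameModule_basic"}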