Commit c77d202

Revert "[RISCV] Decompose single source shuffles (without exact VLEN) (#126108)"
This reverts commit 8374d42. A miscompile was reported against the review thread; reverting while we investigate.
1 parent 5836d91 commit c77d202

5 files changed: +61 -180 lines changed
llvm/lib/Target/RISCV/RISCVISelLowering.cpp

-40
@@ -5354,12 +5354,6 @@ static bool isLocalRepeatingShuffle(ArrayRef<int> Mask, int Span) {
   return true;
 }
 
-/// Is this mask only using elements from the first span of the input?
-static bool isLowSourceShuffle(ArrayRef<int> Mask, int Span) {
-  return all_of(Mask,
-                [&](const auto &Idx) { return Idx == -1 || Idx < Span; });
-}
-
 /// Try to widen element type to get a new mask value for a better permutation
 /// sequence. This doesn't try to inspect the widened mask for profitability;
 /// we speculate the widened form is equal or better. This has the effect of
@@ -5775,40 +5769,6 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
         Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
                              SubVec, SubIdx);
       }
-    } else if (NumElts > MinVLMAX && isLowSourceShuffle(Mask, MinVLMAX)) {
-      // If we have a shuffle which only uses the first register in our
-      // source register group, we can do a linear number of m1 vrgathers
-      // reusing the same source register (but with different indices)
-      // TODO: This can be generalized for m2 or m4, or for any shuffle
-      // for which we can do a vslidedown followed by this expansion.
-      const MVT M1VT = getLMUL1VT(ContainerVT);
-      EVT SubIndexVT = M1VT.changeVectorElementType(IndexVT.getScalarType());
-      auto [InnerTrueMask, InnerVL] =
-          getDefaultScalableVLOps(M1VT, DL, DAG, Subtarget);
-      int N = ContainerVT.getVectorMinNumElements() /
-              M1VT.getVectorMinNumElements();
-      assert(isPowerOf2_32(N) && N <= 8);
-      Gather = DAG.getUNDEF(ContainerVT);
-      SDValue SlideAmt =
-          DAG.getElementCount(DL, XLenVT, M1VT.getVectorElementCount());
-      for (int i = 0; i < N; i++) {
-        if (i != 0)
-          LHSIndices = getVSlidedown(DAG, Subtarget, DL, IndexContainerVT,
-                                     DAG.getUNDEF(IndexContainerVT), LHSIndices,
-                                     SlideAmt, TrueMask, VL);
-        SDValue SubIdx =
-            DAG.getVectorIdxConstant(M1VT.getVectorMinNumElements() * i, DL);
-        SDValue SubV1 =
-            DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, V1, SubIdx);
-        SDValue SubIndex =
-            DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubIndexVT, LHSIndices,
-                        DAG.getVectorIdxConstant(0, DL));
-        SDValue SubVec =
-            DAG.getNode(GatherVVOpc, DL, M1VT, SubV1, SubIndex,
-                        DAG.getUNDEF(M1VT), InnerTrueMask, InnerVL);
-        Gather = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, Gather,
-                             SubVec, SubIdx);
-      }
     } else {
       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
                            DAG.getUNDEF(ContainerVT), TrueMask, VL);
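
For readers following the revert: the removed isLowSourceShuffle helper above only accepted masks whose defined indices all read the first LMUL1 span of the source, which is what allowed the reverted path to expand an LMUL>1 gather as a series of m1 vrgathers over one shared source register. Below is a minimal standalone sketch of that mask test in plain C++, outside the LLVM tree; the function and variable names are illustrative assumptions, not LLVM APIs.

// Standalone illustration only: mirrors the check the reverted
// isLowSourceShuffle helper performed, using std::vector instead of ArrayRef.
#include <algorithm>
#include <cassert>
#include <vector>

static bool usesOnlyLowSpan(const std::vector<int> &Mask, int Span) {
  // -1 marks an undef lane; every defined index must stay below Span.
  return std::all_of(Mask.begin(), Mask.end(),
                     [&](int Idx) { return Idx == -1 || Idx < Span; });
}

int main() {
  // Hypothetical m2 shuffle with a 4-element m1 span: the defined indices
  // (0 and 1) all read the low span, so the reverted path would have applied.
  assert(usesOnlyLowSpan({0, -1, -1, -1, 1, -1, -1, -1}, 4));
  // Index 5 reads past the first span, so this mask would not qualify.
  assert(!usesOnlyLowSpan({0, 5, -1, -1, 1, -1, -1, -1}, 4));
  return 0;
}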

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll

+7-12
@@ -38,20 +38,15 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) {
 define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
 ; V128-LABEL: interleave_v2f64:
 ; V128: # %bb.0:
-; V128-NEXT: csrr a0, vlenb
 ; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; V128-NEXT: vid.v v10
-; V128-NEXT: srli a0, a0, 3
-; V128-NEXT: vsrl.vi v10, v10, 1
-; V128-NEXT: vslidedown.vx v12, v10, a0
-; V128-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; V128-NEXT: vrgatherei16.vv v13, v11, v12
-; V128-NEXT: vrgatherei16.vv v12, v9, v10
+; V128-NEXT: vmv1r.v v12, v9
+; V128-NEXT: vid.v v9
 ; V128-NEXT: vmv.v.i v0, 10
-; V128-NEXT: vrgatherei16.vv v14, v8, v10
-; V128-NEXT: vmv.v.v v15, v13
-; V128-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; V128-NEXT: vmerge.vvm v8, v14, v12, v0
+; V128-NEXT: vsrl.vi v14, v9, 1
+; V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; V128-NEXT: vrgatherei16.vv v10, v8, v14
+; V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
+; V128-NEXT: vmv.v.v v8, v10
 ; V128-NEXT: ret
 ;
 ; RV32-V512-LABEL: interleave_v2f64:

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll

+7-12
@@ -51,20 +51,15 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
5151
define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
5252
; V128-LABEL: interleave_v2i64:
5353
; V128: # %bb.0:
54-
; V128-NEXT: csrr a0, vlenb
5554
; V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
56-
; V128-NEXT: vid.v v10
57-
; V128-NEXT: srli a0, a0, 3
58-
; V128-NEXT: vsrl.vi v10, v10, 1
59-
; V128-NEXT: vslidedown.vx v12, v10, a0
60-
; V128-NEXT: vsetvli a0, zero, e64, m1, ta, ma
61-
; V128-NEXT: vrgatherei16.vv v13, v11, v12
62-
; V128-NEXT: vrgatherei16.vv v12, v9, v10
55+
; V128-NEXT: vmv1r.v v12, v9
56+
; V128-NEXT: vid.v v9
6357
; V128-NEXT: vmv.v.i v0, 10
64-
; V128-NEXT: vrgatherei16.vv v14, v8, v10
65-
; V128-NEXT: vmv.v.v v15, v13
66-
; V128-NEXT: vsetivli zero, 4, e64, m2, ta, ma
67-
; V128-NEXT: vmerge.vvm v8, v14, v12, v0
58+
; V128-NEXT: vsrl.vi v14, v9, 1
59+
; V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
60+
; V128-NEXT: vrgatherei16.vv v10, v8, v14
61+
; V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
62+
; V128-NEXT: vmv.v.v v8, v10
6863
; V128-NEXT: ret
6964
;
7065
; RV32-V512-LABEL: interleave_v2i64:

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

+13-45
@@ -817,17 +817,13 @@ define <8 x i32> @shuffle_spread2_singlesrc_e32_index1(<8 x i32> %v) {
 define <8 x i32> @shuffle_spread2_singlesrc_e32_index2(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_spread2_singlesrc_e32_index2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vsrl.vi v10, v10, 1
 ; CHECK-NEXT: vadd.vi v12, v10, -1
-; CHECK-NEXT: vslidedown.vx v10, v12, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 undef, i32 undef, i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef>
 ret <8 x i32> %out
@@ -840,13 +836,9 @@ define <8 x i32> @shuffle_spread3_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-NEXT: vmv.v.i v10, 0
 ; CHECK-NEXT: li a0, 1
 ; CHECK-NEXT: vslide1down.vx v12, v10, a0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vslidedown.vx v10, v12, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 2, i32 undef>
 ret <8 x i32> %out
@@ -856,16 +848,12 @@ define <8 x i32> @shuffle_spread3_singlesrc_e32(<8 x i32> %v) {
 define <8 x i32> @shuffle_spread4_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_spread4_singlesrc_e32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vsrl.vi v12, v10, 2
-; CHECK-NEXT: vslidedown.vx v10, v12, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 1, i32 undef, i32 undef, i32 undef>
 ret <8 x i32> %out
@@ -992,16 +980,12 @@ define <8 x i32> @shuffle_repeat3_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-NEXT: vmv.v.i v11, 1
 ; CHECK-NEXT: li a0, 192
 ; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vmerge.vim v11, v11, 0, v0
 ; CHECK-NEXT: vmv.v.v v0, v10
 ; CHECK-NEXT: vmerge.vim v12, v11, 2, v0
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vslidedown.vx v10, v12, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 2, i32 2>
 ret <8 x i32> %out
@@ -1010,16 +994,12 @@ define <8 x i32> @shuffle_repeat3_singlesrc_e32(<8 x i32> %v) {
 define <8 x i32> @shuffle_repeat4_singlesrc_e32(<8 x i32> %v) {
 ; CHECK-LABEL: shuffle_repeat4_singlesrc_e32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vid.v v10
-; CHECK-NEXT: srli a0, a0, 2
 ; CHECK-NEXT: vsrl.vi v12, v10, 2
-; CHECK-NEXT: vslidedown.vx v10, v12, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v9, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %out = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1>
 ret <8 x i32> %out
@@ -1311,23 +1291,11 @@ define void @shuffle_i128_splat(ptr %p) nounwind {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, 16
-; CHECK-NEXT: srli a1, a1, 3
+; CHECK-NEXT: lui a1, 16
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a2
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v13, v12, a1
-; CHECK-NEXT: vslidedown.vx v14, v13, a1
-; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v17, v9, v13
-; CHECK-NEXT: vrgatherei16.vv v16, v8, v12
-; CHECK-NEXT: vrgatherei16.vv v18, v10, v14
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v14, a1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v19, v11, v8
+; CHECK-NEXT: vmv.v.x v12, a1
 ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v16, v8, v12
 ; CHECK-NEXT: vse64.v v16, (a0)
 ; CHECK-NEXT: ret
 %a = load <4 x i128>, ptr %p

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll

+34-71
@@ -237,15 +237,10 @@ define <8 x i32> @v8i32_v4i32(<4 x i32>) {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vslidedown.vx v10, v9, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v12, v10
-; CHECK-NEXT: vrgatherei16.vv v10, v8, v9
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vrgatherei16.vv v10, v8, v12
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <8 x i32> %2
@@ -254,38 +249,30 @@ define <8 x i32> @v8i32_v4i32(<4 x i32>) {
 define <16 x i32> @v16i32_v4i32(<4 x i32>) {
 ; CHECK-LABEL: v16i32_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: lui a0, 2
-; CHECK-NEXT: vmv.v.i v11, 3
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v9, 3
 ; CHECK-NEXT: addi a1, a0, 265
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vmv.s.x v0, a1
 ; CHECK-NEXT: lui a1, 4
 ; CHECK-NEXT: addi a1, a1, 548
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
 ; CHECK-NEXT: addi a0, a0, -1856
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: vmv.s.x v9, a0
 ; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v11, v11, 2, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v8, v11, 0, v0
-; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmerge.vim v9, v9, 0, v0
 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vsext.vf2 v14, v8
-; CHECK-NEXT: vslidedown.vx v16, v14, a1
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v9, v12, v16
-; CHECK-NEXT: vrgatherei16.vv v8, v10, v14
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vslidedown.vx v12, v16, a1
-; CHECK-NEXT: vslidedown.vx v14, v12, a1
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v11, v12
-; CHECK-NEXT: vrgatherei16.vv v11, v12, v14
+; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsext.vf2 v16, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <16 x i32> %2
@@ -294,55 +281,31 @@ define <16 x i32> @v16i32_v4i32(<4 x i32>) {
 define <32 x i32> @v32i32_v4i32(<4 x i32>) {
 ; CHECK-LABEL: v32i32_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: li a0, 32
 ; CHECK-NEXT: lui a1, 135432
 ; CHECK-NEXT: addi a1, a1, 1161
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vmv.s.x v0, a1
 ; CHECK-NEXT: lui a1, 270865
 ; CHECK-NEXT: addi a1, a1, 548
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: lui a1, 100550
-; CHECK-NEXT: addi a1, a1, 64
 ; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: lui a1, 100550
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v12, 3
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: vmerge.vim v12, v12, 2, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vim v12, v12, 0, v0
+; CHECK-NEXT: vmv.v.i v10, 3
+; CHECK-NEXT: addi a0, a1, 64
+; CHECK-NEXT: vmerge.vim v18, v10, 2, v0
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmv.s.x v16, a0
 ; CHECK-NEXT: vmv1r.v v0, v9
-; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vim v18, v18, 0, v0
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmerge.vim v16, v18, 1, v0
 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vsext.vf2 v16, v8
-; CHECK-NEXT: vslidedown.vx v12, v16, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v9, v11, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v10, v16
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v12, v12, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v10, v11, v12
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v12, v12, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v11, v16, v12
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v20, v12, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v12, v17, v20
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v20, v20, a1
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v13, v18, v20
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vslidedown.vx v20, v20, a1
-; CHECK-NEXT: vslidedown.vx v24, v20, a1
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v14, v19, v20
-; CHECK-NEXT: vrgatherei16.vv v15, v16, v24
+; CHECK-NEXT: vsext.vf2 v24, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v16, v8, v24
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 %2 = shufflevector <4 x i32> %0, <4 x i32> poison, <32 x i32> <i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 2, i32 3, i32 0, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3, i32 1, i32 2, i32 0, i32 3, i32 1, i32 1, i32 2, i32 0, i32 3>
 ret <32 x i32> %2
