[RISCV] Set the exact flag on the SRL created for converting vscale to a read of vlenb. #144571
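On RISC-V, `vscale` is lowered to a read of the `vlenb` CSR followed by a right shift by 3, since `vlenb` reports the vector register length in bytes and the lowering treats one `vscale` unit as 8 bytes. Under that assumption the low three bits of `vlenb` are always zero, so the SRL discards no set bits and can carry the `exact` flag, which later DAG combines can use to fold the shift into subsequent shifts and multiplies. Below is a minimal sketch (illustrative only, not part of the patch) of the arithmetic this relies on:

```cpp
// Illustration (not from the patch): why the SRL that converts vlenb to
// vscale can be marked exact, assuming vlenb == vscale * 8 as the lowering does.
#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t vscale = 1; vscale <= 64; vscale *= 2) {
    uint64_t vlenb = vscale * 8;      // what a csrr of vlenb would return
    assert((vlenb & 0x7) == 0);       // low 3 bits are zero: >> 3 drops nothing
    assert((vlenb >> 3) == vscale);   // vscale = vlenb >> 3, exactly
  }
  return 0;
}
```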

Merged · 4 commits · Jun 17, 2025
13 changes: 9 additions & 4 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7360,20 +7360,25 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
uint64_t Val = Op.getConstantOperandVal(0);
if (isPowerOf2_64(Val)) {
uint64_t Log2 = Log2_64(Val);
if (Log2 < 3)
if (Log2 < 3) {
SDNodeFlags Flags;
Flags.setExact(true);
Res = DAG.getNode(ISD::SRL, DL, XLenVT, Res,
DAG.getConstant(3 - Log2, DL, VT));
else if (Log2 > 3)
DAG.getConstant(3 - Log2, DL, XLenVT), Flags);
} else if (Log2 > 3) {
Res = DAG.getNode(ISD::SHL, DL, XLenVT, Res,
DAG.getConstant(Log2 - 3, DL, XLenVT));
}
} else if ((Val % 8) == 0) {
// If the multiplier is a multiple of 8, scale it down to avoid needing
// to shift the VLENB value.
Res = DAG.getNode(ISD::MUL, DL, XLenVT, Res,
DAG.getConstant(Val / 8, DL, XLenVT));
} else {
SDNodeFlags Flags;
Flags.setExact(true);
SDValue VScale = DAG.getNode(ISD::SRL, DL, XLenVT, Res,
DAG.getConstant(3, DL, XLenVT));
DAG.getConstant(3, DL, XLenVT), Flags);
Res = DAG.getNode(ISD::MUL, DL, XLenVT, VScale,
DAG.getConstant(Val, DL, XLenVT));
}
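The payoff comes from the generic DAG combines: once the SRL is known exact, a following left shift (or a multiply that decomposes into shifts) can be folded into it, because exactness guarantees the intermediate right shift lost nothing. The test updates below show the effect, e.g. `srli a1, a0, 3; slli a1, a1, 1` collapsing to `srli a1, a0, 2`. A small sketch (illustrative, not LLVM code) of why the fold needs the flag:

```cpp
// Illustration: (x >> 3) << 1 == x >> 2 only when the right shift is exact,
// i.e. when the bits shifted out are known to be zero.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t exact = 16;                          // multiple of 8: >> 3 is exact
  assert(((exact >> 3) << 1) == (exact >> 2));  // fold is valid: both are 4

  uint64_t inexact = 12;                        // low bits set: >> 3 drops bits
  assert(((inexact >> 3) << 1) == 2);           // srl+shl yields 2 ...
  assert((inexact >> 2) == 3);                  // ... but a single srl yields 3
  return 0;
}
```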
12 changes: 5 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -290,8 +290,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: srli a1, a0, 2
; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
@@ -314,8 +313,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: srli a1, a0, 2
; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v10, a0
@@ -341,9 +339,9 @@ define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
; CHECK-LABEL: extract_nxv4i8_nxv1i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: ret
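For the `vscale * 3` cases in these tests, the exact shift also lets the multiply be rewritten as a sum of shifts of `vlenb` itself: `3 * (vlenb >> 3)` becomes `(vlenb >> 2) + (vlenb >> 3)`, so both shifts read the same source register instead of forming an srl, sll, add dependency chain. A compile-time check of that identity, under the same assumption that `vlenb` is a multiple of 8 (the value 32 is just an example, corresponding to VLEN = 256 bits):

```cpp
// Illustration: decomposition behind the updated vscale*3 sequences.
#include <cstdint>

constexpr uint64_t vlenb = 32;  // hypothetical example; any multiple of 8 works
static_assert(3 * (vlenb >> 3) == (vlenb >> 2) + (vlenb >> 3),
              "3*vscale == vlenb/4 + vlenb/8 when vlenb % 8 == 0");
```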
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/get_vector_length.ll
@@ -257,9 +257,9 @@ define i32 @vector_length_vf3_i32(i32 zeroext %tc) {
; RV32-LABEL: vector_length_vf3_i32:
; RV32: # %bb.0:
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: slli a2, a1, 1
; RV32-NEXT: add a1, a2, a1
; RV32-NEXT: srli a2, a1, 3
; RV32-NEXT: srli a1, a1, 2
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: bltu a0, a1, .LBB22_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
@@ -270,9 +270,9 @@ define i32 @vector_length_vf3_i32(i32 zeroext %tc) {
; RV64: # %bb.0:
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: srli a2, a1, 3
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: bltu a0, a1, .LBB22_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
@@ -286,9 +286,9 @@ define i32 @vector_length_vf3_XLen(iXLen zeroext %tc) {
; RV32-LABEL: vector_length_vf3_XLen:
; RV32: # %bb.0:
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: slli a2, a1, 1
; RV32-NEXT: add a1, a2, a1
; RV32-NEXT: srli a2, a1, 3
; RV32-NEXT: srli a1, a1, 2
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: bltu a0, a1, .LBB23_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
@@ -299,9 +299,9 @@ define i32 @vector_length_vf3_XLen(iXLen zeroext %tc) {
; RV64: # %bb.0:
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: slli a2, a1, 1
; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: srli a2, a1, 3
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: bltu a0, a1, .LBB23_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -78,12 +78,12 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
; CHECK-LABEL: insert_nxv1i8_nxv4i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
ret <vscale x 4 x i8> %v
@@ -309,12 +309,12 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vsc
; CHECK-LABEL: insert_nxv16i8_nxv1i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
ret <vscale x 16 x i8> %v
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll
@@ -8,9 +8,9 @@ define <vscale x 3 x i8> @load_nxv3i8(ptr %ptr) {
; CHECK-LABEL: load_nxv3i8:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: slli a2, a1, 1
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: ret
@@ -22,9 +22,9 @@ define <vscale x 5 x half> @load_nxv5f16(ptr %ptr) {
; CHECK-LABEL: load_nxv5f16:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: slli a2, a1, 2
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: srli a1, a1, 1
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: ret
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll
@@ -8,9 +8,9 @@ define void @store_nxv3i8(<vscale x 3 x i8> %val, ptr %ptr) {
; CHECK-LABEL: store_nxv3i8:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: slli a2, a1, 1
; CHECK-NEXT: add a1, a2, a1
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -637,21 +637,21 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
; RV32-NEXT: lui a1, 797989
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: lui a3, 11557
; RV32-NEXT: lui a4, 92455
; RV32-NEXT: addi a1, a1, -683
; RV32-NEXT: addi a3, a3, -683
; RV32-NEXT: srli a4, a2, 2
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: srli a0, a2, 3
; RV32-NEXT: addi a1, a4, -1368
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: mulhu a1, a0, a1
; RV32-NEXT: slli a3, a0, 1
; RV32-NEXT: slli a0, a0, 6
; RV32-NEXT: sub a0, a0, a3
; RV32-NEXT: slli a0, a2, 3
; RV32-NEXT: sub a0, a0, a4
; RV32-NEXT: lui a1, 92455
; RV32-NEXT: addi a3, a3, -683
; RV32-NEXT: mul a3, a2, a3
; RV32-NEXT: srli a2, a2, 3
; RV32-NEXT: addi a1, a1, -1368
; RV32-NEXT: mulhu a1, a2, a1
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: sw a2, 0(sp)
; RV32-NEXT: sw a3, 0(sp)
; RV32-NEXT: sw a0, 4(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a1), zero
48 changes: 21 additions & 27 deletions llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2240,20 +2240,19 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-RV32: # %bb.0: # %entry
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: srli a3, a4, 3
; CHECK-RV32-NEXT: li a2, 64
; CHECK-RV32-NEXT: srli a2, a4, 3
; CHECK-RV32-NEXT: li a3, 64
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: bgeu a2, a3, .LBB98_2
; CHECK-RV32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: li a3, 0
; CHECK-RV32-NEXT: li a2, 0
; CHECK-RV32-NEXT: j .LBB98_5
; CHECK-RV32-NEXT: .LBB98_2: # %vector.ph
; CHECK-RV32-NEXT: li a2, 0
; CHECK-RV32-NEXT: slli a3, a3, 2
; CHECK-RV32-NEXT: neg a3, a3
; CHECK-RV32-NEXT: andi a3, a3, 256
; CHECK-RV32-NEXT: srli a4, a4, 1
; CHECK-RV32-NEXT: neg a3, a4
; CHECK-RV32-NEXT: andi a3, a3, 256
; CHECK-RV32-NEXT: li a6, 0
; CHECK-RV32-NEXT: li a5, 0
; CHECK-RV32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2300,10 +2299,9 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV64-NEXT: li a2, 0
; CHECK-RV64-NEXT: j .LBB98_5
; CHECK-RV64-NEXT: .LBB98_2: # %vector.ph
; CHECK-RV64-NEXT: slli a2, a2, 2
; CHECK-RV64-NEXT: negw a2, a2
; CHECK-RV64-NEXT: andi a2, a2, 256
; CHECK-RV64-NEXT: srli a3, a4, 1
; CHECK-RV64-NEXT: negw a2, a3
; CHECK-RV64-NEXT: andi a2, a2, 256
; CHECK-RV64-NEXT: slli a4, a4, 1
; CHECK-RV64-NEXT: mv a5, a0
; CHECK-RV64-NEXT: mv a6, a2
@@ -2335,19 +2333,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-NOZBB32: # %bb.0: # %entry
; CHECK-ZVKB-NOZBB32-NEXT: csrr a4, vlenb
; CHECK-ZVKB-NOZBB32-NEXT: srli a3, a4, 3
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 64
; CHECK-ZVKB-NOZBB32-NEXT: bgeu a2, a3, .LBB98_2
; CHECK-ZVKB-NOZBB32-NEXT: srli a2, a4, 3
; CHECK-ZVKB-NOZBB32-NEXT: li a3, 64
; CHECK-ZVKB-NOZBB32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.1:
; CHECK-ZVKB-NOZBB32-NEXT: li a3, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB32-NEXT: slli a3, a3, 2
; CHECK-ZVKB-NOZBB32-NEXT: neg a3, a3
; CHECK-ZVKB-NOZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-NOZBB32-NEXT: srli a4, a4, 1
; CHECK-ZVKB-NOZBB32-NEXT: neg a3, a4
; CHECK-ZVKB-NOZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-NOZBB32-NEXT: li a6, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a5, 0
; CHECK-ZVKB-NOZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2395,10 +2392,9 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB64-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB64-NEXT: j .LBB98_5
; CHECK-ZVKB-NOZBB64-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-NOZBB64-NEXT: slli a2, a2, 2
; CHECK-ZVKB-NOZBB64-NEXT: negw a2, a2
; CHECK-ZVKB-NOZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-NOZBB64-NEXT: srli a3, a4, 1
; CHECK-ZVKB-NOZBB64-NEXT: negw a2, a3
; CHECK-ZVKB-NOZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-NOZBB64-NEXT: slli a4, a4, 1
; CHECK-ZVKB-NOZBB64-NEXT: mv a5, a0
; CHECK-ZVKB-NOZBB64-NEXT: mv a6, a2
@@ -2431,19 +2427,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-ZBB32: # %bb.0: # %entry
; CHECK-ZVKB-ZBB32-NEXT: csrr a4, vlenb
; CHECK-ZVKB-ZBB32-NEXT: srli a3, a4, 3
; CHECK-ZVKB-ZBB32-NEXT: li a2, 64
; CHECK-ZVKB-ZBB32-NEXT: bgeu a2, a3, .LBB98_2
; CHECK-ZVKB-ZBB32-NEXT: srli a2, a4, 3
; CHECK-ZVKB-ZBB32-NEXT: li a3, 64
; CHECK-ZVKB-ZBB32-NEXT: bgeu a3, a2, .LBB98_2
; CHECK-ZVKB-ZBB32-NEXT: # %bb.1:
; CHECK-ZVKB-ZBB32-NEXT: li a3, 0
; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
; CHECK-ZVKB-ZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
; CHECK-ZVKB-ZBB32-NEXT: slli a3, a3, 2
; CHECK-ZVKB-ZBB32-NEXT: neg a3, a3
; CHECK-ZVKB-ZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-ZBB32-NEXT: srli a4, a4, 1
; CHECK-ZVKB-ZBB32-NEXT: neg a3, a4
; CHECK-ZVKB-ZBB32-NEXT: andi a3, a3, 256
; CHECK-ZVKB-ZBB32-NEXT: li a6, 0
; CHECK-ZVKB-ZBB32-NEXT: li a5, 0
; CHECK-ZVKB-ZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2489,10 +2484,9 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB64-NEXT: li a2, 0
; CHECK-ZVKB-ZBB64-NEXT: j .LBB98_5
; CHECK-ZVKB-ZBB64-NEXT: .LBB98_2: # %vector.ph
; CHECK-ZVKB-ZBB64-NEXT: slli a2, a2, 2
; CHECK-ZVKB-ZBB64-NEXT: negw a2, a2
; CHECK-ZVKB-ZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-ZBB64-NEXT: srli a3, a4, 1
; CHECK-ZVKB-ZBB64-NEXT: negw a2, a3
; CHECK-ZVKB-ZBB64-NEXT: andi a2, a2, 256
; CHECK-ZVKB-ZBB64-NEXT: slli a4, a4, 1
; CHECK-ZVKB-ZBB64-NEXT: mv a5, a0
; CHECK-ZVKB-ZBB64-NEXT: mv a6, a2
22 changes: 9 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -338,16 +338,14 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
; CHECK-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v10, v9, a1
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: slli a3, a1, 1
; CHECK-NEXT: add a3, a0, a0
; CHECK-NEXT: add a1, a4, a1
; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v10, v11, a2
; CHECK-NEXT: vslideup.vx v8, v13, a2
; CHECK-NEXT: add a2, a0, a0
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: add a1, a3, a1
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v14, a3
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v14, a4
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs1r.v v8, (a0)
@@ -381,20 +379,18 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: add a3, a1, a1
; CHECK-NEXT: add a4, a2, a1
; CHECK-NEXT: slli a5, a1, 1
; CHECK-NEXT: add a6, a0, a0
; CHECK-NEXT: add a5, a0, a0
; CHECK-NEXT: vsetvli zero, a3, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v10, v9, a1
; CHECK-NEXT: add a5, a5, a1
; CHECK-NEXT: vslideup.vx v8, v13, a1
; CHECK-NEXT: add a1, a4, a1
; CHECK-NEXT: vsetvli zero, a4, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v10, v11, a2
; CHECK-NEXT: add a1, a5, a1
; CHECK-NEXT: vslideup.vx v8, v14, a2
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a5
; CHECK-NEXT: vslideup.vx v8, v15, a5
; CHECK-NEXT: vsetvli zero, a6, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v12, a4
; CHECK-NEXT: vslideup.vx v8, v15, a4
; CHECK-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs1r.v v8, (a0)