
[RISCV] Call SimplifyDemandedBits on the scalar input of vmv_s_x_vl #131711

Merged 1 commit on Mar 19, 2025
8 changes: 8 additions & 0 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19183,6 +19183,14 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Scalar = N->getOperand(1);
SDValue VL = N->getOperand(2);

+// The vmv.s.x instruction copies the scalar integer register to element 0
+// of the destination vector register. If SEW < XLEN, the least-significant
+// bits are copied and the upper XLEN-SEW bits are ignored.
+unsigned ScalarSize = Scalar.getValueSizeInBits();
+unsigned EltWidth = VT.getScalarSizeInBits();
+if (ScalarSize > EltWidth && SimplifyDemandedLowBitsHelper(1, EltWidth))
+  return SDValue(N, 0);
+
if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
Scalar.getOperand(0).getValueType() == N->getValueType(0))
return Scalar.getOperand(0);
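SimplifyDemandedLowBitsHelper is not shown in the hunk above. As a rough sketch of what such a helper plausibly looks like (assuming the standard TargetLowering::SimplifyDemandedBits API; this is illustrative, not the verbatim helper from this file):

// Illustrative sketch: report that only the low LowBits bits of operand
// OpNo of N are demanded, letting DAGCombine strip redundant masks or
// extensions feeding that operand.
auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
  SDValue Op = N->getOperand(OpNo);
  APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
  if (!SimplifyDemandedBits(Op, Mask, DCI))
    return false;
  // The operand was simplified; revisit N so later combines see the change.
  if (N->getOpcode() != ISD::DELETED_NODE)
    DCI.AddToWorklist(N);
  return true;
};

With EltWidth equal to the element width (SEW), the new call tells the combiner that vmv.s.x never reads the upper XLEN-SEW bits of its scalar operand. In the test diff below, that is exactly what lets the andi a0, a0, 255 (zero-extension of the i8 start value) and the slli/srli pairs (zero-extension of the i16 start value) be deleted.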
121 changes: 32 additions & 89 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -24,7 +24,6 @@ declare i8 @llvm.vp.reduce.umax.v2i8(i8, <2 x i8>, <2 x i1>, i32)
define signext i8 @vpreduce_umax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -55,7 +54,6 @@ declare i8 @llvm.vp.reduce.umin.v2i8(i8, <2 x i8>, <2 x i1>, i32)
define signext i8 @vpreduce_umin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -131,7 +129,6 @@ declare i8 @llvm.vp.reduce.umin.v3i8(i8, <3 x i8>, <3 x i1>, i32)
define signext i8 @vpreduce_umin_v3i8(i8 signext %s, <3 x i8> %v, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -162,7 +159,6 @@ declare i8 @llvm.vp.reduce.umax.v4i8(i8, <4 x i8>, <4 x i1>, i32)
define signext i8 @vpreduce_umax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umax_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -193,7 +189,6 @@ declare i8 @llvm.vp.reduce.umin.v4i8(i8, <4 x i8>, <4 x i1>, i32)
define signext i8 @vpreduce_umin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_umin_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: andi a0, a0, 255
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -282,27 +277,14 @@ define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m
declare i16 @llvm.vp.reduce.umax.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vpreduce_umax_v2i16:
-; RV32: # %bb.0:
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vpreduce_umax_v2i16:
-; RV64: # %bb.0:
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 48
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: vpreduce_umax_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
+; CHECK-NEXT: vmv.x.s a0, v9
+; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.umax.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}
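Note on the check-prefix merge above (and in the three hunks below): the RV32 and RV64 bodies differed only in the slli/srli zero-extension pair, whose shift amount is XLEN-16 (16 on RV32, 48 on RV64). Once that extension is gone, both targets emit identical code, so the separate RV32/RV64 check blocks collapse into a single CHECK block.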
@@ -325,27 +307,14 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %
declare i16 @llvm.vp.reduce.umin.v2i16(i16, <2 x i16>, <2 x i1>, i32)

define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vpreduce_umin_v2i16:
-; RV32: # %bb.0:
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vpreduce_umin_v2i16:
-; RV64: # %bb.0:
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 48
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: vpreduce_umin_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
+; CHECK-NEXT: vmv.x.s a0, v9
+; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.umin.v2i16(i16 %s, <2 x i16> %v, <2 x i1> %m, i32 %evl)
ret i16 %r
}
@@ -428,27 +397,14 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m
declare i16 @llvm.vp.reduce.umax.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vpreduce_umax_v4i16:
-; RV32: # %bb.0:
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vpreduce_umax_v4i16:
-; RV64: # %bb.0:
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 48
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: vpreduce_umax_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t
+; CHECK-NEXT: vmv.x.s a0, v9
+; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.umax.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}
@@ -471,27 +427,14 @@ define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %
declare i16 @llvm.vp.reduce.umin.v4i16(i16, <4 x i16>, <4 x i1>, i32)

define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vpreduce_umin_v4i16:
-; RV32: # %bb.0:
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 16
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vpreduce_umin_v4i16:
-; RV64: # %bb.0:
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 48
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: ret
+; CHECK-LABEL: vpreduce_umin_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t
+; CHECK-NEXT: vmv.x.s a0, v9
+; CHECK-NEXT: ret
%r = call i16 @llvm.vp.reduce.umin.v4i16(i16 %s, <4 x i16> %v, <4 x i1> %m, i32 %evl)
ret i16 %r
}