[TargetLowering][RISCV] Propagate fastmath flags for the vector operations emitted in expandVecReduce. #85164
Merged
@llvm/pr-subscribers-llvm-selectiondag

Author: Craig Topper (topperc)

Changes: Patch is 50.50 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/85164.diff

2 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index b3dc9de7137311..57f8fc409de453 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10694,7 +10694,7 @@ SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
- Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
+ Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi, Node->getFlags());
VT = HalfVT;
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 68740eec56e4c4..073b60b47343d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1619,15 +1619,10 @@ define float @vreduce_fminimum_v2f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
@@ -1670,24 +1665,14 @@ define float @vreduce_fminimum_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
@@ -1739,33 +1724,18 @@ define float @vreduce_fminimum_v8f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
+; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v10
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x float>, ptr %x
@@ -1826,42 +1796,22 @@ define float @vreduce_fminimum_v16f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f32_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
+; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
+; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v10
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %x
@@ -1933,51 +1883,26 @@ define float @vreduce_fminimum_v32f32_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vle32.v v16, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
+; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
+; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
+; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v10
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <32 x float>, ptr %x
@@ -2073,51 +1998,26 @@ define float @vreduce_fminimum_v64f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v16, (a0)
-; CHECK-NEXT: vfmin.vv v16, v8, v16
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
+; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
+; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
+; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v10
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <64 x float>, ptr %x
@@ -2281,51 +2181,26 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) {
; CHECK-NEXT: vle32.v v0, (a1)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
-; CHECK-NEXT: vfmin.vv v16, v8, v16
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 16
+; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 8
+; CHECK-NEXT: vslidedown.vi v12, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 4
+; CHECK-NEXT: vslidedown.vi v10, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v10
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v9, v11, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <128 x float>, ptr %x
@@ -2359,15 +2234,10 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v2f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v9, (a0)
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <2 x double>, ptr %x
@@ -2410,24 +2280,14 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v4f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v10, (a0)
+; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
+; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <4 x double>, ptr %x
@@ -2479,33 +2339,18 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v8f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a0)
+; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
+; CHECK-NEXT: vslidedown.vi v12, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
+; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v10, v10
-; CHECK-NEXT: vmfeq.vv v8, v9, v9
-; CHECK-NEXT: vmerge.vvm v11, v10, v9, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
-; CHECK-NEXT: vfmin.vv v8, v11, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <8 x double>, ptr %x
@@ -2566,42 +2411,22 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fminimum_v16f64_nonans:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
+; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 8, e64, m8, ta, ma
-; CHECK-NEXT: vslidedown.vi v24, v16, 8
+; CHECK-NEXT: vslidedown.vi v16, v8, 8
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v8, v16, v16
-; CHECK-NEXT: vmerge.vvm v12, v24, v16, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vfmin.vv v12, v12, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v16
; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v16, v12, 4
+; CHECK-NEXT: vslidedown.vi v12, v8, 4
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v8, v12, v12
-; CHECK-NEXT: vmerge.vvm v10, v16, v12, v0
-; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v12, v16, v0
-; CHECK-NEXT: vfmin.vv v10, v10, v8
+; CHECK-NEXT: vfmin.vv v8, v8, v12
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v10, 2
+; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vmfeq.vv v0, v12, v12
-; CHECK-NEXT: vmfeq.vv v8, v10, v10
-; CHECK-NEXT: vmerge.vvm v9, v12, v10, v0
-; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v12, v0
-; CHECK-NEXT: vfmin.vv v9, v9, v8
-; CHECK-NEXT: vslidedown.vi v10, v9, 1
+; CHECK-NEXT: vfmin.vv v8, v8, v10
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma...
[truncated]
arsenm approved these changes on Mar 14, 2024.
We used the fastmath flags for any scalar ops created, but not for the vector ops emitted when the reduction is split.
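For context, the `_nonans` tests exercised by this diff likely resemble the minimal IR below. This is only a sketch: the test bodies are truncated in the diff above, and the `nnan` flag on the reduce call is an assumption inferred from the `_nonans` naming convention, not something shown in this view.

```llvm
; Sketch of the kind of test affected by this change (assumed, see note above).
declare float @llvm.vector.reduce.fminimum.v2f32(<2 x float>)

define float @vreduce_fminimum_v2f32_nonans(ptr %x) {
  %v = load <2 x float>, ptr %x
  ; The nnan flag on the reduction is what the halved vector ops previously lost.
  %red = call nnan float @llvm.vector.reduce.fminimum.v2f32(<2 x float> %v)
  ret float %red
}
```

With the flags now propagated onto the halved binary ops in expandVecReduce, the RISC-V backend can select a plain vfmin.vv for each split step, which is why the vmfeq/vmerge NaN-handling sequences disappear from the CHECK lines above.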