[RISCV][VLOPT] Add vector mask producing integer instructions to isSupportedInstr and getOperandInfo #119733
Conversation
@llvm/pr-subscribers-backend-risc-v

Author: Michael Maitland (michaelmaitland)

Changes

Patch is 23.36 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/119733.diff

3 Files Affected:
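For the mask-producing compares, the patch encodes the rule stated in the comment in the first hunk: the destination mask has EEW=1 and EMUL=(EEW/SEW)*LMUL, while the vector sources keep EEW=SEW and EMUL=LMUL. Below is a minimal, standalone sketch of that EMUL arithmetic only; the names and types are invented for illustration, while the pass itself uses RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI) for the destination.

```cpp
// Standalone illustration of EMUL = (EEW / SEW) * LMUL for a mask destination.
// Not the in-tree helper; all names here are hypothetical.
#include <cstdio>
#include <numeric>
#include <utility>

// LMUL/EMUL modeled as a fraction Num/Denom (m2 = 2/1, mf4 = 1/4, ...).
using Frac = std::pair<unsigned, unsigned>;

static Frac reduceFrac(Frac F) {
  unsigned G = std::gcd(F.first, F.second);
  return {F.first / G, F.second / G};
}

// EEW and SEW are given as log2 values, so the ratio EEW/SEW is a power of two.
static Frac computeEMUL(unsigned Log2EEW, unsigned Log2SEW, Frac LMUL) {
  if (Log2EEW >= Log2SEW)
    return reduceFrac({LMUL.first << (Log2EEW - Log2SEW), LMUL.second});
  return reduceFrac({LMUL.first, LMUL.second << (Log2SEW - Log2EEW)});
}

int main() {
  // vmseq.vi at SEW=32 (log2 = 5), LMUL=m2: the destination mask has EEW=1
  // (log2 = 0), so EMUL = (1/32) * 2 = 1/16.
  Frac DestEMUL = computeEMUL(/*Log2EEW=*/0, /*Log2SEW=*/5, /*LMUL=*/{2, 1});
  std::printf("destination EMUL = %u/%u\n", DestEMUL.first, DestEMUL.second);
  return 0;
}
```

With SEW=32 and LMUL=m2, as in the vmseq tests added below, the destination's EMUL works out to 1/16 by this formula.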
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1d5684d6038ea9..e3b21ec05171e8 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -521,6 +521,32 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
}
+ // Vector Integer Compare Instructions
+ // Dest EEW=1 and EMUL=(EEW/SEW)*LMUL. Source EEW=SEW and EMUL=LMUL.
+ case RISCV::VMSEQ_VI:
+ case RISCV::VMSEQ_VV:
+ case RISCV::VMSEQ_VX:
+ case RISCV::VMSNE_VI:
+ case RISCV::VMSNE_VV:
+ case RISCV::VMSNE_VX:
+ case RISCV::VMSLTU_VV:
+ case RISCV::VMSLTU_VX:
+ case RISCV::VMSLT_VV:
+ case RISCV::VMSLT_VX:
+ case RISCV::VMSLEU_VV:
+ case RISCV::VMSLEU_VI:
+ case RISCV::VMSLEU_VX:
+ case RISCV::VMSLE_VV:
+ case RISCV::VMSLE_VI:
+ case RISCV::VMSLE_VX:
+ case RISCV::VMSGTU_VI:
+ case RISCV::VMSGTU_VX:
+ case RISCV::VMSGT_VI:
+ case RISCV::VMSGT_VX:
+ if (IsMODef)
+ return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
+ return OperandInfo(MIVLMul, MILog2SEW);
+
default:
return {};
}
@@ -599,7 +625,26 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VNSRA_WV:
case RISCV::VNSRA_WX:
// Vector Integer Compare Instructions
- // FIXME: Add support
+ case RISCV::VMSEQ_VI:
+ case RISCV::VMSEQ_VV:
+ case RISCV::VMSEQ_VX:
+ case RISCV::VMSNE_VI:
+ case RISCV::VMSNE_VV:
+ case RISCV::VMSNE_VX:
+ case RISCV::VMSLTU_VV:
+ case RISCV::VMSLTU_VX:
+ case RISCV::VMSLT_VV:
+ case RISCV::VMSLT_VX:
+ case RISCV::VMSLEU_VV:
+ case RISCV::VMSLEU_VI:
+ case RISCV::VMSLEU_VX:
+ case RISCV::VMSLE_VV:
+ case RISCV::VMSLE_VI:
+ case RISCV::VMSLE_VX:
+ case RISCV::VMSGTU_VI:
+ case RISCV::VMSGTU_VX:
+ case RISCV::VMSGT_VI:
+ case RISCV::VMSGT_VX:
// Vector Integer Min/Max Instructions
case RISCV::VMINU_VV:
case RISCV::VMINU_VX:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index a21e3df85193fb..eda78733429024 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1082,6 +1082,425 @@ define <vscale x 4 x i16> @vnsra_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b
ret <vscale x 4 x i16> %2
}
+define <vscale x 4 x i1> @vmseq_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmseq_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmseq_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsltu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsltu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsltu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsltu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsltu.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsltu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsltu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsltu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsltu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsltu.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmslt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmslt_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmslt.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmslt_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmslt.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmslt_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmslt_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmslt.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmslt_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmslt.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgtu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgtu_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgtu.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgtu_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgtu.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgtu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgtu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgtu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgtu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgtu.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgt_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgt_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgt.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgt_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgt.vi v10, v8, 5
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgt_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgt.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgt_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgt.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
define <vscale x 4 x i32> @vminu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vminu_vv:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index f1e7bb446482e1..165f1ebfa5aa7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -602,4 +60...
[truncated]
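For context, the two hooks play different roles in the VL optimizer: isSupportedInstr marks an opcode as a candidate whose VL operand may be shrunk, while getOperandInfo describes the EEW/EMUL of each operand so the pass can check that the result is read with a compatible element size and grouping before reducing the producer's VL to what its users require. The following is a heavily simplified, self-contained sketch of that "shrink the producer's VL to its users' demand" idea; every name and type here is invented for the illustration and none of it is the in-tree pass.

```cpp
// Hypothetical illustration of demand-driven VL reduction (not RISCVVLOptimizer).
#include <algorithm>
#include <cstdio>
#include <optional>
#include <vector>

struct Use {
  unsigned DemandedVL;   // how many elements the user actually reads
  bool OperandInfoKnown; // analogous to getOperandInfo() returning valid info
};

// Returns the VL the producer could be reduced to, if every use is analyzable.
static std::optional<unsigned> commonDemandedVL(const std::vector<Use> &Uses) {
  unsigned MaxDemand = 0;
  for (const Use &U : Uses) {
    if (!U.OperandInfoKnown)
      return std::nullopt; // bail out when an operand cannot be described
    MaxDemand = std::max(MaxDemand, U.DemandedVL);
  }
  return MaxDemand;
}

int main() {
  // E.g. a compare produced at VLMAX whose only user runs with VL = 4.
  std::vector<Use> Uses = {{4, true}};
  if (auto VL = commonDemandedVL(Uses))
    std::printf("producer VL can be reduced to %u\n", *VL);
  return 0;
}
```

The tests above are built around exactly this pattern: each compare intrinsic is called with VL = -1 (VLMAX), and its only user, vmand.mm, runs at %vl.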
✅ With the latest revision this PR passed the C/C++ code formatter.
LGTM
LLVM Buildbot has detected a new failure on builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/190/builds/11267. Here is the relevant piece of the build log for reference.
LLVM Buildbot has detected a new failure on builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/3/builds/9089. Here is the relevant piece of the build log for reference.