@@ -1664,32 +1664,50 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     switch (RISCVTargetLowering::getLMUL(Src1VT)) {
     default:
       llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
+#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
   case RISCVII::VLMUL::lmulenum:                                               \
     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
     VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                 \
                              : RISCV::PseudoVMSGT_VX_##suffix;                 \
-    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix_b;                          \
-    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
     break;
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B64)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B32)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B16)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B4)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B2)
-      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B1)
-#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
+      CASE_VMSLT_OPCODES(LMUL_F8, MF8)
+      CASE_VMSLT_OPCODES(LMUL_F4, MF4)
+      CASE_VMSLT_OPCODES(LMUL_F2, MF2)
+      CASE_VMSLT_OPCODES(LMUL_1, M1)
+      CASE_VMSLT_OPCODES(LMUL_2, M2)
+      CASE_VMSLT_OPCODES(LMUL_4, M4)
+      CASE_VMSLT_OPCODES(LMUL_8, M8)
+#undef CASE_VMSLT_OPCODES
+    }
+    // Mask operations use the LMUL from the mask type.
+    switch (RISCVTargetLowering::getLMUL(VT)) {
+    default:
+      llvm_unreachable("Unexpected LMUL!");
+#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)                            \
+  case RISCVII::VLMUL::lmulenum:                                               \
+    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
+    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix;                               \
+    break;
+      CASE_VMNAND_VMSET_OPCODES(LMUL_F8, B64)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_F4, B32)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_F2, B16)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_1, B8)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_2, B4)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_4, B2)
+      CASE_VMNAND_VMSET_OPCODES(LMUL_8, B1)
+#undef CASE_VMNAND_VMSET_OPCODES
     }
     SDValue SEW = CurDAG->getTargetConstant(
         Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+    SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
     SDValue VL;
     selectVLOp(Node->getOperand(3), VL);

     // If vmsge(u) with minimum value, expand it to vmset.
     if (IsCmpMinimum) {
-      ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
+      ReplaceNode(Node,
+                  CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, MaskSEW));
       return;
     }

@@ -1708,7 +1726,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
           0);
       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
-                                               {Cmp, Cmp, VL, SEW}));
+                                               {Cmp, Cmp, VL, MaskSEW}));
       return;
     }
     case Intrinsic::riscv_vmsgeu_mask:
@@ -1742,7 +1760,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     switch (RISCVTargetLowering::getLMUL(Src1VT)) {
     default:
       llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
+#define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
   case RISCVII::VLMUL::lmulenum:                                               \
     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
@@ -1751,13 +1769,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK      \
                                  : RISCV::PseudoVMSGT_VX_##suffix##_MASK;      \
     break;
-      CASE_VMSLT_OPCODES(LMUL_F8, MF8, B64)
-      CASE_VMSLT_OPCODES(LMUL_F4, MF4, B32)
-      CASE_VMSLT_OPCODES(LMUL_F2, MF2, B16)
-      CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
-      CASE_VMSLT_OPCODES(LMUL_2, M2, B4)
-      CASE_VMSLT_OPCODES(LMUL_4, M4, B2)
-      CASE_VMSLT_OPCODES(LMUL_8, M8, B1)
+      CASE_VMSLT_OPCODES(LMUL_F8, MF8)
+      CASE_VMSLT_OPCODES(LMUL_F4, MF4)
+      CASE_VMSLT_OPCODES(LMUL_F2, MF2)
+      CASE_VMSLT_OPCODES(LMUL_1, M1)
+      CASE_VMSLT_OPCODES(LMUL_2, M2)
+      CASE_VMSLT_OPCODES(LMUL_4, M4)
+      CASE_VMSLT_OPCODES(LMUL_8, M8)
 #undef CASE_VMSLT_OPCODES
     }
     // Mask operations use the LMUL from the mask type.
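A note on the shape of this change: the compare pseudos (PseudoVMSLT(U)_VX, PseudoVMSGT(U)_VX) keep taking their LMUL suffix from the source operand type Src1VT, while the mask-register pseudos (PseudoVMNAND_MM, PseudoVMSET_M) now take theirs from the mask result type VT and carry a separate SEW operand of 0. The sketch below is not part of the patch; it is a minimal standalone illustration of the suffix arithmetic the two switches encode, and the helper name maskSuffixFor plus the LMUL-in-eighths representation are assumptions made for the example.

    #include <cassert>
    #include <string>

    // LMUL expressed in eighths of a vector register:
    // mf8 = 1, mf4 = 2, mf2 = 4, m1 = 8, m2 = 16, m4 = 32, m8 = 64.
    std::string maskSuffixFor(unsigned SEW, unsigned LMULInEighths) {
      // The Bn mask suffix is the SEW/LMUL ratio of the compared data
      // type, so it cannot be read off the data LMUL alone.
      unsigned Ratio = SEW * 8 / LMULInEighths;
      return "B" + std::to_string(Ratio);
    }

    int main() {
      assert(maskSuffixFor(8, 1) == "B64");  // e8 at mf8 -> B64
      assert(maskSuffixFor(64, 8) == "B64"); // e64 at m1 -> also B64
      assert(maskSuffixFor(64, 64) == "B8"); // e64 at m8 -> B8
      return 0;
    }

Note that e8/mf8 and e64/m1 have different data suffixes (MF8 vs. M1) but the same mask suffix (B64); the fixed pairing in the old CASE_VMSLT_VMNAND_VMSET_OPCODES corresponds to the SEW = 8 row of this formula, which is presumably why the patch derives the mask opcode from getLMUL(VT) of the mask type instead. The new MaskSEW constant plays the same role for the mask pseudos that Log2_32(Src1VT.getScalarSizeInBits()) plays for the data pseudos: a Log2SEW operand, where 0 appears to be the value the backend reserves for operations on mask registers.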