Skip to content

Commit b805561

Browse files
fixup! cleanup
1 parent b9fbab7 commit b805561

File tree

3 files changed

+114
-13
lines changed

3 files changed

+114
-13
lines changed

llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -444,11 +444,7 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
444444
case RISCV::VWMACC_VX:
445445
case RISCV::VWMACCSU_VV:
446446
case RISCV::VWMACCSU_VX:
447-
case RISCV::VWMACCUS_VX:
448-
// Vector Single-Width Fractional Multiply with Rounding and Saturation
449-
// Destination EEW=2*SEW and EMUL=2*EMUL. Source EEW=SEW and EMUL=LMUL.
450-
case RISCV::VSMUL_VV:
451-
case RISCV::VSMUL_VX: {
447+
case RISCV::VWMACCUS_VX: {
452448
unsigned Log2EEW = IsMODef ? MILog2SEW + 1 : MILog2SEW;
453449
RISCVII::VLMUL EMUL =
454450
IsMODef ? RISCVVType::twoTimesVLMUL(MIVLMul) : MIVLMul;
@@ -642,6 +638,11 @@ static bool isSupportedInstr(const MachineInstr &MI) {
642638
case RISCV::VSEXT_VF8:
643639
// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
644640
// FIXME: Add support
641+
case RISCV::VMADC_VV:
642+
case RISCV::VMADC_VI:
643+
case RISCV::VMADC_VX:
644+
case RISCV::VMSBC_VV:
645+
case RISCV::VMSBC_VX:
645646
// Vector Narrowing Integer Right Shift Instructions
646647
case RISCV::VNSRL_WX:
647648
case RISCV::VNSRL_WI:

llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -962,6 +962,106 @@ define <vscale x 4 x i64> @vzext_vf8(<vscale x 4 x i8> %a, <vscale x 4 x i64> %b
962962
ret <vscale x 4 x i64> %2
963963
}
964964

965+
define <vscale x 4 x i1> @vmadc_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
966+
; NOVLOPT-LABEL: vmadc_vi:
967+
; NOVLOPT: # %bb.0:
968+
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
969+
; NOVLOPT-NEXT: vmadc.vi v10, v8, 5
970+
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
971+
; NOVLOPT-NEXT: vmand.mm v0, v10, v0
972+
; NOVLOPT-NEXT: ret
973+
;
974+
; VLOPT-LABEL: vmadc_vi:
975+
; VLOPT: # %bb.0:
976+
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
977+
; VLOPT-NEXT: vmadc.vi v10, v8, 5
978+
; VLOPT-NEXT: vmand.mm v0, v10, v0
979+
; VLOPT-NEXT: ret
980+
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
981+
%2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
982+
ret <vscale x 4 x i1> %2
983+
}
984+
985+
define <vscale x 4 x i1> @vmadc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
986+
; NOVLOPT-LABEL: vmadc_vx:
987+
; NOVLOPT: # %bb.0:
988+
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
989+
; NOVLOPT-NEXT: vmadc.vx v10, v8, a0
990+
; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
991+
; NOVLOPT-NEXT: vmand.mm v0, v10, v0
992+
; NOVLOPT-NEXT: ret
993+
;
994+
; VLOPT-LABEL: vmadc_vx:
995+
; VLOPT: # %bb.0:
996+
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
997+
; VLOPT-NEXT: vmadc.vx v10, v8, a0
998+
; VLOPT-NEXT: vmand.mm v0, v10, v0
999+
; VLOPT-NEXT: ret
1000+
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
1001+
%2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
1002+
ret <vscale x 4 x i1> %2
1003+
}
1004+
1005+
define <vscale x 4 x i1> @vmadc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
1006+
; NOVLOPT-LABEL: vmadc_vv:
1007+
; NOVLOPT: # %bb.0:
1008+
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
1009+
; NOVLOPT-NEXT: vmadc.vv v12, v8, v10
1010+
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1011+
; NOVLOPT-NEXT: vmand.mm v0, v12, v0
1012+
; NOVLOPT-NEXT: ret
1013+
;
1014+
; VLOPT-LABEL: vmadc_vv:
1015+
; VLOPT: # %bb.0:
1016+
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1017+
; VLOPT-NEXT: vmadc.vv v12, v8, v10
1018+
; VLOPT-NEXT: vmand.mm v0, v12, v0
1019+
; VLOPT-NEXT: ret
1020+
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
1021+
%2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
1022+
ret <vscale x 4 x i1> %2
1023+
}
1024+
1025+
define <vscale x 4 x i1> @vmsbc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
1026+
; NOVLOPT-LABEL: vmsbc_vx:
1027+
; NOVLOPT: # %bb.0:
1028+
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1029+
; NOVLOPT-NEXT: vmsbc.vx v10, v8, a0
1030+
; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1031+
; NOVLOPT-NEXT: vmand.mm v0, v10, v0
1032+
; NOVLOPT-NEXT: ret
1033+
;
1034+
; VLOPT-LABEL: vmsbc_vx:
1035+
; VLOPT: # %bb.0:
1036+
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1037+
; VLOPT-NEXT: vmsbc.vx v10, v8, a0
1038+
; VLOPT-NEXT: vmand.mm v0, v10, v0
1039+
; VLOPT-NEXT: ret
1040+
%1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
1041+
%2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
1042+
ret <vscale x 4 x i1> %2
1043+
}
1044+
1045+
define <vscale x 4 x i1> @vmsbc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
1046+
; NOVLOPT-LABEL: vmsbc_vv:
1047+
; NOVLOPT: # %bb.0:
1048+
; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
1049+
; NOVLOPT-NEXT: vmsbc.vv v12, v8, v10
1050+
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1051+
; NOVLOPT-NEXT: vmand.mm v0, v12, v0
1052+
; NOVLOPT-NEXT: ret
1053+
;
1054+
; VLOPT-LABEL: vmsbc_vv:
1055+
; VLOPT: # %bb.0:
1056+
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1057+
; VLOPT-NEXT: vmsbc.vv v12, v8, v10
1058+
; VLOPT-NEXT: vmand.mm v0, v12, v0
1059+
; VLOPT-NEXT: ret
1060+
%1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
1061+
%2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
1062+
ret <vscale x 4 x i1> %2
1063+
}
1064+
9651065
define <vscale x 4 x i16> @vnsrl_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %vl) {
9661066
; NOVLOPT-LABEL: vnsrl_wi:
9671067
; NOVLOPT: # %bb.0:

llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -606,7 +606,7 @@ body: |
606606
name: vmop_vv
607607
body: |
608608
bb.0:
609-
; CHECK-LABEL: name: vmcmp_vv
609+
; CHECK-LABEL: name: vmop_vv
610610
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
611611
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
612612
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -616,7 +616,7 @@ body: |
616616
name: vmop_vv_maskuser
617617
body: |
618618
bb.0:
619-
; CHECK-LABEL: name: vmcmp_vv_maskuser
619+
; CHECK-LABEL: name: vmop_vv_maskuser
620620
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
621621
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
622622
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -626,7 +626,7 @@ body: |
626626
name: vmop_vv_maskuser_incompatible_eew
627627
body: |
628628
bb.0:
629-
; CHECK-LABEL: name: vmcmp_vv_maskuser_incompatible_eew
629+
; CHECK-LABEL: name: vmop_vv_maskuser_incompatible_eew
630630
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
631631
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
632632
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -636,7 +636,7 @@ body: |
636636
name: vmop_vv_incompatible_emul
637637
body: |
638638
bb.0:
639-
; CHECK-LABEL: name: vmcmp_vv_incompatible_emul
639+
; CHECK-LABEL: name: vmop_vv_incompatible_emul
640640
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
641641
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
642642
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -646,7 +646,7 @@ body: |
646646
name: vmop_vv_maskuser_incompatible_emul
647647
body: |
648648
bb.0:
649-
; CHECK-LABEL: name: vmcmp_vv_maskuser_incompatible_emul
649+
; CHECK-LABEL: name: vmop_vv_maskuser_incompatible_emul
650650
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
651651
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
652652
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -656,7 +656,7 @@ body: |
656656
name: vmop_vv_maskuser_larger_emul
657657
body: |
658658
bb.0:
659-
; CHECK-LABEL: name: vmcmp_vv_maskuser_larger_emul
659+
; CHECK-LABEL: name: vmop_vv_maskuser_larger_emul
660660
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
661661
; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
662662
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -666,7 +666,7 @@ body: |
666666
name: vmop_vv_consumer_incompatible_eew
667667
body: |
668668
bb.0:
669-
; CHECK-LABEL: name: vmcmp_vv_consumer
669+
; CHECK-LABEL: name: vmop_vv_consumer_incompatible_eew
670670
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
671671
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 0 /* e8 */
672672
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
@@ -676,7 +676,7 @@ body: |
676676
name: vmop_vv_consumer_incompatible_emul
677677
body: |
678678
bb.0:
679-
; CHECK-LABEL: name: vmcmp_vv_consumer_incompatible_emul
679+
; CHECK-LABEL: name: vmop_vv_consumer_incompatible_emul
680680
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
681681
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 0 /* e8 */
682682
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0

0 commit comments

Comments
 (0)