@@ -1745,13 +1745,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -1761,17 +1754,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::riscv_seg8_load_mask:
     return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
                                /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
-    // Operands are (vec, ..., vec, ptr, vl)
-    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
-                               /*IsStore*/ true,
-                               /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10591,13 +10573,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -10620,12 +10595,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                       ContainerVT.getScalarSizeInBits();
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
-    // Masked: (pointer, mask, vl)
-    // Non-masked: (pointer, vl)
-    bool IsMasked = Op.getNumOperands() > 4;
+    // Operands: (chain, int_id, pointer, mask, vl)
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask =
-        IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(3);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -10699,13 +10671,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10720,24 +10685,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
         Intrinsic::riscv_vsseg8_mask};
 
-    bool IsMasked = false;
-    switch (IntNo) {
-    case Intrinsic::riscv_seg2_store_mask:
-    case Intrinsic::riscv_seg3_store_mask:
-    case Intrinsic::riscv_seg4_store_mask:
-    case Intrinsic::riscv_seg5_store_mask:
-    case Intrinsic::riscv_seg6_store_mask:
-    case Intrinsic::riscv_seg7_store_mask:
-    case Intrinsic::riscv_seg8_store_mask:
-      IsMasked = true;
-      break;
-    default:
-      break;
-    }
-
-    // Non-masked: (chain, int_id, vec*, ptr, vl)
-    // Masked: (chain, int_id, vec*, ptr, mask, vl)
-    unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
+    // Operands: (chain, int_id, vec*, ptr, mask, vl)
+    unsigned NF = Op->getNumOperands() - 5;
     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
     MVT XLenVT = Subtarget.getXLenVT();
     MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10747,8 +10696,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
-                            : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -23823,10 +23771,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
 }
 
 static const Intrinsic::ID FixedVlsegIntrIds[] = {
-    Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
-    Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
-    Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
-    Intrinsic::riscv_seg8_load};
+    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
+    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
+    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
+    Intrinsic::riscv_seg8_load_mask};
 
 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
@@ -23877,10 +23825,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   };
 
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-
-  CallInst *VlsegN = Builder.CreateIntrinsic(
-      FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
-      {LI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
+  CallInst *VlsegN =
+      Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
+                              {LI->getPointerOperand(), Mask, VL});
 
   for (unsigned i = 0; i < Shuffles.size(); i++) {
     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23891,10 +23840,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
 }
 
 static const Intrinsic::ID FixedVssegIntrIds[] = {
-    Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
-    Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
-    Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
-    Intrinsic::riscv_seg8_store};
+    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
+    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
+    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
+    Intrinsic::riscv_seg8_store_mask};
 
 /// Lower an interleaved store into a vssegN intrinsic.
 ///
@@ -23954,8 +23903,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   }
 
   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-      SI->getModule(), FixedVssegIntrIds[Factor - 2],
-      {VTy, SI->getPointerOperandType(), XLenTy});
+      SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});
 
   SmallVector<Value *, 10> Ops;
   SmallVector<int, 16> NewShuffleMask;
@@ -23975,7 +23923,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   // potentially under larger LMULs) because we checked that the fixed vector
   // type fits in isLegalInterleavedAccessType
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-  Ops.append({SI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *StoreMask = ConstantVector::getSplat(
+      VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
+  Ops.append({SI->getPointerOperand(), StoreMask, VL});
 
   Builder.CreateCall(VssegNFunc, Ops);
 
@@ -24004,10 +23955,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
     Return =
-        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
-                                {ResVTy, LI->getPointerOperandType(), XLenTy},
-                                {LI->getPointerOperand(), VL});
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
+                                {LI->getPointerOperand(), Mask, VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -24071,12 +24024,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), FixedVssegIntrIds[Factor - 2],
-        {InVTy, SI->getPointerOperandType(), XLenTy});
+        SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});
 
     SmallVector<Value *, 10> Ops(InterleaveValues);
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Ops.append({SI->getPointerOperand(), VL});
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
+    Ops.append({SI->getPointerOperand(), Mask, VL});
 
     Builder.CreateCall(VssegNFunc, Ops);
   } else {
@@ -24198,15 +24153,9 @@ bool RISCVTargetLowering::lowerInterleavedVPLoad(
 
   Value *Return = nullptr;
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
-        Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
-        Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
-        Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
-        Intrinsic::riscv_seg8_load_mask};
-
-    Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
-                                     {FVTy, XLenTy},
-                                     {Load->getArgOperand(0), Mask, EVL});
+    Return =
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
+                                {Load->getArgOperand(0), Mask, EVL});
   } else {
     static const Intrinsic::ID IntrMaskIds[] = {
         Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24318,15 +24267,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
                 XLenTy);
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
-        Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
-        Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
-        Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
-        Intrinsic::riscv_seg8_store_mask};
-
     SmallVector<Value *, 8> Operands(InterleaveOperands);
     Operands.append({Store->getArgOperand(1), Mask, EVL});
-    Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
+    Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
                             Operands);
     return true;
   }
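
Note: after this change, every fixed-vector path funnels into the masked segment intrinsics, with an all-ones mask standing in for the removed non-masked forms. A minimal IR sketch of what the factor-2 fixed-vector lowering would emit on RV64 — the value names and the .v4i32.i64 overload suffix are illustrative assumptions based on the {VTy, XLenTy} overload sets above, not output copied from the patch or its tests:

; Masked segment load: operands are (pointer, mask, vl). With an all-ones
; mask this behaves exactly like the removed llvm.riscv.seg2.load.
%seg = call { <4 x i32>, <4 x i32> } @llvm.riscv.seg2.load.mask.v4i32.i64(ptr %src, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i64 4)
%v0 = extractvalue { <4 x i32>, <4 x i32> } %seg, 0
%v1 = extractvalue { <4 x i32>, <4 x i32> } %seg, 1
; Masked segment store: operands are (vec..., pointer, mask, vl).
call void @llvm.riscv.seg2.store.mask.v4i32.i64(<4 x i32> %v0, <4 x i32> %v1, ptr %dst, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i64 4)

Since the mask is a constant splat of i1 true, dropping the non-masked variants loses no expressiveness; callers simply pass the trivial mask, and the extra operand is folded away during selection.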