
Commit 01bf50f

[RISCV] Deprecate riscv.segN.load/store in favor of their mask variants
RISCVVectorPeepholePass replaces instructions that have an all-ones mask with their unmasked variants anyway, so there isn't much point in keeping separate unmasked versions of these intrinsics.
1 parent 0d0ef58 commit 01bf50f
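
For callers migrating off the deprecated forms, the updated tests in this commit show the pattern: the old unmasked call is replaced by the masked intrinsic with an all-ones mask (LLVM IR, factor 2 shown; the other factors follow the same shape and still lower to the unmasked vlseg2e8.v):

; Before (deprecated):
  %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
; After (all-ones mask, same codegen):
  %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)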

7 files changed: +84 additions, -243 deletions
llvm/include/llvm/IR/IntrinsicsRISCV.td

Lines changed: 4 additions & 16 deletions
@@ -1704,14 +1704,10 @@ let TargetPrefix = "riscv" in {
   }
 
   // Segment loads/stores for fixed vectors.
+  // Note: we only have the masked variants because RISCVVectorPeephole
+  // would lower any instructions with all-ones mask into unmasked version
+  // anyway.
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
-    // Input: (pointer, vl)
-    def int_riscv_seg # nf # _load
-          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
-                                              !listsplat(LLVMMatchType<0>,
-                                                         !add(nf, -1))),
-                                  [llvm_anyptr_ty, llvm_anyint_ty],
-                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
     // Input: (pointer, mask, vl)
     def int_riscv_seg # nf # _load_mask
           : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
@@ -1721,15 +1717,7 @@ let TargetPrefix = "riscv" in {
                                    llvm_anyint_ty],
                                   [NoCapture<ArgIndex<0>>, IntrReadMem]>;
 
-    // Input: (<stored values>, pointer, vl)
-    def int_riscv_seg # nf # _store
-          : DefaultAttrsIntrinsic<[],
-                                  !listconcat([llvm_anyvector_ty],
-                                              !listsplat(LLVMMatchType<0>,
-                                                         !add(nf, -1)),
-                                              [llvm_anyptr_ty, llvm_anyint_ty]),
-                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
-    // Input: (<stored values>, pointer, mask, vl)
+    // Input: (<stored values>..., pointer, mask, vl)
     def int_riscv_seg # nf # _store_mask
           : DefaultAttrsIntrinsic<[],
                                   !listconcat([llvm_anyvector_ty],
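
As the updated comment above notes, the remaining store intrinsic takes the stored values first, followed by the pointer, mask, and vl. A minimal sketch of a seg2 store call with an all-ones mask; the .v8i8.i64 mangling suffix here is assumed by analogy with the load tests later in this commit:

; Operand order: (<stored values>..., pointer, mask, vl)
  call void @llvm.riscv.seg2.store.mask.v8i8.i64(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, <8 x i1> splat (i1 true), i64 8)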

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 37 additions & 94 deletions
@@ -1745,13 +1745,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -1761,17 +1754,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::riscv_seg8_load_mask:
     return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
                                /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
-    // Operands are (vec, ..., vec, ptr, vl)
-    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
-                               /*IsStore*/ true,
-                               /*IsUnitStrided*/ false, /*UsePtrVal*/ true);
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10591,13 +10573,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_load:
-  case Intrinsic::riscv_seg3_load:
-  case Intrinsic::riscv_seg4_load:
-  case Intrinsic::riscv_seg5_load:
-  case Intrinsic::riscv_seg6_load:
-  case Intrinsic::riscv_seg7_load:
-  case Intrinsic::riscv_seg8_load:
   case Intrinsic::riscv_seg2_load_mask:
   case Intrinsic::riscv_seg3_load_mask:
   case Intrinsic::riscv_seg4_load_mask:
@@ -10620,12 +10595,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                   ContainerVT.getScalarSizeInBits();
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
-    // Masked: (pointer, mask, vl)
-    // Non-masked: (pointer, vl)
-    bool IsMasked = Op.getNumOperands() > 4;
+    // Operands: (chain, int_id, pointer, mask, vl)
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask =
-        IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(3);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -10699,13 +10671,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_seg2_store:
-  case Intrinsic::riscv_seg3_store:
-  case Intrinsic::riscv_seg4_store:
-  case Intrinsic::riscv_seg5_store:
-  case Intrinsic::riscv_seg6_store:
-  case Intrinsic::riscv_seg7_store:
-  case Intrinsic::riscv_seg8_store:
   case Intrinsic::riscv_seg2_store_mask:
   case Intrinsic::riscv_seg3_store_mask:
   case Intrinsic::riscv_seg4_store_mask:
@@ -10720,24 +10685,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
         Intrinsic::riscv_vsseg8_mask};
 
-    bool IsMasked = false;
-    switch (IntNo) {
-    case Intrinsic::riscv_seg2_store_mask:
-    case Intrinsic::riscv_seg3_store_mask:
-    case Intrinsic::riscv_seg4_store_mask:
-    case Intrinsic::riscv_seg5_store_mask:
-    case Intrinsic::riscv_seg6_store_mask:
-    case Intrinsic::riscv_seg7_store_mask:
-    case Intrinsic::riscv_seg8_store_mask:
-      IsMasked = true;
-      break;
-    default:
-      break;
-    }
-
-    // Non-masked: (chain, int_id, vec*, ptr, vl)
-    // Masked: (chain, int_id, vec*, ptr, mask, vl)
-    unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
+    // Operands: (chain, int_id, vec*, ptr, mask, vl)
+    unsigned NF = Op->getNumOperands() - 5;
     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
     MVT XLenVT = Subtarget.getXLenVT();
     MVT VT = Op->getOperand(2).getSimpleValueType();
@@ -10747,8 +10696,7 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
     SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
-    SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
-                            : getAllOnesMask(ContainerVT, VL, DL, DAG);
+    SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
     MVT MaskVT = Mask.getSimpleValueType();
     if (MaskVT.isFixedLengthVector()) {
       MVT MaskContainerVT =
@@ -23823,10 +23771,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
 }
 
 static const Intrinsic::ID FixedVlsegIntrIds[] = {
-    Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
-    Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
-    Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
-    Intrinsic::riscv_seg8_load};
+    Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
+    Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
+    Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
+    Intrinsic::riscv_seg8_load_mask};
 
 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
@@ -23877,10 +23825,11 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   };
 
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-
-  CallInst *VlsegN = Builder.CreateIntrinsic(
-      FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
-      {LI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
+  CallInst *VlsegN =
+      Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
+                              {LI->getPointerOperand(), Mask, VL});
 
   for (unsigned i = 0; i < Shuffles.size(); i++) {
     Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
@@ -23891,10 +23840,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
 }
 
 static const Intrinsic::ID FixedVssegIntrIds[] = {
-    Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
-    Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
-    Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
-    Intrinsic::riscv_seg8_store};
+    Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
+    Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
+    Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
+    Intrinsic::riscv_seg8_store_mask};
 
 /// Lower an interleaved store into a vssegN intrinsic.
 ///
@@ -23954,8 +23903,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   }
 
   Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-      SI->getModule(), FixedVssegIntrIds[Factor - 2],
-      {VTy, SI->getPointerOperandType(), XLenTy});
+      SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});
 
   SmallVector<Value *, 10> Ops;
   SmallVector<int, 16> NewShuffleMask;
@@ -23975,7 +23923,10 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
   // potentially under larger LMULs) because we checked that the fixed vector
   // type fits in isLegalInterleavedAccessType
   Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
-  Ops.append({SI->getPointerOperand(), VL});
+  // All-ones mask.
+  Value *StoreMask = ConstantVector::getSplat(
+      VTy->getElementCount(), ConstantInt::getTrue(SVI->getContext()));
+  Ops.append({SI->getPointerOperand(), StoreMask, VL});
 
   Builder.CreateCall(VssegNFunc, Ops);
 
@@ -24004,10 +23955,12 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(LI->getContext()));
     Return =
-        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
-                                {ResVTy, LI->getPointerOperandType(), XLenTy},
-                                {LI->getPointerOperand(), VL});
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
+                                {LI->getPointerOperand(), Mask, VL});
   } else {
     static const Intrinsic::ID IntrIds[] = {
         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -24071,12 +24024,14 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
     Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
-        SI->getModule(), FixedVssegIntrIds[Factor - 2],
-        {InVTy, SI->getPointerOperandType(), XLenTy});
+        SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});
 
     SmallVector<Value *, 10> Ops(InterleaveValues);
     Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
-    Ops.append({SI->getPointerOperand(), VL});
+    // All-ones mask.
+    Value *Mask = ConstantVector::getSplat(
+        FVTy->getElementCount(), ConstantInt::getTrue(SI->getContext()));
+    Ops.append({SI->getPointerOperand(), Mask, VL});
 
     Builder.CreateCall(VssegNFunc, Ops);
   } else {
@@ -24198,15 +24153,9 @@ bool RISCVTargetLowering::lowerInterleavedVPLoad(
 
   Value *Return = nullptr;
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
-        Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
-        Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
-        Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
-        Intrinsic::riscv_seg8_load_mask};
-
-    Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
-                                     {FVTy, XLenTy},
-                                     {Load->getArgOperand(0), Mask, EVL});
+    Return =
+        Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
+                                {Load->getArgOperand(0), Mask, EVL});
   } else {
     static const Intrinsic::ID IntrMaskIds[] = {
         Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24318,15 +24267,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
                 XLenTy);
 
   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
-    static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
-        Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
-        Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
-        Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
-        Intrinsic::riscv_seg8_store_mask};
-
     SmallVector<Value *, 8> Operands(InterleaveOperands);
     Operands.append({Store->getArgOperand(1), Mask, EVL});
-    Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
+    Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
                             Operands);
     return true;
   }
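
With FixedVlsegIntrIds now pointing at the masked intrinsics, lowerInterleavedLoad emits a masked segment load with an all-ones mask and VL equal to the fixed vector length. A rough sketch of the IR-level effect for a factor-2 interleaved load on an RV64 target (value names and the exact input pattern are illustrative, not taken from this commit):

; Input recognized by the interleaved-access pass:
  %wide = load <16 x i8>, ptr %p
  %even = shufflevector <16 x i8> %wide, <16 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %odd  = shufflevector <16 x i8> %wide, <16 x i8> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>

; Roughly what the updated lowering produces instead:
  %seg  = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %p, <8 x i1> splat (i1 true), i64 8)
  %even = extractvalue { <8 x i8>, <8 x i8> } %seg, 0
  %odd  = extractvalue { <8 x i8>, <8 x i8> } %seg, 1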

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll

Lines changed: 7 additions & 14 deletions
@@ -7,7 +7,7 @@ define <8 x i8> @load_factor2(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
   ret <8 x i8> %3
@@ -19,7 +19,7 @@ define <8 x i8> @load_factor3(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -32,7 +32,7 @@ define <8 x i8> @load_factor4(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v5, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -46,7 +46,7 @@ define <8 x i8> @load_factor5(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v4, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -61,7 +61,7 @@ define <8 x i8> @load_factor6(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v3, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -77,7 +77,7 @@ define <8 x i8> @load_factor7(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v2, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -94,7 +94,7 @@ define <8 x i8> @load_factor8(ptr %ptr) {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v1, (a0)
 ; CHECK-NEXT:    ret
-  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
+  %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
   %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
   %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
   %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -105,10 +105,3 @@ define <8 x i8> @load_factor8(ptr %ptr) {
   %9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 7
   ret <8 x i8> %9
 }
-declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr, i64)
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr, i64)
