
Commit 809f61d

fixup! [llvm][RISCV] Support RISCV vector tuple CodeGen and Calling Convention
1 parent a12b83f commit 809f61d

32 files changed: 51,495 additions, 51,487 deletions

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 62 additions & 54 deletions
@@ -160,14 +160,17 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 static const MVT::SimpleValueType F64VecVTs[] = {
 MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
 static const MVT::SimpleValueType VecTupleVTs[] = {
-MVT::riscv_mf8x2, MVT::riscv_mf8x3, MVT::riscv_mf8x4, MVT::riscv_mf8x5,
-MVT::riscv_mf8x6, MVT::riscv_mf8x7, MVT::riscv_mf8x8, MVT::riscv_mf4x2,
-MVT::riscv_mf4x3, MVT::riscv_mf4x4, MVT::riscv_mf4x5, MVT::riscv_mf4x6,
-MVT::riscv_mf4x7, MVT::riscv_mf4x8, MVT::riscv_mf2x2, MVT::riscv_mf2x3,
-MVT::riscv_mf2x4, MVT::riscv_mf2x5, MVT::riscv_mf2x6, MVT::riscv_mf2x7,
-MVT::riscv_mf2x8, MVT::riscv_m1x2, MVT::riscv_m1x3, MVT::riscv_m1x4,
-MVT::riscv_m1x5, MVT::riscv_m1x6, MVT::riscv_m1x7, MVT::riscv_m1x8,
-MVT::riscv_m2x2, MVT::riscv_m2x3, MVT::riscv_m2x4, MVT::riscv_m4x2};
+MVT::riscv_nxv1i8x2, MVT::riscv_nxv1i8x3, MVT::riscv_nxv1i8x4,
+MVT::riscv_nxv1i8x5, MVT::riscv_nxv1i8x6, MVT::riscv_nxv1i8x7,
+MVT::riscv_nxv1i8x8, MVT::riscv_nxv2i8x2, MVT::riscv_nxv2i8x3,
+MVT::riscv_nxv2i8x4, MVT::riscv_nxv2i8x5, MVT::riscv_nxv2i8x6,
+MVT::riscv_nxv2i8x7, MVT::riscv_nxv2i8x8, MVT::riscv_nxv4i8x2,
+MVT::riscv_nxv4i8x3, MVT::riscv_nxv4i8x4, MVT::riscv_nxv4i8x5,
+MVT::riscv_nxv4i8x6, MVT::riscv_nxv4i8x7, MVT::riscv_nxv4i8x8,
+MVT::riscv_nxv8i8x2, MVT::riscv_nxv8i8x3, MVT::riscv_nxv8i8x4,
+MVT::riscv_nxv8i8x5, MVT::riscv_nxv8i8x6, MVT::riscv_nxv8i8x7,
+MVT::riscv_nxv8i8x8, MVT::riscv_nxv16i8x2, MVT::riscv_nxv16i8x3,
+MVT::riscv_nxv16i8x4, MVT::riscv_nxv32i8x2};
 
 if (Subtarget.hasVInstructions()) {
 auto addRegClassForRVV = [this](MVT VT) {
@@ -234,38 +237,38 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 addRegClassForFixedVectors(VT);
 }
 
-addRegisterClass(MVT::riscv_mf8x2, &RISCV::VRN2M1RegClass);
-addRegisterClass(MVT::riscv_mf8x3, &RISCV::VRN3M1RegClass);
-addRegisterClass(MVT::riscv_mf8x4, &RISCV::VRN4M1RegClass);
-addRegisterClass(MVT::riscv_mf8x5, &RISCV::VRN5M1RegClass);
-addRegisterClass(MVT::riscv_mf8x6, &RISCV::VRN6M1RegClass);
-addRegisterClass(MVT::riscv_mf8x7, &RISCV::VRN7M1RegClass);
-addRegisterClass(MVT::riscv_mf8x8, &RISCV::VRN8M1RegClass);
-addRegisterClass(MVT::riscv_mf4x2, &RISCV::VRN2M1RegClass);
-addRegisterClass(MVT::riscv_mf4x3, &RISCV::VRN3M1RegClass);
-addRegisterClass(MVT::riscv_mf4x4, &RISCV::VRN4M1RegClass);
-addRegisterClass(MVT::riscv_mf4x5, &RISCV::VRN5M1RegClass);
-addRegisterClass(MVT::riscv_mf4x6, &RISCV::VRN6M1RegClass);
-addRegisterClass(MVT::riscv_mf4x7, &RISCV::VRN7M1RegClass);
-addRegisterClass(MVT::riscv_mf4x8, &RISCV::VRN8M1RegClass);
-addRegisterClass(MVT::riscv_mf2x2, &RISCV::VRN2M1RegClass);
-addRegisterClass(MVT::riscv_mf2x3, &RISCV::VRN3M1RegClass);
-addRegisterClass(MVT::riscv_mf2x4, &RISCV::VRN4M1RegClass);
-addRegisterClass(MVT::riscv_mf2x5, &RISCV::VRN5M1RegClass);
-addRegisterClass(MVT::riscv_mf2x6, &RISCV::VRN6M1RegClass);
-addRegisterClass(MVT::riscv_mf2x7, &RISCV::VRN7M1RegClass);
-addRegisterClass(MVT::riscv_mf2x8, &RISCV::VRN8M1RegClass);
-addRegisterClass(MVT::riscv_m1x2, &RISCV::VRN2M1RegClass);
-addRegisterClass(MVT::riscv_m1x3, &RISCV::VRN3M1RegClass);
-addRegisterClass(MVT::riscv_m1x4, &RISCV::VRN4M1RegClass);
-addRegisterClass(MVT::riscv_m1x5, &RISCV::VRN5M1RegClass);
-addRegisterClass(MVT::riscv_m1x6, &RISCV::VRN6M1RegClass);
-addRegisterClass(MVT::riscv_m1x7, &RISCV::VRN7M1RegClass);
-addRegisterClass(MVT::riscv_m1x8, &RISCV::VRN8M1RegClass);
-addRegisterClass(MVT::riscv_m2x2, &RISCV::VRN2M2RegClass);
-addRegisterClass(MVT::riscv_m2x3, &RISCV::VRN3M2RegClass);
-addRegisterClass(MVT::riscv_m2x4, &RISCV::VRN4M2RegClass);
-addRegisterClass(MVT::riscv_m4x2, &RISCV::VRN2M4RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x2, &RISCV::VRN2M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x3, &RISCV::VRN3M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x4, &RISCV::VRN4M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x5, &RISCV::VRN5M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x6, &RISCV::VRN6M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x7, &RISCV::VRN7M1RegClass);
+addRegisterClass(MVT::riscv_nxv1i8x8, &RISCV::VRN8M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x2, &RISCV::VRN2M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x3, &RISCV::VRN3M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x4, &RISCV::VRN4M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x5, &RISCV::VRN5M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x6, &RISCV::VRN6M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x7, &RISCV::VRN7M1RegClass);
+addRegisterClass(MVT::riscv_nxv2i8x8, &RISCV::VRN8M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x2, &RISCV::VRN2M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x3, &RISCV::VRN3M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x4, &RISCV::VRN4M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x5, &RISCV::VRN5M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x6, &RISCV::VRN6M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x7, &RISCV::VRN7M1RegClass);
+addRegisterClass(MVT::riscv_nxv4i8x8, &RISCV::VRN8M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x2, &RISCV::VRN2M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x3, &RISCV::VRN3M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x4, &RISCV::VRN4M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x5, &RISCV::VRN5M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x6, &RISCV::VRN6M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x7, &RISCV::VRN7M1RegClass);
+addRegisterClass(MVT::riscv_nxv8i8x8, &RISCV::VRN8M1RegClass);
+addRegisterClass(MVT::riscv_nxv16i8x2, &RISCV::VRN2M2RegClass);
+addRegisterClass(MVT::riscv_nxv16i8x3, &RISCV::VRN3M2RegClass);
+addRegisterClass(MVT::riscv_nxv16i8x4, &RISCV::VRN4M2RegClass);
+addRegisterClass(MVT::riscv_nxv32i8x2, &RISCV::VRN2M4RegClass);
 }
 
 // Compute derived properties from the register classes.
@@ -2499,17 +2502,22 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
 
 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
 if (VT.isRISCVVectorTuple()) {
-if (VT.SimpleTy >= MVT::riscv_mf8x2 && VT.SimpleTy <= MVT::riscv_mf8x8)
+if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 &&
+VT.SimpleTy <= MVT::riscv_nxv1i8x8)
 return RISCVII::LMUL_F8;
-if (VT.SimpleTy >= MVT::riscv_mf4x2 && VT.SimpleTy <= MVT::riscv_mf4x8)
+if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 &&
+VT.SimpleTy <= MVT::riscv_nxv2i8x8)
 return RISCVII::LMUL_F4;
-if (VT.SimpleTy >= MVT::riscv_mf2x2 && VT.SimpleTy <= MVT::riscv_mf2x8)
+if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 &&
+VT.SimpleTy <= MVT::riscv_nxv4i8x8)
 return RISCVII::LMUL_F2;
-if (VT.SimpleTy >= MVT::riscv_m1x2 && VT.SimpleTy <= MVT::riscv_m1x8)
+if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 &&
+VT.SimpleTy <= MVT::riscv_nxv8i8x8)
 return RISCVII::LMUL_1;
-if (VT.SimpleTy >= MVT::riscv_m2x2 && VT.SimpleTy <= MVT::riscv_m2x4)
+if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 &&
+VT.SimpleTy <= MVT::riscv_nxv16i8x4)
 return RISCVII::LMUL_2;
-if (VT.SimpleTy == MVT::riscv_m4x2)
+if (VT.SimpleTy == MVT::riscv_nxv32i8x2)
 return RISCVII::LMUL_4;
 llvm_unreachable("Invalid vector tuple type LMUL.");
 }
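
Note: the new value types name each tuple field by its scalable i8 vector (riscv_nxv<N>i8x<NF>), so getLMUL above can read the register-group size off the name: per this hunk, nxv1i8 fields map to LMUL=1/8 (the old mf8 names), nxv2i8 to 1/4, nxv4i8 to 1/2, nxv8i8 to 1, nxv16i8 to 2, and nxv32i8 to 4. A minimal standalone sketch of that correspondence, assuming the usual RVV convention vscale = VLEN/64; the helper name is made up for illustration and is not part of the patch:

    #include <cassert>
    #include <string>

    // Map N, the minimum number of i8 elements per tuple field, to the LMUL
    // label used by the old type names: a field of vscale x N x i8 spans
    // N*8 of the 64 known-minimum bits of one vector register, i.e. LMUL = N/8.
    static std::string lmulForTupleField(unsigned MinI8PerField) {
      switch (MinI8PerField) {
      case 1:  return "mf8"; // nxv1i8  -> LMUL = 1/8
      case 2:  return "mf4"; // nxv2i8  -> LMUL = 1/4
      case 4:  return "mf2"; // nxv4i8  -> LMUL = 1/2
      case 8:  return "m1";  // nxv8i8  -> LMUL = 1
      case 16: return "m2";  // nxv16i8 -> LMUL = 2
      case 32: return "m4";  // nxv32i8 -> LMUL = 4
      default: assert(false && "not a valid tuple field width"); return "";
      }
    }

    int main() {
      // riscv_nxv16i8x3 replaces the old riscv_m2x3: three LMUL=2 fields.
      assert(lmulForTupleField(16) == "m2");
      return 0;
    }
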
@@ -9713,9 +9721,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
 MVT XLenVT = Subtarget.getXLenVT();
 MVT VT = Op->getSimpleValueType(0);
 MVT ContainerVT = getContainerForFixedLengthVector(VT);
-auto LMUL = static_cast<unsigned>(getLMUL(ContainerVT));
-auto Log2LMUL = LMUL > 4 ? LMUL - 8 : LMUL;
-EVT VecTupTy = EVT::getRISCVVectorTupleVT(Log2LMUL, NF);
+unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
+ContainerVT.getScalarSizeInBits();
+EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
 SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
 Subtarget);
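
Note: instead of passing a log2-LMUL value to EVT::getRISCVVectorTupleVT, the tuple type is now selected by its total known-minimum size in bits, Sz = NF * MinElts * SEW, paired with NF. A hedged arithmetic sketch using made-up example values (an NF=4 segment op whose container type is nxv8i16); only the Sz formula and the pairing with NF come from the hunk above:

    #include <cassert>

    int main() {
      unsigned NF = 4;       // number of fields in the segment load/store
      unsigned MinElts = 8;  // ContainerVT.getVectorMinNumElements() for nxv8i16
      unsigned SEW = 16;     // ContainerVT.getScalarSizeInBits()

      unsigned Sz = NF * MinElts * SEW;  // total known-minimum size in bits
      unsigned I8PerField = Sz / NF / 8; // i8 elements per tuple field

      assert(Sz == 512);
      assert(I8PerField == 16); // each field is <vscale x 16 x i8>
      // With these values, MVT::getRISCVVectorTupleVT(Sz, NF) would pick
      // riscv_nxv16i8x4, the type previously named riscv_m2x4.
      return 0;
    }
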
@@ -9842,9 +9850,9 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
 MVT XLenVT = Subtarget.getXLenVT();
 MVT VT = Op->getOperand(2).getSimpleValueType();
 MVT ContainerVT = getContainerForFixedLengthVector(VT);
-auto LMUL = static_cast<unsigned>(getLMUL(ContainerVT));
-auto Log2LMUL = LMUL > 4 ? LMUL - 8 : LMUL;
-EVT VecTupTy = EVT::getRISCVVectorTupleVT(Log2LMUL, NF);
+unsigned Sz = NF * ContainerVT.getVectorMinNumElements() *
+ContainerVT.getScalarSizeInBits();
+EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);
 
 SDValue VL = getVLOp(VT.getVectorNumElements(), ContainerVT, DL, DAG,
 Subtarget);
@@ -22171,7 +22179,7 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
 unsigned SEW = ResVTy->getElementType()->getScalarSizeInBits();
 unsigned NumElts = ResVTy->getElementCount().getKnownMinValue();
 Type *VecTupTy = TargetExtType::get(
-LI->getContext(), "riscv_vec_tuple",
+LI->getContext(), "riscv.vector.tuple",
 ScalableVectorType::get(Type::getInt8Ty(LI->getContext()),
 NumElts * SEW / 8),
 2);
@@ -22241,7 +22249,7 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
 unsigned SEW = InVTy->getElementType()->getScalarSizeInBits();
 unsigned NumElts = InVTy->getElementCount().getKnownMinValue();
 Type *VecTupTy = TargetExtType::get(
-SI->getContext(), "riscv_vec_tuple",
+SI->getContext(), "riscv.vector.tuple",
 ScalableVectorType::get(Type::getInt8Ty(SI->getContext()),
 NumElts * SEW / 8),
 2);
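
Note: the IR-level target extension type is renamed from "riscv_vec_tuple" to "riscv.vector.tuple", matching the textual form target("riscv.vector.tuple", <vscale x N x i8>, NF) used in the test diffs below. A small sketch mirroring the two hunks above; getVecTupleTy is a hypothetical helper (not LLVM API), and SEW, NumElts, NF are placeholder parameters:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // Build the renamed tuple type the same way the hunks above do: NF fields,
    // each modelled as a scalable vector of NumElts * SEW / 8 i8 elements
    // (the field's known-minimum size expressed in i8 units).
    static Type *getVecTupleTy(LLVMContext &Ctx, unsigned SEW, unsigned NumElts,
                               unsigned NF) {
      return TargetExtType::get(
          Ctx, "riscv.vector.tuple",
          ScalableVectorType::get(Type::getInt8Ty(Ctx), NumElts * SEW / 8), NF);
    }

    // In textual IR this prints as, e.g. for SEW=16, NumElts=8, NF=2:
    //   target("riscv.vector.tuple", <vscale x 16 x i8>, 2)
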

llvm/lib/Target/RISCV/RISCVRegisterInfo.td

Lines changed: 11 additions & 11 deletions
@@ -607,17 +607,17 @@ def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
 
-defvar VTupM1N2VTs = [riscv_m1x2, riscv_mf2x2, riscv_mf4x2, riscv_mf8x2];
-defvar VTupM1N3VTs = [riscv_m1x3, riscv_mf2x3, riscv_mf4x3, riscv_mf8x3];
-defvar VTupM1N4VTs = [riscv_m1x4, riscv_mf2x4, riscv_mf4x4, riscv_mf8x4];
-defvar VTupM1N5VTs = [riscv_m1x5, riscv_mf2x5, riscv_mf4x5, riscv_mf8x5];
-defvar VTupM1N6VTs = [riscv_m1x6, riscv_mf2x6, riscv_mf4x6, riscv_mf8x6];
-defvar VTupM1N7VTs = [riscv_m1x7, riscv_mf2x7, riscv_mf4x7, riscv_mf8x7];
-defvar VTupM1N8VTs = [riscv_m1x8, riscv_mf2x8, riscv_mf4x8, riscv_mf8x8];
-defvar VTupM2N2VTs = [riscv_m2x2];
-defvar VTupM2N3VTs = [riscv_m2x3];
-defvar VTupM2N4VTs = [riscv_m2x4];
-defvar VTupM4N2VTs = [riscv_m4x2];
+defvar VTupM1N2VTs = [riscv_nxv8i8x2, riscv_nxv4i8x2, riscv_nxv2i8x2, riscv_nxv1i8x2];
+defvar VTupM1N3VTs = [riscv_nxv8i8x3, riscv_nxv4i8x3, riscv_nxv2i8x3, riscv_nxv1i8x3];
+defvar VTupM1N4VTs = [riscv_nxv8i8x4, riscv_nxv4i8x4, riscv_nxv2i8x4, riscv_nxv1i8x4];
+defvar VTupM1N5VTs = [riscv_nxv8i8x5, riscv_nxv4i8x5, riscv_nxv2i8x5, riscv_nxv1i8x5];
+defvar VTupM1N6VTs = [riscv_nxv8i8x6, riscv_nxv4i8x6, riscv_nxv2i8x6, riscv_nxv1i8x6];
+defvar VTupM1N7VTs = [riscv_nxv8i8x7, riscv_nxv4i8x7, riscv_nxv2i8x7, riscv_nxv1i8x7];
+defvar VTupM1N8VTs = [riscv_nxv8i8x8, riscv_nxv4i8x8, riscv_nxv2i8x8, riscv_nxv1i8x8];
+defvar VTupM2N2VTs = [riscv_nxv16i8x2];
+defvar VTupM2N3VTs = [riscv_nxv16i8x3];
+defvar VTupM2N4VTs = [riscv_nxv16i8x4];
+defvar VTupM4N2VTs = [riscv_nxv32i8x2];
 class VTupRegList<int LMUL, int NF> {
 list<ValueType> L = !cond(!and(!eq(LMUL, 1), !eq(NF, 2)): VTupM1N2VTs,
 !and(!eq(LMUL, 1), !eq(NF, 3)): VTupM1N3VTs,

llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll

Lines changed: 7 additions & 7 deletions
@@ -101,11 +101,11 @@ entry:
 %8 = tail call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> %6, i16 -15456, i64 2)
 %9 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
 %10 = tail call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %8, i64 2, i64 0)
-%v_0 = call target("riscv_vec_tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv_vec_tuple_nxv16i8_4t.nxv8i16(target("riscv_vec_tuple", <vscale x 16 x i8>, 4) poison, <vscale x 8 x i16> %10, i64 0)
-%v_1 = call target("riscv_vec_tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv_vec_tuple_nxv16i8_4t.nxv8i16(target("riscv_vec_tuple", <vscale x 16 x i8>, 4) %v_0, <vscale x 8 x i16> %2, i64 1)
-%v_2 = call target("riscv_vec_tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv_vec_tuple_nxv16i8_4t.nxv8i16(target("riscv_vec_tuple", <vscale x 16 x i8>, 4) %v_1, <vscale x 8 x i16> %3, i64 2)
-%v_3 = call target("riscv_vec_tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv_vec_tuple_nxv16i8_4t.nxv8i16(target("riscv_vec_tuple", <vscale x 16 x i8>, 4) %v_2, <vscale x 8 x i16> %4, i64 3)
-tail call void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv_vec_tuple", <vscale x 16 x i8>, 4) %v_3, ptr nonnull @var_47, i64 2, i64 4)
+%v_0 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, <vscale x 8 x i16> %10, i64 0)
+%v_1 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_0, <vscale x 8 x i16> %2, i64 1)
+%v_2 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_1, <vscale x 8 x i16> %3, i64 2)
+%v_3 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_2, <vscale x 8 x i16> %4, i64 3)
+tail call void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_3, ptr nonnull @var_47, i64 2, i64 4)
 ret void
 }
 
@@ -119,6 +119,6 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16>,
 
 declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64 immarg)
 
-declare target("riscv_vec_tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv_vec_tuple_nxv16i8_4t.nxv8i16(target("riscv_vec_tuple", <vscale x 16 x i8>, 4), <vscale x 8 x i16>, i64)
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vector.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), <vscale x 8 x i16>, i64)
 
-declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv_vec_tuple", <vscale x 16 x i8>, 4), ptr nocapture, i64, i64)
+declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr nocapture, i64, i64)

llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll

Lines changed: 4 additions & 4 deletions
@@ -123,8 +123,8 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT: addi sp, sp, 32
 ; SUBREGLIVENESS-NEXT: ret
 entry:
-%i = call target("riscv_vec_tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv_vec_tuple", <vscale x 32 x i8>, 2) undef, ptr nonnull poison, <vscale x 16 x i32> poison, i64 55, i64 4)
-%i1 = tail call <vscale x 16 x half> @llvm.riscv.vector.extract.v16f16.triscv_vec_tuple_nxv32i8_2t(target("riscv_vec_tuple", <vscale x 32 x i8>, 2) %i, i64 0)
+%i = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr nonnull poison, <vscale x 16 x i32> poison, i64 55, i64 4)
+%i1 = tail call <vscale x 16 x half> @llvm.riscv.vector.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %i, i64 0)
 %i2 = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> zeroinitializer, i64 7, i64 36, i64 0)
 call void @func()
 %i3 = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> poison, <vscale x 16 x i16> poison, <vscale x 16 x i1> poison, i64 32, i64 0)
@@ -136,8 +136,8 @@ entry:
 }
 
 declare void @func()
-declare target("riscv_vec_tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv_vec_tuple", <vscale x 32 x i8>, 2), ptr nocapture, <vscale x 16 x i32>, i64, i64)
-declare <vscale x 16 x half> @llvm.riscv.vector.extract.v16f16.triscv_vec_tuple_nxv32i8_2t(target("riscv_vec_tuple", <vscale x 32 x i8>, 2), i64)
+declare target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr nocapture, <vscale x 16 x i32>, i64, i64)
+declare <vscale x 16 x half> @llvm.riscv.vector.extract.v16f16.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64)
 declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i64, i64, i64 immarg)
 declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64 immarg)
 declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x half>, i64, i64)
