
Commit 8d2c27a

[RISCV] Convert some predicates to TIIPredicate
These predicates can also be used in macro fusion and scheduling models. This is stacked on llvm#129680.
1 parent 8e9bdeb commit 8d2c27a
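
The new RISCVInstrPredicates.td that RISCV.td starts including below is not part of the diffs shown here. For orientation only, here is a minimal sketch of what a TIIPredicate definition for one of the migrated helpers could look like, assuming the standard CheckOpcode and MCReturnStatement operators from llvm/include/llvm/Target/TargetInstrPredicate.td; the actual contents of the new file may differ. TableGen expands a TIIPredicate into a static helper on the generated InstrInfo class, which is why the C++ call sites below switch to qualified RISCVInstrInfo:: calls.

// Sketch only: not the literal contents of RISCVInstrPredicates.td.
// The InstrInfo emitter turns this into a generated static helper,
// callable as RISCVInstrInfo::isVectorConfigInstr(const MachineInstr &MI).
def isVectorConfigInstr
    : TIIPredicate<"isVectorConfigInstr",
                   MCReturnStatement<
                       CheckOpcode<[PseudoVSETVLI,
                                    PseudoVSETVLIX0,
                                    PseudoVSETIVLI]>>>;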

8 files changed: +187 -153 lines changed


llvm/lib/Target/RISCV/RISCV.td
Lines changed: 6 additions & 0 deletions

@@ -36,6 +36,12 @@ include "RISCVCallingConv.td"
 include "RISCVInstrInfo.td"
 include "GISel/RISCVRegisterBanks.td"
 
+//===----------------------------------------------------------------------===//
+// Instruction predicates
+//===----------------------------------------------------------------------===//
+
+include "RISCVInstrPredicates.td"
+
 //===----------------------------------------------------------------------===//
 // RISC-V macro fusions.
 //===----------------------------------------------------------------------===//
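
The reuse mentioned in the commit message works because a TIIPredicate can be used where an MCInstPredicate is expected, so scheduling models and macro-fusion definitions can reference the same definition. A hedged sketch with an illustrative def name (not taken from this commit):

// Illustrative only: wraps the shared predicate for use in a scheduling
// model, e.g. inside a SchedVar of a SchedWriteVariant.
def HypotheticalVSetVLSchedPred : MCSchedPredicate<isVectorConfigInstr>;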

llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Lines changed: 2 additions & 19 deletions

@@ -19949,23 +19949,6 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
   return BB;
 }
 
-static bool isSelectPseudo(MachineInstr &MI) {
-  switch (MI.getOpcode()) {
-  default:
-    return false;
-  case RISCV::Select_GPR_Using_CC_GPR:
-  case RISCV::Select_GPR_Using_CC_Imm:
-  case RISCV::Select_FPR16_Using_CC_GPR:
-  case RISCV::Select_FPR16INX_Using_CC_GPR:
-  case RISCV::Select_FPR32_Using_CC_GPR:
-  case RISCV::Select_FPR32INX_Using_CC_GPR:
-  case RISCV::Select_FPR64_Using_CC_GPR:
-  case RISCV::Select_FPR64INX_Using_CC_GPR:
-  case RISCV::Select_FPR64IN32X_Using_CC_GPR:
-    return true;
-  }
-}
-
 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
                                         unsigned RelOpcode, unsigned EqOpcode,
                                         const RISCVSubtarget &Subtarget) {
@@ -20161,7 +20144,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
        SequenceMBBI != E; ++SequenceMBBI) {
     if (SequenceMBBI->isDebugInstr())
       continue;
-    if (isSelectPseudo(*SequenceMBBI)) {
+    if (RISCVInstrInfo::isSelectPseudo(*SequenceMBBI)) {
       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
           !SequenceMBBI->getOperand(2).isReg() ||
           SequenceMBBI->getOperand(2).getReg() != RHS ||
@@ -20238,7 +20221,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
   auto InsertionPoint = TailMBB->begin();
   while (SelectMBBI != SelectEnd) {
     auto Next = std::next(SelectMBBI);
-    if (isSelectPseudo(*SelectMBBI)) {
+    if (RISCVInstrInfo::isSelectPseudo(*SelectMBBI)) {
       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
Lines changed: 23 additions & 95 deletions

@@ -69,74 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -166,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-         MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-         MI.getOperand(1).getReg() == RISCV::X0 &&
-         MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -458,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -469,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -478,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
    assert(!RISCVII::hasVLOp(TSFlags));
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEWLMULRatio = false;
@@ -496,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // non-zero VL. We could generalize this if we had a VL > C predicate.
   // * The LMUL1 restriction is for machines whose latency may depend on VL.
   // * As above, this is only legal for tail "undefined" not "agnostic".
-  if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.VLAny = false;
     Res.VLZeroness = true;
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -510,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // it's place. Since a splat is non-constant time in LMUL, we do need to be
   // careful to not increase the number of active vector registers (unlike for
   // vmv.s.x.)
-  if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
-    if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+    if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+        !ST->hasVInstructionsF64())
       Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
     else
       Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -651,7 +578,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
      if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -979,7 +906,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1085,7 +1012,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1126,7 +1053,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
   // same, we can use the X0, X0 form.
   if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
     if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-        DefMI && isVectorConfigInstr(*DefMI)) {
+        DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
       VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
       if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
         auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1304,7 +1231,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1339,7 +1266,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1429,7 +1357,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI make sure it matches the output of the
@@ -1460,7 +1388,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1660,12 +1588,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the later for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
        return false;
      if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                       LIS))
@@ -1716,7 +1644,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
          MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1740,7 +1668,7 @@
     }
 
     if (canMutatePriorConfig(MI, *NextMI, Used)) {
-      if (!isVLPreservingConfig(*NextMI)) {
+      if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
        Register DefReg = NextMI->getOperand(0).getReg();
 
        MI.getOperand(0).setReg(DefReg);

llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
Lines changed: 3 additions & 12 deletions

@@ -27,6 +27,7 @@
 
 #include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCV.h"
+#include "RISCVInstrInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include <queue>
@@ -227,23 +228,13 @@ char RISCVInsertWriteVXRM::ID = 0;
 INITIALIZE_PASS(RISCVInsertWriteVXRM, DEBUG_TYPE, RISCV_INSERT_WRITE_VXRM_NAME,
                 false, false)
 
-static bool ignoresVXRM(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VNCLIP_WI:
-  case RISCV::VNCLIPU_WI:
-    return MI.getOperand(3).getImm() == 0;
-  }
-}
-
 bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
   BlockData &BBInfo = BlockInfo[MBB.getNumber()];
 
   bool NeedVXRMWrite = false;
   for (const MachineInstr &MI : MBB) {
     int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
+    if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
       unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
 
       if (!BBInfo.VXRMUse.isValid())
@@ -401,7 +392,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
 
   for (MachineInstr &MI : MBB) {
     int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
+    if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
       unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
 
       if (PendingInsert || !Info.isStatic() ||
