 //===----------------------------------------------------------------------===//
 
 #include "RISCV.h"
+#include "RISCVInstrInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/CodeGen/LiveDebugVariables.h"
@@ -69,78 +70,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVExtractInstr(const MachineInstr &MI) {
-  return RISCV::getRVVMCOpcode(MI.getOpcode()) == RISCV::RI_VEXTRACT;
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -170,13 +99,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-    MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-    MI.getOperand(1).getReg() == RISCV::X0 &&
-    MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -462,7 +384,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -473,7 +395,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -482,7 +405,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -500,8 +423,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     //   non-zero VL. We could generalize this if we had a VL > C predicate.
     // * The LMUL1 restriction is for machines whose latency may depend on VL.
     // * As above, this is only legal for tail "undefined" not "agnostic".
-    if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.VLAny = false;
       Res.VLZeroness = true;
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -514,12 +437,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // it's place. Since a splat is non-constant time in LMUL, we do need to be
     // careful to not increase the number of active vector registers (unlike for
     // vmv.s.x.)
-    if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
       Res.SEWLMULRatio = false;
       Res.VLAny = false;
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -542,7 +466,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     Res.MaskPolicy = false;
   }
 
-  if (isVExtractInstr(MI)) {
+  if (RISCVInstrInfo::isVExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     // TODO: LMUL can be any larger value (without cost)
     Res.TailPolicy = false;
@@ -661,7 +585,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -989,7 +913,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1095,7 +1019,8 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI) || isVExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
+           RISCVInstrInfo::isVExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1136,7 +1061,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
   // same, we can use the X0, X0 form.
   if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
     if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-        DefMI && isVectorConfigInstr(*DefMI)) {
+        DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
       VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
       if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
         auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1314,7 +1239,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1349,7 +1274,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1439,7 +1365,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI make sure it matches the output of the
@@ -1470,7 +1396,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1677,12 +1603,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the later for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1733,7 +1659,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
           MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1757,7 +1683,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
     }
 
     if (canMutatePriorConfig(MI, *NextMI, Used)) {
-      if (!isVLPreservingConfig(*NextMI)) {
+      if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
         Register DefReg = NextMI->getOperand(0).getReg();
 
         MI.getOperand(0).setReg(DefReg);
MI.getOperand (0 ).setReg (DefReg);
0 commit comments