@@ -69,78 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVExtractInstr(const MachineInstr &MI) {
-  return RISCV::getRVVMCOpcode(MI.getOpcode()) == RISCV::RI_VEXTRACT;
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -170,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
 }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-         MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-         MI.getOperand(1).getReg() == RISCV::X0 &&
-         MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
@@ -462,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
 }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -473,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -482,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -500,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     //   non-zero VL. We could generalize this if we had a VL > C predicate.
     // * The LMUL1 restriction is for machines whose latency may depend on VL.
     // * As above, this is only legal for tail "undefined" not "agnostic".
-    if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.VLAny = false;
       Res.VLZeroness = true;
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -514,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // it's place. Since a splat is non-constant time in LMUL, we do need to be
     // careful to not increase the number of active vector registers (unlike for
     // vmv.s.x.)
-    if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-        hasUndefinedPassthru(MI)) {
+    if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+        VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
       Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
       Res.SEWLMULRatio = false;
       Res.VLAny = false;
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -542,7 +465,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     Res.MaskPolicy = false;
   }
 
-  if (isVExtractInstr(MI)) {
+  if (RISCVInstrInfo::isVExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     // TODO: LMUL can be any larger value (without cost)
     Res.TailPolicy = false;
@@ -661,7 +584,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -989,7 +912,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1095,7 +1018,8 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI) || isVExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
+           RISCVInstrInfo::isVExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1136,7 +1060,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
     // same, we can use the X0, X0 form.
     if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
       if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-          DefMI && isVectorConfigInstr(*DefMI)) {
+          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
         VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
         if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
           auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1314,7 +1238,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1349,7 +1273,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1439,7 +1364,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI make sure it matches the output of the
@@ -1470,7 +1395,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1677,12 +1602,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the later for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1733,7 +1658,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
           MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1757,7 +1682,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
       }
 
       if (canMutatePriorConfig(MI, *NextMI, Used)) {
-        if (!isVLPreservingConfig(*NextMI)) {
+        if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
           Register DefReg = NextMI->getOperand(0).getReg();
 
           MI.getOperand(0).setReg(DefReg);
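
For reference, the diff above only shows the helpers being deleted from RISCVInsertVSETVLI.cpp and the call sites being qualified with RISCVInstrInfo::. A minimal sketch of what the relocated declarations could look like follows; the exact header file and whether they are namespace-scope functions or static members of the RISCVInstrInfo class is not shown in this diff, so the layout below is an assumption. Either form would match the qualified calls such as RISCVInstrInfo::isVectorConfigInstr(MI).

// Sketch only (hypothetical RISCVInstrInfo.h excerpt); signatures match the
// static functions removed above, but the header/namespace layout is assumed.
namespace llvm {
class MachineInstr;

namespace RISCVInstrInfo {
// True for PseudoVSETVLI, PseudoVSETVLIX0 and PseudoVSETIVLI.
bool isVectorConfigInstr(const MachineInstr &MI);
// True for 'vsetvli x0, x0, vtype', which preserves VL and only sets VTYPE.
bool isVLPreservingConfig(const MachineInstr &MI);
bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI);
bool isVExtractInstr(const MachineInstr &MI);
bool isScalarExtractInstr(const MachineInstr &MI);
bool isScalarInsertInstr(const MachineInstr &MI);
bool isScalarSplatInstr(const MachineInstr &MI);
bool isVSlideInstr(const MachineInstr &MI);
// True for 'addi rd, x0, imm' with a non-zero immediate.
bool isNonZeroLoadImmediate(const MachineInstr &MI);
} // namespace RISCVInstrInfo
} // namespace llvm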