@@ -69,74 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
   return RISCVII::getSEWOpNum(MI.getDesc());
 }
 
-static bool isVectorConfigInstr(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::PseudoVSETVLI ||
-         MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
-         MI.getOpcode() == RISCV::PseudoVSETIVLI;
-}
-
-/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
-/// VL and only sets VTYPE.
-static bool isVLPreservingConfig(const MachineInstr &MI) {
-  if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
-    return false;
-  assert(RISCV::X0 == MI.getOperand(1).getReg());
-  return RISCV::X0 == MI.getOperand(0).getReg();
-}
-
-static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VFMV_S_F:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isScalarExtractInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_X_S:
-  case RISCV::VFMV_F_S:
-    return true;
-  }
-}
-
-static bool isScalarInsertInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_S_X:
-  case RISCV::VFMV_S_F:
-    return true;
-  }
-}
-
-static bool isScalarSplatInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VMV_V_I:
-  case RISCV::VMV_V_X:
-  case RISCV::VFMV_V_F:
-    return true;
-  }
-}
-
-static bool isVSlideInstr(const MachineInstr &MI) {
-  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
-  default:
-    return false;
-  case RISCV::VSLIDEDOWN_VX:
-  case RISCV::VSLIDEDOWN_VI:
-  case RISCV::VSLIDEUP_VX:
-  case RISCV::VSLIDEUP_VI:
-    return true;
-  }
-}
-
 /// Get the EEW for a load or store instruction. Return std::nullopt if MI is
 /// not a load or store which ignores SEW.
 static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -166,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
   }
 }
 
-static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
-  return MI.getOpcode() == RISCV::ADDI &&
-         MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
-         MI.getOperand(1).getReg() == RISCV::X0 &&
-         MI.getOperand(2).getImm() != 0;
-}
-
 /// Return true if this is an operation on mask registers. Note that
 /// this includes both arithmetic/logical ops and load/store (vlm/vsm).
 static bool isMaskRegOp(const MachineInstr &MI) {
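The predicates deleted in the two hunks above reappear at every call site below under a RISCVInstrInfo:: qualifier, so this commit evidently relocates them rather than removing them. As orientation, a minimal sketch of how the relocated declarations could look; the header placement, the choice of a namespace over static class members, and the comment text are assumptions for illustration and are not shown in this diff:

// Hypothetical excerpt of the helpers' new home (placement is an assumption).
namespace llvm {
class MachineInstr;

namespace RISCVInstrInfo {
// True for the PseudoVSETVLI/PseudoVSETVLIX0/PseudoVSETIVLI config pseudos.
bool isVectorConfigInstr(const MachineInstr &MI);
// True for 'vsetvli x0, x0, vtype', which preserves VL and only sets VTYPE.
bool isVLPreservingConfig(const MachineInstr &MI);
bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI);
bool isScalarExtractInstr(const MachineInstr &MI);
bool isScalarInsertInstr(const MachineInstr &MI);
bool isScalarSplatInstr(const MachineInstr &MI);
bool isVSlideInstr(const MachineInstr &MI);
// True for 'ADDI rd, x0, imm' with a non-zero immediate (a non-zero li).
bool isNonZeroLoadImmediate(const MachineInstr &MI);
} // namespace RISCVInstrInfo
} // namespace llvm

The signatures mirror the deleted definitions; only the surrounding namespace and header are guessed from how the call sites are spelled in the rest of this diff.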
@@ -458,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
-  if (isScalarInsertInstr(MI)) {
+  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
@@ -469,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     // tail lanes to either be the original value or -1. We are writing
     // unknown bits to the lanes here.
     if (hasUndefinedPassthru(MI)) {
-      if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+          !ST->hasVInstructionsF64())
         Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
       else
         Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -478,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   }
 
   // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
-  if (isScalarExtractInstr(MI)) {
+  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
     assert(!RISCVII::hasVLOp(TSFlags));
     Res.LMUL = DemandedFields::LMULNone;
     Res.SEWLMULRatio = false;
@@ -496,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // non-zero VL. We could generalize this if we had a VL > C predicate.
   // * The LMUL1 restriction is for machines whose latency may depend on VL.
   // * As above, this is only legal for tail "undefined" not "agnostic".
-  if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.VLAny = false;
     Res.VLZeroness = true;
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -510,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
   // it's place. Since a splat is non-constant time in LMUL, we do need to be
   // careful to not increase the number of active vector registers (unlike for
   // vmv.s.x.)
-  if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
-      hasUndefinedPassthru(MI)) {
+  if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
+      VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
     Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
     Res.SEWLMULRatio = false;
     Res.VLAny = false;
-    if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+    if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
+        !ST->hasVInstructionsF64())
       Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
     else
       Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -651,7 +578,7 @@ class VSETVLIInfo {
       return getAVLImm() > 0;
     if (hasAVLReg()) {
       if (auto *DefMI = getAVLDefMI(LIS))
-        return isNonZeroLoadImmediate(*DefMI);
+        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
     }
     if (hasAVLVLMAX())
       return true;
@@ -979,7 +906,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
   if (!Info.hasAVLReg())
     return;
   const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-  if (!DefMI || !isVectorConfigInstr(*DefMI))
+  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
     return;
   VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
   if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1085,7 +1012,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
       InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
     }
   } else {
-    assert(isScalarExtractInstr(MI));
+    assert(RISCVInstrInfo::isScalarExtractInstr(MI));
     // Pick a random value for state tracking purposes, will be ignored via
     // the demanded fields mechanism
     InstrInfo.setAVLImm(1);
@@ -1126,7 +1053,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
     // same, we can use the X0, X0 form.
     if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
       if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
-          DefMI && isVectorConfigInstr(*DefMI)) {
+          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
         VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
         if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
           auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1304,7 +1231,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
 // reflect the changes MI might make.
 void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
-  if (isVectorConfigInstr(MI)) {
+  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
     Info = getInfoForVSETVLI(MI);
     return;
   }
@@ -1339,7 +1266,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
   for (const MachineInstr &MI : MBB) {
     transferBefore(Info, MI);
 
-    if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
+    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
+        RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
         isVectorCopy(ST->getRegisterInfo(), MI))
       HadVectorOp = true;
 
@@ -1429,7 +1357,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
     if (!Value)
       return true;
     MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
-    if (!DefMI || !isVectorConfigInstr(*DefMI))
+    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
       return true;
 
     // We found a VSET(I)VLI make sure it matches the output of the
@@ -1460,7 +1388,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
     transferBefore(CurInfo, MI);
 
     // If this is an explicit VSETVLI or VSETIVLI, update our state.
-    if (isVectorConfigInstr(MI)) {
+    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
       // Conservatively, mark the VL and VTYPE as live.
       assert(MI.getOperand(3).getReg() == RISCV::VL &&
              MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1660,12 +1588,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
   // If the VL values aren't equal, return false if either a) the former is
   // demanded, or b) we can't rewrite the former to be the later for
   // implementation reasons.
-  if (!isVLPreservingConfig(MI)) {
+  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
     if (Used.VLAny)
       return false;
 
     if (Used.VLZeroness) {
-      if (isVLPreservingConfig(PrevMI))
+      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
         return false;
       if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                        LIS))
@@ -1716,7 +1644,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
 
   for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
 
-    if (!isVectorConfigInstr(MI)) {
+    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
       Used.doUnion(getDemanded(MI, ST));
       if (MI.isCall() || MI.isInlineAsm() ||
          MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1740,7 +1668,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
       }
 
       if (canMutatePriorConfig(MI, *NextMI, Used)) {
-        if (!isVLPreservingConfig(*NextMI)) {
+        if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
          Register DefReg = NextMI->getOperand(0).getReg();
 
          MI.getOperand(0).setReg(DefReg);