@@ -141,6 +141,24 @@ static void generateInstSeqImpl(int64_t Val,
     Res.push_back(RISCVMatInt::Inst(RISCV::ADDI, Lo12));
 }
 
+static unsigned extractRotateInfo(int64_t Val) {
+  // for case: 0b111..1..xxxxxx1..1..
+  unsigned LeadingOnes = countLeadingOnes((uint64_t)Val);
+  unsigned TrailingOnes = countTrailingOnes((uint64_t)Val);
+  if (TrailingOnes > 0 && TrailingOnes < 64 &&
+      (LeadingOnes + TrailingOnes) > (64 - 12))
+    return 64 - TrailingOnes;
+
+  // for case: 0bxxx1..1..1...xxx
+  unsigned UpperTrailingOnes = countTrailingOnes(Hi_32(Val));
+  unsigned LowerLeadingOnes = countLeadingOnes(Lo_32(Val));
+  if (UpperTrailingOnes < 32 &&
+      (UpperTrailingOnes + LowerLeadingOnes) > (64 - 12))
+    return 32 - UpperTrailingOnes;
+
+  return 0;
+}
+
 namespace llvm {
 namespace RISCVMatInt {
 InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
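What the new helper detects: extractRotateInfo() returns a nonzero left-rotate amount whenever Val contains a run of more than 52 consecutive ones that either wraps around bits 63/0 (first case) or crosses bits 31/32 (second case). Rotating that run up to the high bits leaves every non-one bit in the low 11 positions, giving a negative value that fits a signed 12-bit immediate. A minimal standalone sketch of the same test, assuming C++20 <bit> in place of LLVM's countLeadingOnes/countTrailingOnes and Hi_32/Lo_32 (rotateToSimm12 is a hypothetical name, not part of the patch):

#include <bit>
#include <cstdint>

// Sketch of the same test: return the left-rotate amount that turns Val
// into a signed 12-bit immediate, or 0 if no such rotation exists.
static unsigned rotateToSimm12(int64_t Val) {
  uint64_t U = (uint64_t)Val;
  // Case 0b111..1..xxxxxx1..1..: the run of ones wraps around bits 63/0.
  unsigned Lead = std::countl_one(U);
  unsigned Trail = std::countr_one(U);
  if (Trail > 0 && Trail < 64 && Lead + Trail > 64 - 12)
    return 64 - Trail;
  // Case 0bxxx1..1..1...xxx: the run of ones crosses bits 31/32.
  unsigned UpperTrail = std::countr_one((uint32_t)(U >> 32));
  unsigned LowerLead = std::countl_one((uint32_t)U);
  if (UpperTrail < 32 && UpperTrail + LowerLead > 64 - 12)
    return 32 - UpperTrail;
  return 0;
}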
@@ -312,6 +330,18 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     }
   }
 
+  // Perform optimization with rori in the Zbb extension.
+  if (Res.size() > 2 && ActiveFeatures[RISCV::FeatureStdExtZbb]) {
+    if (unsigned Rotate = extractRotateInfo(Val)) {
+      RISCVMatInt::InstSeq TmpSeq;
+      uint64_t NegImm12 =
+          ((uint64_t)Val >> (64 - Rotate)) | ((uint64_t)Val << Rotate);
+      assert(isInt<12>(NegImm12));
+      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::ADDI, NegImm12));
+      TmpSeq.push_back(RISCVMatInt::Inst(RISCV::RORI, Rotate));
+      Res = TmpSeq;
+    }
+  }
   return Res;
 }
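A worked example of the new rori path (the register name a0 is illustrative): Val = 0xFFFFFFFFFF0FFFFF has 40 leading ones and 20 trailing ones, so extractRotateInfo returns 64 - 20 = 44. Rotating Val left by 44 gives NegImm12 = 0xFFFFFFFFFFFFFFF0 = -16, which passes isInt<12>, and materialization collapses to ADDI a0, zero, -16 followed by RORI a0, a0, 44. The rewrite is only taken when the existing sequence is longer than two instructions (Res.size() > 2), since the replacement itself costs two. The rotate identity can be checked standalone with C++20 <bit>:

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0xFFFFFFFFFF0FFFFFull; // 40 leading ones, 20 trailing ones
  unsigned Rotate = 64 - 20;            // the amount extractRotateInfo picks
  uint64_t Imm = std::rotl(Val, Rotate);
  assert(Imm == 0xFFFFFFFFFFFFFFF0ull); // i.e. -16, a valid simm12 for ADDI
  assert(std::rotr(Imm, Rotate) == Val); // RORI by 44 restores Val
  return 0;
}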