@@ -250,14 +250,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (RV64LegalI32 && Subtarget.is64Bit())
     setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

-  if (!Subtarget.hasVendorXCValu())
-    setCondCodeAction(ISD::SETLE, XLenVT, Expand);
   setCondCodeAction(ISD::SETGT, XLenVT, Custom);
   setCondCodeAction(ISD::SETGE, XLenVT, Expand);
-  if (!Subtarget.hasVendorXCValu())
-    setCondCodeAction(ISD::SETULE, XLenVT, Expand);
   setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
   setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
+  if (!(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
+    setCondCodeAction(ISD::SETULE, XLenVT, Expand);
+    setCondCodeAction(ISD::SETLE, XLenVT, Expand);
+  }

   if (RV64LegalI32 && Subtarget.is64Bit())
     setOperationAction(ISD::SETCC, MVT::i32, Promote);
@@ -343,7 +343,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     if (Subtarget.is64Bit())
       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Custom);
-  } else if (Subtarget.hasVendorXCVbitmanip()) {
+  } else if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::ROTL, XLenVT, Expand);
   } else {
     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
@@ -366,7 +366,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          : Expand);


-  if (Subtarget.hasVendorXCVbitmanip()) {
+  if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
   } else {
     // Zbkb can use rev8+brev8 to implement bitreverse.
@@ -387,14 +387,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       else
         setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
     }
-  } else if (!Subtarget.hasVendorXCVbitmanip()) {
+  } else if (!(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
     setOperationAction({ISD::CTTZ, ISD::CTPOP}, XLenVT, Expand);
     if (RV64LegalI32 && Subtarget.is64Bit())
       setOperationAction({ISD::CTTZ, ISD::CTPOP}, MVT::i32, Expand);
   }

   if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
-      Subtarget.hasVendorXCVbitmanip()) {
+      (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
     // We need the custom lowering to make sure that the resulting sequence
     // for the 32bit case is efficient on 64bit targets.
     if (Subtarget.is64Bit()) {
@@ -1439,7 +1439,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     }
   }

-  if (Subtarget.hasVendorXCVmem()) {
+  if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
     setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
     setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
     setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
@@ -1449,7 +1449,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
   }

-  if (Subtarget.hasVendorXCValu()) {
+  if (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::ABS, XLenVT, Legal);
     setOperationAction(ISD::SMIN, XLenVT, Legal);
     setOperationAction(ISD::UMIN, XLenVT, Legal);
@@ -1928,12 +1928,13 @@ bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
 }

 bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
-  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXCVbitmanip();
+  return Subtarget.hasStdExtZbb() ||
+         (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit());
 }

 bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
   return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
-         Subtarget.hasVendorXCVbitmanip();
+         (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit());
 }

 bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
@@ -21088,7 +21089,7 @@ bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                      SDValue &Offset,
                                                      ISD::MemIndexedMode &AM,
                                                      SelectionDAG &DAG) const {
-  if (Subtarget.hasVendorXCVmem()) {
+  if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
     if (Op->getOpcode() != ISD::ADD)
       return false;

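A minimal standalone sketch of the pattern this diff applies throughout: every check for a CORE-V (XCV) vendor extension is now paired with !Subtarget.is64Bit(), since the XCV extensions are only defined for RV32. The MockSubtarget type and useXCValuLowering helper below are hypothetical names for illustration only, not LLVM's API.

// Hypothetical, self-contained illustration of the RV32-only gate; not LLVM code.
#include <cassert>

struct MockSubtarget {
  bool HasXCValu; // stand-in for Subtarget.hasVendorXCValu()
  bool Is64Bit;   // stand-in for Subtarget.is64Bit() (XLEN == 64)

  bool hasVendorXCValu() const { return HasXCValu; }
  bool is64Bit() const { return Is64Bit; }
};

// Mirrors the new condition: XCValu-based lowering is only selected on RV32.
static bool useXCValuLowering(const MockSubtarget &ST) {
  return ST.hasVendorXCValu() && !ST.is64Bit();
}

int main() {
  MockSubtarget RV32{/*HasXCValu=*/true, /*Is64Bit=*/false};
  MockSubtarget RV64{/*HasXCValu=*/true, /*Is64Bit=*/true};
  assert(useXCValuLowering(RV32));  // RV32: XCValu operations (ABS/SMIN/UMIN, ...) stay Legal
  assert(!useXCValuLowering(RV64)); // RV64: fall back to the default expansion
  return 0;
}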