[RISCV] Qualify all XCV predicates with !is64Bit. #101074

Merged (1 commit) on Jul 30, 2024
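The CORE-V (XCV) vendor extensions are RV32-only, but the feature predicates alone do not encode that, so XCV-specific lowering could previously be selected on RV64 targets. This patch mechanically pairs every XCV feature check in the RISC-V backend with `!is64Bit()`. A minimal sketch of the recurring guard (the patch inlines the condition at each use site; the helper and its name below are hypothetical):

```cpp
#include "RISCVSubtarget.h" // LLVM RISC-V backend internal header

// Hypothetical helper showing the shape of the guard this patch applies:
// an XCV feature bit only takes effect when the target is 32-bit.
static bool hasUsableXCValu(const RISCVSubtarget &ST) {
  return ST.hasVendorXCValu() && !ST.is64Bit();
}
```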
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (2 changes: 1 addition & 1 deletion)

@@ -1540,7 +1540,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (tryIndexedLoad(Node))
       return;
 
-    if (Subtarget->hasVendorXCVmem()) {
+    if (Subtarget->hasVendorXCVmem() && !Subtarget->is64Bit()) {
       // We match post-incrementing load here
       LoadSDNode *Load = cast<LoadSDNode>(Node);
       if (Load->getAddressingMode() != ISD::POST_INC)
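The path guarded above matches post-incrementing loads. For context, a hedged C++ illustration (not taken from the patch) of the access pattern that yields `ISD::POST_INC` candidates:

```cpp
// Each iteration loads through `p`, then advances it by one element.
// On RV32 with XCVmem the load and the address bump can be selected as
// one post-incrementing access; after this patch, RV64 targets skip
// this path entirely.
int sum(const int *p, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i)
    s += *p++;
  return s;
}
```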
llvm/lib/Target/RISCV/RISCVISelLowering.cpp (27 changes: 14 additions & 13 deletions)

@@ -250,14 +250,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (RV64LegalI32 && Subtarget.is64Bit())
     setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
 
-  if (!Subtarget.hasVendorXCValu())
-    setCondCodeAction(ISD::SETLE, XLenVT, Expand);
   setCondCodeAction(ISD::SETGT, XLenVT, Custom);
   setCondCodeAction(ISD::SETGE, XLenVT, Expand);
-  if (!Subtarget.hasVendorXCValu())
-    setCondCodeAction(ISD::SETULE, XLenVT, Expand);
   setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
   setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
+  if (!(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
+    setCondCodeAction(ISD::SETULE, XLenVT, Expand);
+    setCondCodeAction(ISD::SETLE, XLenVT, Expand);
+  }
 
   if (RV64LegalI32 && Subtarget.is64Bit())
     setOperationAction(ISD::SETCC, MVT::i32, Promote);
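The rewritten block keeps SETLE/SETULE legal only when XCValu is usable, i.e. on RV32; in every other configuration they are marked Expand. Expanding a condition code means the legalizer rewrites it in terms of supported comparisons, for instance via the integer identity sketched here (an illustration of the semantics, not the backend's literal output):

```cpp
// (a <= b) is equivalent to !(a > b) for both signed and unsigned
// integers; SETLE/SETULE marked Expand are rewritten along these lines.
bool le(int a, int b) { return !(a > b); }
bool ule(unsigned a, unsigned b) { return !(a > b); }
```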
@@ -343,7 +343,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     if (Subtarget.is64Bit())
       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Custom);
-  } else if (Subtarget.hasVendorXCVbitmanip()) {
+  } else if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::ROTL, XLenVT, Expand);
   } else {
     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
@@ -366,7 +366,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          : Expand);
 
 
-  if (Subtarget.hasVendorXCVbitmanip()) {
+  if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
   } else {
     // Zbkb can use rev8+brev8 to implement bitreverse.
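The comment above refers to composing Zbkb's byte reverse (rev8) with its per-byte bit reverse (brev8) to reverse every bit of a word. A portable sketch of that decomposition, emulating brev8 with mask-and-shift steps:

```cpp
#include <cstdint>

// Full 32-bit bit reversal as rev8 followed by brev8. __builtin_bswap32
// stands in for rev8; the three mask/shift steps reverse the bits inside
// each byte, which brev8 does in a single instruction.
uint32_t bitreverse32(uint32_t x) {
  uint32_t r = __builtin_bswap32(x);                       // rev8
  r = ((r & 0x55555555u) << 1) | ((r >> 1) & 0x55555555u); // swap adjacent bits
  r = ((r & 0x33333333u) << 2) | ((r >> 2) & 0x33333333u); // swap bit pairs
  r = ((r & 0x0F0F0F0Fu) << 4) | ((r >> 4) & 0x0F0F0F0Fu); // swap nibbles
  return r;
}
```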
@@ -387,14 +387,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       else
         setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
     }
-  } else if (!Subtarget.hasVendorXCVbitmanip()) {
+  } else if (!(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
     setOperationAction({ISD::CTTZ, ISD::CTPOP}, XLenVT, Expand);
     if (RV64LegalI32 && Subtarget.is64Bit())
       setOperationAction({ISD::CTTZ, ISD::CTPOP}, MVT::i32, Expand);
   }
 
   if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
-      Subtarget.hasVendorXCVbitmanip()) {
+      (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
     // We need the custom lowering to make sure that the resulting sequence
     // for the 32bit case is efficient on 64bit targets.
     if (Subtarget.is64Bit()) {
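The comment in this hunk explains the need for custom lowering: on a 64-bit target a 32-bit value occupies a 64-bit register, so a plain 64-bit count would report 32 spurious leading zeros. One possible correction, shown for illustration only (the backend may emit a different sequence):

```cpp
#include <cstdint>

// Leading-zero count of an i32 on a 64-bit machine: shift the value to
// the top of the register so the 32 phantom zeros do not count.
// Precondition: x != 0 (the CTLZ_ZERO_UNDEF form assumes this as well).
unsigned clz32(uint32_t x) {
  return __builtin_clzll((uint64_t)x << 32); // == __builtin_clzll(x) - 32
}
```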
@@ -1439,7 +1439,7 @@
     }
   }
 
-  if (Subtarget.hasVendorXCVmem()) {
+  if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
     setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
     setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
     setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
@@ -1449,7 +1449,7 @@
     setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
   }
 
-  if (Subtarget.hasVendorXCValu()) {
+  if (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()) {
     setOperationAction(ISD::ABS, XLenVT, Legal);
     setOperationAction(ISD::SMIN, XLenVT, Legal);
     setOperationAction(ISD::UMIN, XLenVT, Legal);
@@ -1928,12 +1928,13 @@ bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
 }
 
 bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
-  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXCVbitmanip();
+  return Subtarget.hasStdExtZbb() ||
+         (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit());
 }
 
 bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
   return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
-         Subtarget.hasVendorXCVbitmanip();
+         (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit());
 }
 
 bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
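These hooks tell the middle end whether a count-zeros operation is cheap enough to run speculatively, without first branching around a zero input. Roughly the transformation they gate (illustrative):

```cpp
// When isCheapToSpeculateCttz() returns true, a guarded pattern like this
// may be flattened into an unconditional count with defined zero-input
// behavior, eliminating the branch.
unsigned tz(unsigned x) { return x ? __builtin_ctz(x) : 32u; }
```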
@@ -21082,7 +21083,7 @@ bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                      SDValue &Offset,
                                                      ISD::MemIndexedMode &AM,
                                                      SelectionDAG &DAG) const {
-  if (Subtarget.hasVendorXCVmem()) {
+  if (Subtarget.hasVendorXCVmem() && !Subtarget.is64Bit()) {
     if (Op->getOpcode() != ISD::ADD)
       return false;
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp (2 changes: 1 addition & 1 deletion)

@@ -280,7 +280,7 @@ bool RISCVTTIImpl::hasActiveVectorLength(unsigned, Type *DataTy, Align) const {
 TargetTransformInfo::PopcntSupportKind
 RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
-  return ST->hasStdExtZbb() || ST->hasVendorXCVbitmanip()
+  return ST->hasStdExtZbb() || (ST->hasVendorXCVbitmanip() && !ST->is64Bit())
              ? TTI::PSK_FastHardware
              : TTI::PSK_Software;
 }
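`getPopcntSupport` advertises whether population count is fast in hardware; with this change, XCVbitmanip only qualifies on RV32 targets. A rough illustration of what the answer influences, namely whether cost models treat code like this as one cheap instruction or a multi-instruction software sequence:

```cpp
// Cost models consult PSK_FastHardware when deciding how aggressively to
// form popcount, e.g. from bit-counting loops or from this builtin.
int ones(unsigned x) { return __builtin_popcount(x); }
```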