Skip to content

[AArch64] Remove all instances of the 'hasSVEorSME' interfaces. #96543

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4359,7 +4359,9 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
// N1 = SRL_PRED true, V, splat(imm) --> rotr amount
// N0 = SHL_PRED true, V, splat(bits-imm)
// V = (xor x, y)
if (VT.isScalableVector() && Subtarget->hasSVE2orSME()) {
if (VT.isScalableVector() &&
(Subtarget->hasSVE2() ||
(Subtarget->hasSME() && Subtarget->isStreaming()))) {
Comment on lines +4363 to +4364
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Given the size of SVE2 perhaps this will become common enough to warrant implementing isSVE2orStreamingSVEAvailable()?

Copy link
Collaborator Author

@sdesmalen-arm sdesmalen-arm Jun 25, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, I'll make that change in an NFC follow-up patch.

if (N0.getOpcode() != AArch64ISD::SHL_PRED ||
N1.getOpcode() != AArch64ISD::SRL_PRED)
std::swap(N0, N1);
Expand Down
21 changes: 11 additions & 10 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1484,7 +1484,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
if (!Subtarget->isLittleEndian())
setOperationAction(ISD::BITCAST, VT, Expand);

if (Subtarget->hasSVE2orSME())
if (Subtarget->hasSVE2() ||
(Subtarget->hasSME() && Subtarget->isStreaming()))
// For SLI/SRI.
setOperationAction(ISD::OR, VT, Custom);
}
Expand Down Expand Up @@ -1937,7 +1938,7 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
}

bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const {
if (!Subtarget->hasSVEorSME())
if (!Subtarget->isSVEorStreamingSVEAvailable())
return true;

// We can only use the BRKB + CNTP sequence with legal predicate types. We can
Expand Down Expand Up @@ -14527,7 +14528,9 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
Op.getOperand(0), Op.getOperand(1));
case ISD::SRA:
case ISD::SRL:
if (VT.isScalableVector() && Subtarget->hasSVE2orSME()) {
if (VT.isScalableVector() &&
(Subtarget->hasSVE2() ||
(Subtarget->hasSME() && Subtarget->isStreaming()))) {
SDValue RShOperand;
unsigned ShiftValue;
if (canLowerSRLToRoundingShiftForVT(Op, VT, DAG, ShiftValue, RShOperand))
Expand Down Expand Up @@ -16234,15 +16237,13 @@ bool AArch64TargetLowering::isLegalInterleavedAccessType(

UseScalable = false;

if (!VecTy->isScalableTy() && !Subtarget->isNeonAvailable() &&
!Subtarget->useSVEForFixedLengthVectors())
return false;

if (VecTy->isScalableTy() && !Subtarget->hasSVEorSME())
if (isa<FixedVectorType>(VecTy) && !Subtarget->isNeonAvailable() &&
Copy link
Collaborator

@paulwalker-arm paulwalker-arm Jun 25, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since you're refactoring this block do you mind changing the !isScalableTy to be isa<FixedVectorType>(VecTy)?

(!Subtarget->useSVEForFixedLengthVectors() ||
!getSVEPredPatternFromNumElements(MinElts)))
return false;

// Ensure that the predicate for this number of elements is available.
if (Subtarget->hasSVE() && !getSVEPredPatternFromNumElements(MinElts))
if (isa<ScalableVectorType>(VecTy) &&
!Subtarget->isSVEorStreamingSVEAvailable())
return false;

// Ensure the number of vector elements is greater than 1.
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4675,7 +4675,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,

if (AArch64::FPR128RegClass.contains(DestReg) &&
AArch64::FPR128RegClass.contains(SrcReg)) {
if (Subtarget.hasSVEorSME() && !Subtarget.isNeonAvailable())
if (Subtarget.isSVEorStreamingSVEAvailable() &&
!Subtarget.isNeonAvailable())
BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
.addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
.addReg(AArch64::Z0 + (SrcReg - AArch64::Q0))
Expand Down
7 changes: 2 additions & 5 deletions llvm/lib/Target/AArch64/AArch64Subtarget.h
Original file line number Diff line number Diff line change
Expand Up @@ -361,20 +361,17 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {

void mirFileLoaded(MachineFunction &MF) const override;

bool hasSVEorSME() const { return hasSVE() || hasSME(); }
bool hasSVE2orSME() const { return hasSVE2() || hasSME(); }

// Return the known range for the bit length of SVE data registers. A value
// of 0 means nothing is known about that particular limit beyond what's
// implied by the architecture.
unsigned getMaxSVEVectorSizeInBits() const {
assert(hasSVEorSME() &&
assert(isSVEorStreamingSVEAvailable() &&
"Tried to get SVE vector length without SVE support!");
return MaxSVEVectorSizeInBits;
}

unsigned getMinSVEVectorSizeInBits() const {
assert(hasSVEorSME() &&
assert(isSVEorStreamingSVEAvailable() &&
"Tried to get SVE vector length without SVE support!");
return MinSVEVectorSizeInBits;
}
Expand Down
7 changes: 4 additions & 3 deletions llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2691,7 +2691,8 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
return AdjustCost(Entry->Cost);

if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() &&
CCH == TTI::CastContextHint::Masked &&
ST->isSVEorStreamingSVEAvailable() &&
TLI->getTypeAction(Src->getContext(), SrcTy) ==
TargetLowering::TypePromoteInteger &&
TLI->getTypeAction(Dst->getContext(), DstTy) ==
Expand All @@ -2712,8 +2713,8 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
// The BasicTTIImpl version only deals with CCH==TTI::CastContextHint::Normal,
// but we also want to include the TTI::CastContextHint::Masked case too.
if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() &&
TLI->isTypeLegal(DstTy))
CCH == TTI::CastContextHint::Masked &&
ST->isSVEorStreamingSVEAvailable() && TLI->isTypeLegal(DstTy))
CCH = TTI::CastContextHint::Normal;

return AdjustCost(
Expand Down
Loading