[AArch64] Fix a multitude of AArch64 typos (NFC) #143370

Merged 1 commit on Jun 9, 2025

10 changes: 5 additions & 5 deletions llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
@@ -73,11 +73,11 @@ class AArch64AdvSIMDScalar : public MachineFunctionPass {
bool isProfitableToTransform(const MachineInstr &MI) const;

// transformInstruction - Perform the transformation of an instruction
-// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void transformInstruction(MachineInstr &MI);

-// processMachineBasicBlock - Main optimzation loop.
+// processMachineBasicBlock - Main optimization loop.
bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
@@ -231,7 +231,7 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(

// If any of the uses of the original instructions is a cross class copy,
// that's a copy that will be removable if we transform. Likewise, if
-// any of the uses is a transformable instruction, it's likely the tranforms
+// any of the uses is a transformable instruction, it's likely the transforms
// will chain, enabling us to save a copy there, too. This is an aggressive
// heuristic that approximates the graph based cost analysis described above.
Register Dst = MI.getOperand(0).getReg();
@@ -280,7 +280,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
}

// transformInstruction - Perform the transformation of an instruction
-// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
+// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
// to be the correct register class, minimizing cross-class copies.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);
@@ -372,7 +372,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
++NumScalarInsnsUsed;
}

-// processMachineBasicBlock - Main optimzation loop.
+// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
bool Changed = false;
for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
6 changes: 3 additions & 3 deletions llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -467,7 +467,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

if (PAuthABIPlatform || PAuthABIVersion) {
-TS->emitAtributesSubsection(
+TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_PAUTHABI),
AArch64BuildAttributes::SubsectionOptional::REQUIRED,
@@ -490,7 +490,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
(Flags & AArch64BuildAttributes::Feature_GCS_Flag) ? 1 : 0;

if (BTIValue || PACValue || GCSValue) {
-TS->emitAtributesSubsection(
+TS->emitAttributesSubsection(
AArch64BuildAttributes::getVendorName(
AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
@@ -3531,7 +3531,7 @@ const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
char AArch64AsmPrinter::ID = 0;

INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
-"AArch64 Assmebly Printer", false, false)
+"AArch64 Assembly Printer", false, false)

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
4 changes: 2 additions & 2 deletions llvm/lib/Target/AArch64/AArch64CollectLOH.cpp
@@ -232,7 +232,7 @@ static bool isCandidateLoad(const MachineInstr &MI) {
}
}

-/// Check whether the given instruction can load a litteral.
+/// Check whether the given instruction can load a literal.
static bool supportLoadFromLiteral(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
@@ -247,7 +247,7 @@ static bool supportLoadFromLiteral(const MachineInstr &MI) {
}
}

-/// Number of GPR registers traked by mapRegToGPRIndex()
+/// Number of GPR registers tracked by mapRegToGPRIndex()
static const unsigned N_GPR_REGS = 31;
/// Map register number to index from 0-30.
static int mapRegToGPRIndex(MCRegister Reg) {
6 changes: 3 additions & 3 deletions llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -573,15 +573,15 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
// Update the CFG first.
updateTailPHIs();

-// Save successor probabilties before removing CmpBB and Tail from their
+// Save successor probabilities before removing CmpBB and Tail from their
// parents.
BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB);
BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail);

Head->removeSuccessor(CmpBB);
CmpBB->removeSuccessor(Tail);

-// If Head and CmpBB had successor probabilties, udpate the probabilities to
+// If Head and CmpBB had successor probabilities, update the probabilities to
// reflect the ccmp-conversion.
if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) {

@@ -596,7 +596,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
Head2Tail + Head2CmpBB * CmpBB2Tail);

// We will transfer successors of CmpBB to Head in a moment without
-// normalizing the successor probabilities. Set the successor probabilites
+// normalizing the successor probabilities. Set the successor probabilities
// before doing so.
//
// Pr(I|Head) = Pr(CmpBB|Head) * Pr(I|CmpBB).
@@ -64,10 +64,10 @@ static bool usesFrameIndex(const MachineInstr &MI) {
return false;
}

-// Instructions that lose their 'read' operation for a subesquent fence acquire
+// Instructions that lose their 'read' operation for a subsequent fence acquire
// (DMB LD) once the zero register is used.
//
-// WARNING: The aquire variants of the instructions are also affected, but they
+// WARNING: The acquire variants of the instructions are also affected, but they
// are split out into `atomicBarrierDroppedOnZero()` to support annotations on
// assembly.
static bool atomicReadDroppedOnZero(unsigned Opcode) {
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -508,7 +508,7 @@ Register AArch64FastISel::materializeGV(const GlobalValue *GV) {
// also uses BuildMI for making an ADRP (+ MOVK) + ADD, but the operands
// are not exactly 1:1 with FastISel so we cannot easily abstract this
// out. At some point, it would be nice to find a way to not have this
-// duplciate code.
+// duplicate code.
Register DstReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),
DstReg)
18 changes: 9 additions & 9 deletions llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -399,7 +399,7 @@ static const unsigned DefaultSafeSPDisplacement = 255;
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
-// FIXME: For now, just conservatively guestimate based on unscaled indexing
+// FIXME: For now, just conservatively guesstimate based on unscaled indexing
// range. We'll end up allocating an unnecessary spill slot a lot, but
// realistically that's not a big deal at this stage of the game.
for (MachineBasicBlock &MBB : MF) {
@@ -647,7 +647,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
continue;

// Not all unwinders may know about SVE registers, so assume the lowest
-// common demoninator.
+// common denominator.
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
MCRegister Reg = Info.getReg();
if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
@@ -801,7 +801,7 @@ void AArch64FrameLowering::allocateStackSpace(
.addImm(InitialOffset.getFixed())
.addImm(InitialOffset.getScalable());
// The fixed allocation may leave unprobed bytes at the top of the
-// stack. If we have subsequent alocation (e.g. if we have variable-sized
+// stack. If we have subsequent allocation (e.g. if we have variable-sized
// objects), we need to issue an extra probe, so these allocations start in
// a known state.
if (FollowupAllocs) {
@@ -2054,7 +2054,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
HasWinCFI = true;
// alloc_l can hold at most 256MB, so assume that NumBytes doesn't
// exceed this amount. We need to move at most 2^24 - 1 into x15.
-// This is at most two instructions, MOVZ follwed by MOVK.
+// This is at most two instructions, MOVZ followed by MOVK.
// TODO: Fix to use multiple stack alloc unwind codes for stacks
// exceeding 256MB in size.
if (NumBytes >= (1 << 28))
@@ -2400,7 +2400,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineInstr::FrameDestroy, PrologueSaveSize);
} else {
// If not, make sure to emit an add after the last ldp.
-// We're doing this by transfering the size to be restored from the
+// We're doing this by transferring the size to be restored from the
// adjustment *before* the CSR pops to the adjustment *after* the CSR
// pops.
AfterCSRPopSize += PrologueSaveSize;
@@ -2949,7 +2949,7 @@ static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
const TargetRegisterInfo *TRI) {
// If we are generating register pairs for a Windows function that requires
// EH support, then pair consecutive registers only. There are no unwind
-// opcodes for saves/restores of non-consectuve register pairs.
+// opcodes for saves/restores of non-consecutive register pairs.
// The unwind opcodes are save_regp, save_regp_x, save_fregp, save_frepg_x,
// save_lrpair.
// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
@@ -3187,7 +3187,7 @@ static void computeCalleeSaveRegisterPairs(
RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();

-// Realign the scalable offset if necesary. This is relevant when
+// Realign the scalable offset if necessary. This is relevant when
// spilling predicates on Windows.
if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
@@ -5022,7 +5022,7 @@ MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
}

// Find contiguous runs of tagged memory and emit shorter instruction
-// sequencies for them when possible.
+// sequences for them when possible.
TagStoreEdit TSE(MBB, FirstZeroData);
std::optional<int64_t> EndOffset;
for (auto &Instr : Instrs) {
@@ -5591,7 +5591,7 @@ void AArch64FrameLowering::emitRemarks(
unsigned RegTy = StackAccess::AccessType::GPR;
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
-// spill/fill the predicate as a data vector (so are an FPR acess).
+// spill/fill the predicate as a data vector (so are an FPR access).
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) {
12 changes: 6 additions & 6 deletions llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -991,7 +991,7 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
}

/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
-/// operand is refered by the instructions have SP operand
+/// operand is referred by the instructions have SP operand
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
SDValue &Shift) {
unsigned ShiftVal = 0;
@@ -2841,7 +2841,7 @@ static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
-// However, if x is used on an unpredicatable instruction, then all its bits
+// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
@@ -3611,7 +3611,7 @@ static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
DstLSB = 0;
Width = ImmS - ImmR + 1;
// FIXME: This constraint is to catch bitfield insertion we may
-// want to widen the pattern if we want to grab general bitfied
+// want to widen the pattern if we want to grab general bitfield
// move case
if (Width <= 0)
continue;
@@ -3999,7 +3999,7 @@ static int getIntOperandFromRegisterString(StringRef RegString) {

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -4060,7 +4060,7 @@ bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
-// form described in getIntOperandsFromRegsterString) or is a named register
+// form described in getIntOperandsFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -7278,7 +7278,7 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
}

/// Return the EVT of the data associated to a memory operation in \p
-/// Root. If such EVT cannot be retrived, it returns an invalid EVT.
+/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
return MemIntr->getMemoryVT();
30 changes: 15 additions & 15 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5367,7 +5367,7 @@ static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
return AArch64ISD::UMULL;
} else if (VT == MVT::v2i64 && DAG.MaskedValueIsZero(N0, Mask) &&
DAG.MaskedValueIsZero(N1, Mask)) {
-// For v2i64 we look more aggresively at both operands being zero, to avoid
+// For v2i64 we look more aggressively at both operands being zero, to avoid
// scalarization.
return AArch64ISD::UMULL;
}
@@ -5844,7 +5844,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
} else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
} else {
-report_fatal_error("Unexpected type for AArch64 NEON intrinic");
+report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
}
}
case Intrinsic::aarch64_neon_pmull64: {
@@ -8630,9 +8630,9 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
if (SizeInBits < 8)
return false;

-APInt RequredZero(SizeInBits, 0xFE);
+APInt RequiredZero(SizeInBits, 0xFE);
KnownBits Bits = DAG.computeKnownBits(Arg, 4);
-bool ZExtBool = (Bits.Zero & RequredZero) == RequredZero;
+bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
return ZExtBool;
}

@@ -13536,7 +13536,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
} else {
assert(VT.getScalarSizeInBits() == 32 &&
-"Expected 16 or 32 bit shuffle elemements");
+"Expected 16 or 32 bit shuffle elements");
Input = DAG.getBitcast(MVT::v2f64, Input);
OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
}
@@ -13941,7 +13941,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
V1 = DAG.getBitcast(NewVecTy, V1);
-// Constuct the DUP instruction
+// Construct the DUP instruction
V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
// Cast back to the original type
return DAG.getBitcast(VT, V1);
@@ -16900,12 +16900,12 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
}

bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
-Align &RequiredAligment) const {
+Align &RequiredAlignment) const {
if (!LoadedType.isSimple() ||
(!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
return false;
// Cyclone supports unaligned accesses.
-RequiredAligment = Align(1);
+RequiredAlignment = Align(1);
unsigned NumBits = LoadedType.getSizeInBits();
return NumBits == 32 || NumBits == 64;
}
@@ -18028,7 +18028,7 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
return SDValue();

-// Pattern is dectected. Let's convert it to sequence of nodes.
+// Pattern is detected. Let's convert it to sequence of nodes.
SDLoc DL(N);

// First, create the node pattern of UABD/SABD.
@@ -18246,10 +18246,10 @@ static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(I * 16, DL, MVT::i64));
SDValue Dot =
DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, Vec8Op0, Vec8Op1);
-SDValue VecReudceAdd8 =
+SDValue VecReduceAdd8 =
DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16,
-VecReudceAdd8);
+VecReduceAdd8);
}

// Given an (integer) vecreduce, we know the order of the inputs does not
@@ -21474,7 +21474,7 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
case Intrinsic::aarch64_neon_ushl:
// For positive shift amounts we can use SHL, as ushl/sshl perform a regular
// left shift for positive shift amounts. For negative shifts we can use a
-// VASHR/VLSHR as appropiate.
+// VASHR/VLSHR as appropriate.
if (ShiftAmount < 0) {
Opcode = IID == Intrinsic::aarch64_neon_sshl ? AArch64ISD::VASHR
: AArch64ISD::VLSHR;
@@ -22880,7 +22880,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
}

static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
-assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!");
+assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");

// splice(pg, op1, undef) -> op1
if (N->getOperand(2).isUndef())
@@ -23616,10 +23616,10 @@ static SDValue performLOADCombine(SDNode *N,
LD->getMemOperand()->getFlags(), LD->getAAInfo());
SDValue UndefVector = DAG.getUNDEF(NewVT);
SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL);
-SDValue ExtendedReminingLoad =
+SDValue ExtendedRemainingLoad =
DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT,
{UndefVector, RemainingLoad, InsertIdx});
-LoadOps.push_back(ExtendedReminingLoad);
+LoadOps.push_back(ExtendedRemainingLoad);
LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1));
EVT ConcatVT =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -207,7 +207,7 @@ class AArch64TargetLowering : public TargetLowering {
bool optimizeExtendOrTruncateConversion(
Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;

-bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;
+bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;

unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
