Skip to content

Commit 62cae4f

Browse files
authored
[AArch64] Fix a multitude of AArch64 typos (NFC) (#143370)
Fix a multitude of typos in the AArch64 codebase using the https://github.com/crate-ci/typos Rust package.
1 parent 4a6d31f commit 62cae4f

40 files changed

+125
-125
lines changed

llvm/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -73,11 +73,11 @@ class AArch64AdvSIMDScalar : public MachineFunctionPass {
7373
bool isProfitableToTransform(const MachineInstr &MI) const;
7474

7575
// transformInstruction - Perform the transformation of an instruction
76-
// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
76+
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
7777
// to be the correct register class, minimizing cross-class copies.
7878
void transformInstruction(MachineInstr &MI);
7979

80-
// processMachineBasicBlock - Main optimzation loop.
80+
// processMachineBasicBlock - Main optimization loop.
8181
bool processMachineBasicBlock(MachineBasicBlock *MBB);
8282

8383
public:
@@ -231,7 +231,7 @@ bool AArch64AdvSIMDScalar::isProfitableToTransform(
231231

232232
// If any of the uses of the original instructions is a cross class copy,
233233
// that's a copy that will be removable if we transform. Likewise, if
234-
// any of the uses is a transformable instruction, it's likely the tranforms
234+
// any of the uses is a transformable instruction, it's likely the transforms
235235
// will chain, enabling us to save a copy there, too. This is an aggressive
236236
// heuristic that approximates the graph based cost analysis described above.
237237
Register Dst = MI.getOperand(0).getReg();
@@ -280,7 +280,7 @@ static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
280280
}
281281

282282
// transformInstruction - Perform the transformation of an instruction
283-
// to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
283+
// to its equivalent AdvSIMD scalar instruction. Update inputs and outputs
284284
// to be the correct register class, minimizing cross-class copies.
285285
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
286286
LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);
@@ -372,7 +372,7 @@ void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
372372
++NumScalarInsnsUsed;
373373
}
374374

375-
// processMachineBasicBlock - Main optimzation loop.
375+
// processMachineBasicBlock - Main optimization loop.
376376
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
377377
bool Changed = false;
378378
for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {

llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -467,7 +467,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
467467
PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
468468

469469
if (PAuthABIPlatform || PAuthABIVersion) {
470-
TS->emitAtributesSubsection(
470+
TS->emitAttributesSubsection(
471471
AArch64BuildAttributes::getVendorName(
472472
AArch64BuildAttributes::AEABI_PAUTHABI),
473473
AArch64BuildAttributes::SubsectionOptional::REQUIRED,
@@ -490,7 +490,7 @@ void AArch64AsmPrinter::emitAttributes(unsigned Flags,
490490
(Flags & AArch64BuildAttributes::Feature_GCS_Flag) ? 1 : 0;
491491

492492
if (BTIValue || PACValue || GCSValue) {
493-
TS->emitAtributesSubsection(
493+
TS->emitAttributesSubsection(
494494
AArch64BuildAttributes::getVendorName(
495495
AArch64BuildAttributes::AEABI_FEATURE_AND_BITS),
496496
AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
@@ -3531,7 +3531,7 @@ const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
35313531
char AArch64AsmPrinter::ID = 0;
35323532

35333533
INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
3534-
"AArch64 Assmebly Printer", false, false)
3534+
"AArch64 Assembly Printer", false, false)
35353535

35363536
// Force static initialization.
35373537
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {

llvm/lib/Target/AArch64/AArch64CollectLOH.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -232,7 +232,7 @@ static bool isCandidateLoad(const MachineInstr &MI) {
232232
}
233233
}
234234

235-
/// Check whether the given instruction can load a litteral.
235+
/// Check whether the given instruction can load a literal.
236236
static bool supportLoadFromLiteral(const MachineInstr &MI) {
237237
switch (MI.getOpcode()) {
238238
default:
@@ -247,7 +247,7 @@ static bool supportLoadFromLiteral(const MachineInstr &MI) {
247247
}
248248
}
249249

250-
/// Number of GPR registers traked by mapRegToGPRIndex()
250+
/// Number of GPR registers tracked by mapRegToGPRIndex()
251251
static const unsigned N_GPR_REGS = 31;
252252
/// Map register number to index from 0-30.
253253
static int mapRegToGPRIndex(MCRegister Reg) {

llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -573,15 +573,15 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
573573
// Update the CFG first.
574574
updateTailPHIs();
575575

576-
// Save successor probabilties before removing CmpBB and Tail from their
576+
// Save successor probabilities before removing CmpBB and Tail from their
577577
// parents.
578578
BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB);
579579
BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail);
580580

581581
Head->removeSuccessor(CmpBB);
582582
CmpBB->removeSuccessor(Tail);
583583

584-
// If Head and CmpBB had successor probabilties, udpate the probabilities to
584+
// If Head and CmpBB had successor probabilities, update the probabilities to
585585
// reflect the ccmp-conversion.
586586
if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) {
587587

@@ -596,7 +596,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
596596
Head2Tail + Head2CmpBB * CmpBB2Tail);
597597

598598
// We will transfer successors of CmpBB to Head in a moment without
599-
// normalizing the successor probabilities. Set the successor probabilites
599+
// normalizing the successor probabilities. Set the successor probabilities
600600
// before doing so.
601601
//
602602
// Pr(I|Head) = Pr(CmpBB|Head) * Pr(I|CmpBB).

llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,10 +64,10 @@ static bool usesFrameIndex(const MachineInstr &MI) {
6464
return false;
6565
}
6666

67-
// Instructions that lose their 'read' operation for a subesquent fence acquire
67+
// Instructions that lose their 'read' operation for a subsequent fence acquire
6868
// (DMB LD) once the zero register is used.
6969
//
70-
// WARNING: The aquire variants of the instructions are also affected, but they
70+
// WARNING: The acquire variants of the instructions are also affected, but they
7171
// are split out into `atomicBarrierDroppedOnZero()` to support annotations on
7272
// assembly.
7373
static bool atomicReadDroppedOnZero(unsigned Opcode) {

llvm/lib/Target/AArch64/AArch64FastISel.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -508,7 +508,7 @@ Register AArch64FastISel::materializeGV(const GlobalValue *GV) {
508508
// also uses BuildMI for making an ADRP (+ MOVK) + ADD, but the operands
509509
// are not exactly 1:1 with FastISel so we cannot easily abstract this
510510
// out. At some point, it would be nice to find a way to not have this
511-
// duplciate code.
511+
// duplicate code.
512512
Register DstReg = createResultReg(&AArch64::GPR64commonRegClass);
513513
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AArch64::MOVKXi),
514514
DstReg)

llvm/lib/Target/AArch64/AArch64FrameLowering.cpp

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@ static const unsigned DefaultSafeSPDisplacement = 255;
399399
/// size limit beyond which some of these instructions will require a scratch
400400
/// register during their expansion later.
401401
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
402-
// FIXME: For now, just conservatively guestimate based on unscaled indexing
402+
// FIXME: For now, just conservatively guesstimate based on unscaled indexing
403403
// range. We'll end up allocating an unnecessary spill slot a lot, but
404404
// realistically that's not a big deal at this stage of the game.
405405
for (MachineBasicBlock &MBB : MF) {
@@ -647,7 +647,7 @@ void AArch64FrameLowering::emitCalleeSavedSVELocations(
647647
continue;
648648

649649
// Not all unwinders may know about SVE registers, so assume the lowest
650-
// common demoninator.
650+
// common denominator.
651651
assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
652652
MCRegister Reg = Info.getReg();
653653
if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
@@ -801,7 +801,7 @@ void AArch64FrameLowering::allocateStackSpace(
801801
.addImm(InitialOffset.getFixed())
802802
.addImm(InitialOffset.getScalable());
803803
// The fixed allocation may leave unprobed bytes at the top of the
804-
// stack. If we have subsequent alocation (e.g. if we have variable-sized
804+
// stack. If we have subsequent allocation (e.g. if we have variable-sized
805805
// objects), we need to issue an extra probe, so these allocations start in
806806
// a known state.
807807
if (FollowupAllocs) {
@@ -2054,7 +2054,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
20542054
HasWinCFI = true;
20552055
// alloc_l can hold at most 256MB, so assume that NumBytes doesn't
20562056
// exceed this amount. We need to move at most 2^24 - 1 into x15.
2057-
// This is at most two instructions, MOVZ follwed by MOVK.
2057+
// This is at most two instructions, MOVZ followed by MOVK.
20582058
// TODO: Fix to use multiple stack alloc unwind codes for stacks
20592059
// exceeding 256MB in size.
20602060
if (NumBytes >= (1 << 28))
@@ -2400,7 +2400,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
24002400
MachineInstr::FrameDestroy, PrologueSaveSize);
24012401
} else {
24022402
// If not, make sure to emit an add after the last ldp.
2403-
// We're doing this by transfering the size to be restored from the
2403+
// We're doing this by transferring the size to be restored from the
24042404
// adjustment *before* the CSR pops to the adjustment *after* the CSR
24052405
// pops.
24062406
AfterCSRPopSize += PrologueSaveSize;
@@ -2949,7 +2949,7 @@ static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
29492949
const TargetRegisterInfo *TRI) {
29502950
// If we are generating register pairs for a Windows function that requires
29512951
// EH support, then pair consecutive registers only. There are no unwind
2952-
// opcodes for saves/restores of non-consectuve register pairs.
2952+
// opcodes for saves/restores of non-consecutive register pairs.
29532953
// The unwind opcodes are save_regp, save_regp_x, save_fregp, save_frepg_x,
29542954
// save_lrpair.
29552955
// https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
@@ -3187,7 +3187,7 @@ static void computeCalleeSaveRegisterPairs(
31873187
RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
31883188
RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
31893189

3190-
// Realign the scalable offset if necesary. This is relevant when
3190+
// Realign the scalable offset if necessary. This is relevant when
31913191
// spilling predicates on Windows.
31923192
if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
31933193
ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
@@ -5022,7 +5022,7 @@ MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
50225022
}
50235023

50245024
// Find contiguous runs of tagged memory and emit shorter instruction
5025-
// sequencies for them when possible.
5025+
// sequences for them when possible.
50265026
TagStoreEdit TSE(MBB, FirstZeroData);
50275027
std::optional<int64_t> EndOffset;
50285028
for (auto &Instr : Instrs) {
@@ -5591,7 +5591,7 @@ void AArch64FrameLowering::emitRemarks(
55915591
unsigned RegTy = StackAccess::AccessType::GPR;
55925592
if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
55935593
// SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
5594-
// spill/fill the predicate as a data vector (so are an FPR acess).
5594+
// spill/fill the predicate as a data vector (so are an FPR access).
55955595
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
55965596
MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
55975597
AArch64::PPRRegClass.contains(MI.getOperand(0).getReg())) {

llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -991,7 +991,7 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
991991
}
992992

993993
/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
994-
/// operand is refered by the instructions have SP operand
994+
/// operand is referred by the instructions have SP operand
995995
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
996996
SDValue &Shift) {
997997
unsigned ShiftVal = 0;
@@ -2841,7 +2841,7 @@ static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
28412841
// After #1, x useful bits are 0x7, then the useful bits of x, live through
28422842
// y.
28432843
// After #2, the useful bits of x are 0x4.
2844-
// However, if x is used on an unpredicatable instruction, then all its bits
2844+
// However, if x is used on an unpredictable instruction, then all its bits
28452845
// are useful.
28462846
// E.g.
28472847
// 1. y = x & 0x7
@@ -3611,7 +3611,7 @@ static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
36113611
DstLSB = 0;
36123612
Width = ImmS - ImmR + 1;
36133613
// FIXME: This constraint is to catch bitfield insertion we may
3614-
// want to widen the pattern if we want to grab general bitfied
3614+
// want to widen the pattern if we want to grab general bitfield
36153615
// move case
36163616
if (Width <= 0)
36173617
continue;
@@ -3999,7 +3999,7 @@ static int getIntOperandFromRegisterString(StringRef RegString) {
39993999

40004000
// Lower the read_register intrinsic to an MRS instruction node if the special
40014001
// register string argument is either of the form detailed in the ALCE (the
4002-
// form described in getIntOperandsFromRegsterString) or is a named register
4002+
// form described in getIntOperandsFromRegisterString) or is a named register
40034003
// known by the MRS SysReg mapper.
40044004
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
40054005
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -4060,7 +4060,7 @@ bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
40604060

40614061
// Lower the write_register intrinsic to an MSR instruction node if the special
40624062
// register string argument is either of the form detailed in the ALCE (the
4063-
// form described in getIntOperandsFromRegsterString) or is a named register
4063+
// form described in getIntOperandsFromRegisterString) or is a named register
40644064
// known by the MSR SysReg mapper.
40654065
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
40664066
const auto *MD = cast<MDNodeSDNode>(N->getOperand(1));
@@ -7278,7 +7278,7 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
72787278
}
72797279

72807280
/// Return the EVT of the data associated to a memory operation in \p
7281-
/// Root. If such EVT cannot be retrived, it returns an invalid EVT.
7281+
/// Root. If such EVT cannot be retrieved, it returns an invalid EVT.
72827282
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
72837283
if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
72847284
return MemIntr->getMemoryVT();

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -5367,7 +5367,7 @@ static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
53675367
return AArch64ISD::UMULL;
53685368
} else if (VT == MVT::v2i64 && DAG.MaskedValueIsZero(N0, Mask) &&
53695369
DAG.MaskedValueIsZero(N1, Mask)) {
5370-
// For v2i64 we look more aggresively at both operands being zero, to avoid
5370+
// For v2i64 we look more aggressively at both operands being zero, to avoid
53715371
// scalarization.
53725372
return AArch64ISD::UMULL;
53735373
}
@@ -5844,7 +5844,7 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
58445844
} else if (Ty.isVector() && Ty.isInteger() && isTypeLegal(Ty)) {
58455845
return DAG.getNode(ISD::ABS, dl, Ty, Op.getOperand(1));
58465846
} else {
5847-
report_fatal_error("Unexpected type for AArch64 NEON intrinic");
5847+
report_fatal_error("Unexpected type for AArch64 NEON intrinsic");
58485848
}
58495849
}
58505850
case Intrinsic::aarch64_neon_pmull64: {
@@ -8630,9 +8630,9 @@ static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {
86308630
if (SizeInBits < 8)
86318631
return false;
86328632

8633-
APInt RequredZero(SizeInBits, 0xFE);
8633+
APInt RequiredZero(SizeInBits, 0xFE);
86348634
KnownBits Bits = DAG.computeKnownBits(Arg, 4);
8635-
bool ZExtBool = (Bits.Zero & RequredZero) == RequredZero;
8635+
bool ZExtBool = (Bits.Zero & RequiredZero) == RequiredZero;
86368636
return ZExtBool;
86378637
}
86388638

@@ -13536,7 +13536,7 @@ static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
1353613536
OpLHS = DAG.getBitcast(MVT::v2f32, OpLHS);
1353713537
} else {
1353813538
assert(VT.getScalarSizeInBits() == 32 &&
13539-
"Expected 16 or 32 bit shuffle elemements");
13539+
"Expected 16 or 32 bit shuffle elements");
1354013540
Input = DAG.getBitcast(MVT::v2f64, Input);
1354113541
OpLHS = DAG.getBitcast(MVT::v2f64, OpLHS);
1354213542
}
@@ -13941,7 +13941,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1394113941
unsigned NewEltCount = VT.getSizeInBits() / LaneSize;
1394213942
MVT NewVecTy = MVT::getVectorVT(NewEltTy, NewEltCount);
1394313943
V1 = DAG.getBitcast(NewVecTy, V1);
13944-
// Constuct the DUP instruction
13944+
// Construct the DUP instruction
1394513945
V1 = constructDup(V1, Lane, dl, NewVecTy, Opcode, DAG);
1394613946
// Cast back to the original type
1394713947
return DAG.getBitcast(VT, V1);
@@ -16900,12 +16900,12 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
1690016900
}
1690116901

1690216902
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
16903-
Align &RequiredAligment) const {
16903+
Align &RequiredAlignment) const {
1690416904
if (!LoadedType.isSimple() ||
1690516905
(!LoadedType.isInteger() && !LoadedType.isFloatingPoint()))
1690616906
return false;
1690716907
// Cyclone supports unaligned accesses.
16908-
RequiredAligment = Align(1);
16908+
RequiredAlignment = Align(1);
1690916909
unsigned NumBits = LoadedType.getSizeInBits();
1691016910
return NumBits == 32 || NumBits == 64;
1691116911
}
@@ -18028,7 +18028,7 @@ static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
1802818028
EXT1->getOperand(0)->getValueType(0) != MVT::v16i8)
1802918029
return SDValue();
1803018030

18031-
// Pattern is dectected. Let's convert it to sequence of nodes.
18031+
// Pattern is detected. Let's convert it to sequence of nodes.
1803218032
SDLoc DL(N);
1803318033

1803418034
// First, create the node pattern of UABD/SABD.
@@ -18246,10 +18246,10 @@ static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
1824618246
DAG.getConstant(I * 16, DL, MVT::i64));
1824718247
SDValue Dot =
1824818248
DAG.getNode(DotOpcode, DL, Zeros.getValueType(), Zeros, Vec8Op0, Vec8Op1);
18249-
SDValue VecReudceAdd8 =
18249+
SDValue VecReduceAdd8 =
1825018250
DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot);
1825118251
return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16,
18252-
VecReudceAdd8);
18252+
VecReduceAdd8);
1825318253
}
1825418254

1825518255
// Given an (integer) vecreduce, we know the order of the inputs does not
@@ -21474,7 +21474,7 @@ static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {
2147421474
case Intrinsic::aarch64_neon_ushl:
2147521475
// For positive shift amounts we can use SHL, as ushl/sshl perform a regular
2147621476
// left shift for positive shift amounts. For negative shifts we can use a
21477-
// VASHR/VLSHR as appropiate.
21477+
// VASHR/VLSHR as appropriate.
2147821478
if (ShiftAmount < 0) {
2147921479
Opcode = IID == Intrinsic::aarch64_neon_sshl ? AArch64ISD::VASHR
2148021480
: AArch64ISD::VLSHR;
@@ -22880,7 +22880,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
2288022880
}
2288122881

2288222882
static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
22883-
assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexepected Opcode!");
22883+
assert(N->getOpcode() == AArch64ISD::SPLICE && "Unexpected Opcode!");
2288422884

2288522885
// splice(pg, op1, undef) -> op1
2288622886
if (N->getOperand(2).isUndef())
@@ -23616,10 +23616,10 @@ static SDValue performLOADCombine(SDNode *N,
2361623616
LD->getMemOperand()->getFlags(), LD->getAAInfo());
2361723617
SDValue UndefVector = DAG.getUNDEF(NewVT);
2361823618
SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL);
23619-
SDValue ExtendedReminingLoad =
23619+
SDValue ExtendedRemainingLoad =
2362023620
DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT,
2362123621
{UndefVector, RemainingLoad, InsertIdx});
23622-
LoadOps.push_back(ExtendedReminingLoad);
23622+
LoadOps.push_back(ExtendedRemainingLoad);
2362323623
LoadOpsChain.push_back(SDValue(cast<SDNode>(RemainingLoad), 1));
2362423624
EVT ConcatVT =
2362523625
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),

llvm/lib/Target/AArch64/AArch64ISelLowering.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@ class AArch64TargetLowering : public TargetLowering {
207207
bool optimizeExtendOrTruncateConversion(
208208
Instruction *I, Loop *L, const TargetTransformInfo &TTI) const override;
209209

210-
bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override;
210+
bool hasPairedLoad(EVT LoadedType, Align &RequiredAlignment) const override;
211211

212212
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
213213

0 commit comments

Comments
 (0)