
Commit 83783e8

[RISCV] Fix typos discovered by codespell (NFC) (#126191)
Found using https://github.com/codespell-project/codespell

```
codespell RISCV --write-changes \
  --ignore-words-list=FPR,fpr,VAs,ORE,WorstCase,hart,sie,MIs,FLE,fle,CarryIn,vor,OLT,VILL,vill,bu,pass-thru
```
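The same check can be reproduced as a dry run: without `--write-changes`, codespell only reports what it finds instead of editing files. A minimal sketch, assuming codespell is installed (e.g. via `pip install codespell`) and the command is run from `llvm/lib/Target`:

```
# Report misspellings only (codespell's default); add --write-changes
# to apply the fixes in place as this commit did.
codespell RISCV \
  --ignore-words-list=FPR,fpr,VAs,ORE,WorstCase,hart,sie,MIs,FLE,fle,CarryIn,vor,OLT,VILL,vill,bu,pass-thru
```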
1 parent e346d47 · commit 83783e8

23 files changed: +55 −55 lines

llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -130,7 +130,7 @@ class RISCVAsmParser : public MCTargetAsmParser {
   void emitToStreamer(MCStreamer &S, const MCInst &Inst);
 
   // Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
-  // synthesize the desired immedate value into the destination register.
+  // synthesize the desired immediate value into the destination register.
   void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out);
 
   // Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
@@ -2626,7 +2626,7 @@ ParseStatus RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) {
   std::unique_ptr<RISCVOperand> OptionalImmOp;
 
   if (getLexer().isNot(AsmToken::LParen)) {
-    // Parse an Integer token. We do not accept arbritrary constant expressions
+    // Parse an Integer token. We do not accept arbitrary constant expressions
     // in the offset field (because they may include parens, which complicates
     // parsing a lot).
     int64_t ImmVal;
```

llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -621,7 +621,7 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
     return;
   }
 
-  // We found an ICmp, do some canonicalizations.
+  // We found an ICmp, do some canonicalization.
 
   // Adjust comparisons to use comparison with 0 if possible.
   if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
@@ -735,7 +735,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
   case TargetOpcode::G_FCONSTANT: {
-    // TODO: Use constant pool for complext constants.
+    // TODO: Use constant pool for complex constants.
     // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
```

llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -175,7 +175,7 @@ static unsigned extractRotateInfo(int64_t Val) {
 
 static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
                                         RISCVMatInt::InstSeq &Res) {
-  assert(Val > 0 && "Expected postive val");
+  assert(Val > 0 && "Expected positive val");
 
   unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
   uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
```

llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -21,7 +21,7 @@
 
 using namespace llvm;
 
-// This option controls wether or not we emit ELF attributes for ABI features,
+// This option controls whether or not we emit ELF attributes for ABI features,
 // like RISC-V atomics or X3 usage.
 static cl::opt<bool> RiscvAbiAttr(
     "riscv-abi-attributes",
```

llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -1089,7 +1089,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
   bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
     const MachineOperand &MO = MI->getOperand(OpNo);
-    // Skip vl ouput. It should be the second output.
+    // Skip vl output. It should be the second output.
     if (hasVLOutput && OpNo == 1)
       continue;
 
```

llvm/lib/Target/RISCV/RISCVFeatures.td

Lines changed: 2 additions & 2 deletions
```diff
@@ -1020,7 +1020,7 @@ def HasStdExtSmctrOrSsctr : Predicate<"Subtarget->hasStdExtSmctrOrSsctr()">,
 // Vendor extensions
 //===----------------------------------------------------------------------===//
 
-// Ventana Extenions
+// Ventana Extensions
 
 def FeatureVendorXVentanaCondOps
     : RISCVExtension<1, 0, "Ventana Conditional Ops">;
@@ -1337,7 +1337,7 @@ def HasVendorXqcilo
 // LLVM specific features and extensions
 //===----------------------------------------------------------------------===//
 
-// Feature32Bit exists to mark CPUs that support RV32 to distinquish them from
+// Feature32Bit exists to mark CPUs that support RV32 to distinguish them from
 // tuning CPU names.
 def Feature32Bit
     : SubtargetFeature<"32bit", "IsRV32", "true", "Implements RV32">;
```

llvm/lib/Target/RISCV/RISCVFrameLowering.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -1182,7 +1182,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
 
   if (getLibCallID(MF, CSI) != -1) {
     // tail __riscv_restore_[0-12] instruction is considered as a terminator,
-    // therefor it is unnecessary to place any CFI instructions after it. Just
+    // therefore it is unnecessary to place any CFI instructions after it. Just
    // deallocate stack if needed and return.
    if (StackSize != 0)
      deallocateStack(MF, MBB, MBBI, DL, StackSize,
```

llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -131,7 +131,7 @@ static std::pair<Value *, Value *> matchStridedStart(Value *Start,
   }
 
   // Not a constant, maybe it's a strided constant with a splat added or
-  // multipled.
+  // multiplied.
   auto *BO = dyn_cast<BinaryOperator>(Start);
   if (!BO || (BO->getOpcode() != Instruction::Add &&
               BO->getOpcode() != Instruction::Or &&
```

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -3499,7 +3499,7 @@ bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5,
 }
 
 // Select VL as a 5 bit immediate or a value that will become a register. This
-// allows us to choose betwen VSETIVLI or VSETVLI later.
+// allows us to choose between VSETIVLI or VSETVLI later.
 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
   auto *C = dyn_cast<ConstantSDNode>(N);
   if (C && isUInt<5>(C->getZExtValue())) {
```

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 19 additions & 19 deletions
```diff
@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   if (isInt<32>(Val))
     return true;
 
-  // A constant pool entry may be more aligned thant he load we're trying to
+  // A constant pool entry may be more aligned than the load we're trying to
   // replace. If we don't support unaligned scalar mem, prefer the constant
   // pool.
   // TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
 
   if (!DstVT.isVector()) {
-    // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
+    // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
     // the result.
     if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
         Src.getValueType() == MVT::bf16) {
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
 
 // Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
 // STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
-// qNan and coverting the new source to integer and back to FP.
+// qNan and converting the new source to integer and back to FP.
 static SDValue
 lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                             const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   // Freeze the source since we are increasing the number of uses.
   Src = DAG.getFreeze(Src);
 
-  // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
+  // Convert sNan to qNan by executing x + x for all unordered element x in Src.
   MVT MaskVT = Mask.getSimpleValueType();
   SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
                                 DAG.getVTList(MaskVT, MVT::Other),
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
     unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
     // If we have to use more than one INSERT_VECTOR_ELT then this
-    // optimization is likely to increase code size; avoid peforming it in
+    // optimization is likely to increase code size; avoid performing it in
    // such a case. We can use a load from a constant pool in this case.
    if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
      return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
     int MaskSrc = M < Size ? 0 : 1;
 
     // Compute which of the two target values this index should be assigned to.
-    // This reflects whether the high elements are remaining or the low elemnts
+    // This reflects whether the high elements are remaining or the low elements
     // are remaining.
     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
 
@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
   SDValue RHS = CondV.getOperand(1);
   ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
 
-  // Special case for a select of 2 constants that have a diffence of 1.
+  // Special case for a select of 2 constants that have a difference of 1.
   // Normally this is done by DAGCombine, but if the select is introduced by
   // type legalization or op legalization, we miss it. Restricting to SETLT
   // case for now because that is what signed saturating add/sub need.
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
 // We need to convert from a scalable VF to a vsetvli with VLMax equal to
 // (vscale * VF). The vscale and VF are independent of element width. We use
 // SEW=8 for the vsetvli because it is the only element width that supports all
-// fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
+// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
 // (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
 // InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
 // SEW and LMUL are better for the surrounding vector instructions.
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     if (IsStrict) {
       SDValue Chain = N->getOperand(0);
-      // In absense of Zfh, promote f16 to f32, then convert.
+      // In absence of Zfh, promote f16 to f32, then convert.
      if (Op0.getValueType() == MVT::f16 &&
          !Subtarget.hasStdExtZfhOrZhinx()) {
        Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(Res.getValue(1));
       return;
     }
-    // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
+    // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
     // convert.
     if ((Op0.getValueType() == MVT::f16 &&
          !Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     if (!isTypeLegal(Op0VT))
       return;
 
-    // In absense of Zfh, promote f16 to f32, then convert.
+    // In absence of Zfh, promote f16 to f32, then convert.
     if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
       Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
 
@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
 static unsigned getVecReduceOpcode(unsigned Opc) {
   switch (Opc) {
   default:
-    llvm_unreachable("Unhandled binary to transfrom reduction");
+    llvm_unreachable("Unhandled binary to transform reduction");
   case ISD::ADD:
     return ISD::VECREDUCE_ADD;
   case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
   auto BinOpToRVVReduce = [](unsigned Opc) {
     switch (Opc) {
     default:
-      llvm_unreachable("Unhandled binary to transfrom reduction");
+      llvm_unreachable("Unhandled binary to transform reduction");
     case ISD::ADD:
       return RISCVISD::VECREDUCE_ADD_VL;
     case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {
 
   bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
                            const RISCVSubtarget &Subtarget) {
-    // Any f16 extension will neeed zvfh
+    // Any f16 extension will need zvfh
     if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
       return false;
     // The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
     if (Base1 != Base2)
       continue;
 
-    // Check if the offsets match the XTHeadMemPair encoding contraints.
+    // Check if the offsets match the XTHeadMemPair encoding constraints.
     bool Valid = false;
     if (MemVT == MVT::i32) {
       // Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
 }
 
 // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
-// the result is used as the conditon of a br_cc or select_cc we can invert,
+// the result is used as the condition of a br_cc or select_cc we can invert,
 // inverting the setcc is free, and Z is 0/1. Caller will invert the
 // br_cc/select_cc.
 static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
   return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
 }
 
-// Perform common combines for BR_CC and SELECT_CC condtions.
+// Perform common combines for BR_CC and SELECT_CC conditions.
 static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
                        SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
   ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       const int64_t Addend = SimpleVID->Addend;
 
       // Note: We don't need to check alignment here since (by assumption
-      // from the existance of the gather), our offsets must be sufficiently
+      // from the existence of the gather), our offsets must be sufficiently
       // aligned.
 
      const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   MVT XLenVT = Subtarget.getXLenVT();
   unsigned XLenInBytes = Subtarget.getXLen() / 8;
-  // Used with vargs to acumulate store chains.
+  // Used with vargs to accumulate store chains.
   std::vector<SDValue> OutChains;
 
   // Assign locations to all of the incoming arguments.
```

llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the
-// beginning of each basic block or function that is referenced by an indrect
+// beginning of each basic block or function that is referenced by an indirect
 // jump/call instruction.
 //
 //===----------------------------------------------------------------------===//
```

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -1069,7 +1069,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
   const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
   if (VLOp.isImm()) {
     int64_t Imm = VLOp.getImm();
-    // Conver the VLMax sentintel to X0 register.
+    // Convert the VLMax sentintel to X0 register.
     if (Imm == RISCV::VLMaxSentinel) {
       // If we know the exact VLEN, see if we can use the constant encoding
       // for the VLMAX instead. This reduces register pressure slightly.
```

llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp

Lines changed: 5 additions & 5 deletions
```diff
@@ -170,10 +170,10 @@ struct BlockData {
   // Indicates if the block uses VXRM. Uninitialized means no use.
   VXRMInfo VXRMUse;
 
-  // Indicates the VXRM output from the block. Unitialized means transparent.
+  // Indicates the VXRM output from the block. Uninitialized means transparent.
   VXRMInfo VXRMOut;
 
-  // Keeps track of the available VXRM value at the start of the basic bloc.
+  // Keeps track of the available VXRM value at the start of the basic block.
   VXRMInfo AvailableIn;
 
   // Keeps track of the available VXRM value at the end of the basic block.
@@ -384,8 +384,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
           PInfo.AvailableOut.getVXRMImm() ==
               BBInfo.AnticipatedIn.getVXRMImm())
         continue;
-      // If the predecessor anticipates this value for all its succesors,
-      // then a write to VXRM would have already occured before this block is
+      // If the predecessor anticipates this value for all its successors,
+      // then a write to VXRM would have already occurred before this block is
       // executed.
       if (PInfo.AnticipatedOut.isStatic() &&
           PInfo.AnticipatedOut.getVXRMImm() ==
@@ -429,7 +429,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
   // If all our successors anticipate a value, do the insert.
   // NOTE: It's possible that not all predecessors of our successor provide the
   // correct value. This can occur on critical edges. If we don't split the
-  // critical edge we'll also have a write vxrm in the succesor that is
+  // critical edge we'll also have a write vxrm in the successor that is
   // redundant with this one.
   if (PendingInsert ||
       (BBInfo.AnticipatedOut.isStatic() &&
```

llvm/lib/Target/RISCV/RISCVInstrFormats.td

Lines changed: 1 addition & 1 deletion
```diff
@@ -232,7 +232,7 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
   bit UsesVXRM = 0;
   let TSFlags{20} = UsesVXRM;
 
-  // Indicates whther these instructions can partially overlap between source
+  // Indicates whether these instructions can partially overlap between source
   // registers and destination registers according to the vector spec.
   // 0 -> not a vector pseudo
   // 1 -> default value for vector pseudos. not widening or narrowing.
```

llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -1516,7 +1516,7 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
   SeenMIs.erase(DefMI);
 
   // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
-  // DefMI would be invalid when tranferred inside the loop. Checking for a
+  // DefMI would be invalid when transferred inside the loop. Checking for a
   // loop is expensive, but at least remove kill flags if they are in different
   // BBs.
   if (DefMI->getParent() != MI.getParent())
```

llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp

Lines changed: 4 additions & 4 deletions
```diff
@@ -17,7 +17,7 @@
 // For case 1, if a compressed register is available, then the uncompressed
 // register is copied to the compressed register and its uses are replaced.
 //
-// For example, storing zero uses the uncompressible zero register:
+// For example, storing zero uses the incompressible zero register:
 //   sw zero, 0(a0) # if zero
 //   sw zero, 8(a0) # if zero
 //   sw zero, 4(a0) # if zero
@@ -275,7 +275,7 @@ static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
   // rather than used.
   //
   // For stores, we can change SrcDest (and Base if SrcDest == Base) but
-  // cannot resolve an uncompressible offset in this case.
+  // cannot resolve an incompressible offset in this case.
   if (isCompressibleStore(MI)) {
     if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
         !NewBaseAdjust)
@@ -313,7 +313,7 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI,
     // If RegImm.Reg is modified by this instruction, then we cannot optimize
     // past this instruction. If the register is already compressed, then it may
     // possible to optimize a large offset in the current instruction - this
-    // will have been detected by the preceeding call to
+    // will have been detected by the preceding call to
    // getRegImmPairPreventingCompression.
    if (MI.modifiesRegister(RegImm.Reg, TRI))
      break;
@@ -409,7 +409,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
     LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
     for (MachineInstr &MI : MBB) {
       // Determine if this instruction would otherwise be compressed if not for
-      // an uncompressible register or offset.
+      // an incompressible register or offset.
       RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
       if (!RegImm.Reg && RegImm.Imm == 0)
         continue;
```

llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -434,8 +434,8 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
 
       // Memory constraints have two operands.
       if (NumOps != 2 || !Flags.isMemKind()) {
-        // If the register is used by something other than a memory contraint,
-        // we should not fold.
+        // If the register is used by something other than a memory
+        // constraint, we should not fold.
         for (unsigned J = 0; J < NumOps; ++J) {
           const MachineOperand &MO = UseMI.getOperand(I + 1 + J);
           if (MO.isReg() && MO.getReg() == DestReg)
```
