-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[RISCV] Fix typos discovered by codespell (NFC) #126191
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
Found using https://github.com/codespell-project/codespell codespell RISCV --ignore-words-list=FPR,fpr,VAs,ORE,WorstCase,hart,sie,MIs,FLE,fle,CarryIn,vor,OLT,VILL,vill,bu,pass-thru --write-changes
@llvm/pr-subscribers-backend-risc-v Author: Sudharsan Veeravalli (svs-quic) ChangesFound using https://github.com/codespell-project/codespell
Patch is 30.38 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/126191.diff 23 Files Affected:
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index c51c4201ebd18ca..d050194142a4750 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -130,7 +130,7 @@ class RISCVAsmParser : public MCTargetAsmParser {
void emitToStreamer(MCStreamer &S, const MCInst &Inst);
// Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
- // synthesize the desired immedate value into the destination register.
+ // synthesize the desired immediate value into the destination register.
void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out);
// Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
@@ -2626,7 +2626,7 @@ ParseStatus RISCVAsmParser::parseZeroOffsetMemOp(OperandVector &Operands) {
std::unique_ptr<RISCVOperand> OptionalImmOp;
if (getLexer().isNot(AsmToken::LParen)) {
- // Parse an Integer token. We do not accept arbritrary constant expressions
+ // Parse an Integer token. We do not accept arbitrary constant expressions
// in the offset field (because they may include parens, which complicates
// parsing a lot).
int64_t ImmVal;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index d5d422226281bae..62fbe55dffba1c3 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -621,7 +621,7 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
return;
}
- // We found an ICmp, do some canonicalizations.
+ // We found an ICmp, do some canonicalization.
// Adjust comparisons to use comparison with 0 if possible.
if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
@@ -735,7 +735,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return true;
}
case TargetOpcode::G_FCONSTANT: {
- // TODO: Use constant pool for complext constants.
+ // TODO: Use constant pool for complex constants.
// TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
Register DstReg = MI.getOperand(0).getReg();
const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 26725cf7decbeea..06ae8e1296e5153 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -175,7 +175,7 @@ static unsigned extractRotateInfo(int64_t Val) {
static void generateInstSeqLeadingZeros(int64_t Val, const MCSubtargetInfo &STI,
RISCVMatInt::InstSeq &Res) {
- assert(Val > 0 && "Expected postive val");
+ assert(Val > 0 && "Expected positive val");
unsigned LeadingZeros = llvm::countl_zero((uint64_t)Val);
uint64_t ShiftedVal = (uint64_t)Val << LeadingZeros;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 99f57f47835abd0..72b3e56c8a72fcd 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -21,7 +21,7 @@
using namespace llvm;
-// This option controls wether or not we emit ELF attributes for ABI features,
+// This option controls whether or not we emit ELF attributes for ABI features,
// like RISC-V atomics or X3 usage.
static cl::opt<bool> RiscvAbiAttr(
"riscv-abi-attributes",
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index b1990409754b083..7dcf2ba2ac40592 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -1089,7 +1089,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
const MachineOperand &MO = MI->getOperand(OpNo);
- // Skip vl ouput. It should be the second output.
+ // Skip vl output. It should be the second output.
if (hasVLOutput && OpNo == 1)
continue;
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index f050977c55e196a..51aa8d7d307e4c4 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1020,7 +1020,7 @@ def HasStdExtSmctrOrSsctr : Predicate<"Subtarget->hasStdExtSmctrOrSsctr()">,
// Vendor extensions
//===----------------------------------------------------------------------===//
-// Ventana Extenions
+// Ventana Extensions
def FeatureVendorXVentanaCondOps
: RISCVExtension<1, 0, "Ventana Conditional Ops">;
@@ -1337,7 +1337,7 @@ def HasVendorXqcilo
// LLVM specific features and extensions
//===----------------------------------------------------------------------===//
-// Feature32Bit exists to mark CPUs that support RV32 to distinquish them from
+// Feature32Bit exists to mark CPUs that support RV32 to distinguish them from
// tuning CPU names.
def Feature32Bit
: SubtargetFeature<"32bit", "IsRV32", "true", "Implements RV32">;
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 4beaa1e6b9e156c..6abf45591d78ec1 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1182,7 +1182,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
if (getLibCallID(MF, CSI) != -1) {
// tail __riscv_restore_[0-12] instruction is considered as a terminator,
- // therefor it is unnecessary to place any CFI instructions after it. Just
+ // therefore it is unnecessary to place any CFI instructions after it. Just
// deallocate stack if needed and return.
if (StackSize != 0)
deallocateStack(MF, MBB, MBBI, DL, StackSize,
diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
index 39c0af798597190..82c0d8d4738a41b 100644
--- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -131,7 +131,7 @@ static std::pair<Value *, Value *> matchStridedStart(Value *Start,
}
// Not a constant, maybe it's a strided constant with a splat added or
- // multipled.
+ // multiplied.
auto *BO = dyn_cast<BinaryOperator>(Start);
if (!BO || (BO->getOpcode() != Instruction::Add &&
BO->getOpcode() != Instruction::Or &&
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 82fb8fb8ccc69bb..ec2e8f1d50264cd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3499,7 +3499,7 @@ bool RISCVDAGToDAGISel::selectSimm5Shl2(SDValue N, SDValue &Simm5,
}
// Select VL as a 5 bit immediate or a value that will become a register. This
-// allows us to choose betwen VSETIVLI or VSETVLI later.
+// allows us to choose between VSETIVLI or VSETVLI later.
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d91ba33c235966a..13ce566f8def6c3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2077,7 +2077,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
if (isInt<32>(Val))
return true;
- // A constant pool entry may be more aligned thant he load we're trying to
+ // A constant pool entry may be more aligned than the load we're trying to
// replace. If we don't support unaligned scalar mem, prefer the constant
// pool.
// TODO: Can the caller pass down the alignment?
@@ -2921,7 +2921,7 @@ static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
if (!DstVT.isVector()) {
- // For bf16 or for f16 in absense of Zfh, promote to f32, then saturate
+ // For bf16 or for f16 in absence of Zfh, promote to f32, then saturate
// the result.
if ((Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) ||
Src.getValueType() == MVT::bf16) {
@@ -3186,7 +3186,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND
// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting sNan of the source to
-// qNan and coverting the new source to integer and back to FP.
+// qNan and converting the new source to integer and back to FP.
static SDValue
lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
@@ -3206,7 +3206,7 @@ lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
// Freeze the source since we are increasing the number of uses.
Src = DAG.getFreeze(Src);
- // Covert sNan to qNan by executing x + x for all unordered elemenet x in Src.
+ // Convert sNan to qNan by executing x + x for all unordered element x in Src.
MVT MaskVT = Mask.getSimpleValueType();
SDValue Unorder = DAG.getNode(RISCVISD::STRICT_FSETCC_VL, DL,
DAG.getVTList(MaskVT, MVT::Other),
@@ -3724,7 +3724,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
// If we have to use more than one INSERT_VECTOR_ELT then this
- // optimization is likely to increase code size; avoid peforming it in
+ // optimization is likely to increase code size; avoid performing it in
// such a case. We can use a load from a constant pool in this case.
if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
return SDValue();
@@ -4618,7 +4618,7 @@ static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
int MaskSrc = M < Size ? 0 : 1;
// Compute which of the two target values this index should be assigned to.
- // This reflects whether the high elements are remaining or the low elemnts
+ // This reflects whether the high elements are remaining or the low elements
// are remaining.
int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
@@ -8567,7 +8567,7 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue RHS = CondV.getOperand(1);
ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
- // Special case for a select of 2 constants that have a diffence of 1.
+ // Special case for a select of 2 constants that have a difference of 1.
// Normally this is done by DAGCombine, but if the select is introduced by
// type legalization or op legalization, we miss it. Restricting to SETLT
// case for now because that is what signed saturating add/sub need.
@@ -9717,7 +9717,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
// We need to convert from a scalable VF to a vsetvli with VLMax equal to
// (vscale * VF). The vscale and VF are independent of element width. We use
// SEW=8 for the vsetvli because it is the only element width that supports all
-// fractional LMULs. The LMUL is choosen so that with SEW=8 the VLMax is
+// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
// (vscale * VF). Where vscale is defined as VLEN/RVVBitsPerBlock. The
// InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
// SEW and LMUL are better for the surrounding vector instructions.
@@ -13203,7 +13203,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
return;
if (IsStrict) {
SDValue Chain = N->getOperand(0);
- // In absense of Zfh, promote f16 to f32, then convert.
+ // In absence of Zfh, promote f16 to f32, then convert.
if (Op0.getValueType() == MVT::f16 &&
!Subtarget.hasStdExtZfhOrZhinx()) {
Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
@@ -13220,7 +13220,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(Res.getValue(1));
return;
}
- // For bf16, or f16 in absense of Zfh, promote [b]f16 to f32 and then
+ // For bf16, or f16 in absence of Zfh, promote [b]f16 to f32 and then
// convert.
if ((Op0.getValueType() == MVT::f16 &&
!Subtarget.hasStdExtZfhOrZhinx()) ||
@@ -13263,7 +13263,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
if (!isTypeLegal(Op0VT))
return;
- // In absense of Zfh, promote f16 to f32, then convert.
+ // In absence of Zfh, promote f16 to f32, then convert.
if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
@@ -13890,7 +13890,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
static unsigned getVecReduceOpcode(unsigned Opc) {
switch (Opc) {
default:
- llvm_unreachable("Unhandled binary to transfrom reduction");
+ llvm_unreachable("Unhandled binary to transform reduction");
case ISD::ADD:
return ISD::VECREDUCE_ADD;
case ISD::UMAX:
@@ -14020,7 +14020,7 @@ static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
auto BinOpToRVVReduce = [](unsigned Opc) {
switch (Opc) {
default:
- llvm_unreachable("Unhandled binary to transfrom reduction");
+ llvm_unreachable("Unhandled binary to transform reduction");
case ISD::ADD:
return RISCVISD::VECREDUCE_ADD_VL;
case ISD::UMAX:
@@ -15577,7 +15577,7 @@ struct NodeExtensionHelper {
bool isSupportedFPExtend(SDNode *Root, MVT NarrowEltVT,
const RISCVSubtarget &Subtarget) {
- // Any f16 extension will neeed zvfh
+ // Any f16 extension will need zvfh
if (NarrowEltVT == MVT::f16 && !Subtarget.hasVInstructionsF16())
return false;
// The only bf16 extension we can do is vfmadd_vl -> vfwmadd_vl with
@@ -16326,7 +16326,7 @@ static SDValue performMemPairCombine(SDNode *N,
if (Base1 != Base2)
continue;
- // Check if the offsets match the XTHeadMemPair encoding contraints.
+ // Check if the offsets match the XTHeadMemPair encoding constraints.
bool Valid = false;
if (MemVT == MVT::i32) {
// Check for adjacent i32 values and a 2-bit index.
@@ -16954,7 +16954,7 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
}
// Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y)), Z) if
-// the result is used as the conditon of a br_cc or select_cc we can invert,
+// the result is used as the condition of a br_cc or select_cc we can invert,
// inverting the setcc is free, and Z is 0/1. Caller will invert the
// br_cc/select_cc.
static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
@@ -17015,7 +17015,7 @@ static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {
return DAG.getNode(Opc, SDLoc(Cond), VT, Setcc, Xor.getOperand(0));
}
-// Perform common combines for BR_CC and SELECT_CC condtions.
+// Perform common combines for BR_CC and SELECT_CC conditions.
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
@@ -18603,7 +18603,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
const int64_t Addend = SimpleVID->Addend;
// Note: We don't need to check alignment here since (by assumption
- // from the existance of the gather), our offsets must be sufficiently
+ // from the existence of the gather), our offsets must be sufficiently
// aligned.
const EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -20639,7 +20639,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
EVT PtrVT = getPointerTy(DAG.getDataLayout());
MVT XLenVT = Subtarget.getXLenVT();
unsigned XLenInBytes = Subtarget.getXLen() / 8;
- // Used with vargs to acumulate store chains.
+ // Used with vargs to accumulate store chains.
std::vector<SDValue> OutChains;
// Assign locations to all of the incoming arguments.
diff --git a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
index c006fba4af4bca5..4660a975b20ae55 100644
--- a/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
+++ b/llvm/lib/Target/RISCV/RISCVIndirectBranchTracking.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// The pass adds LPAD (AUIPC with rs1 = X0) machine instructions at the
-// beginning of each basic block or function that is referenced by an indrect
+// beginning of each basic block or function that is referenced by an indirect
// jump/call instruction.
//
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index b56a39d8316d115..4a74906ed3cc30d 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1069,7 +1069,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
if (VLOp.isImm()) {
int64_t Imm = VLOp.getImm();
- // Conver the VLMax sentintel to X0 register.
+ // Convert the VLMax sentinel to X0 register.
if (Imm == RISCV::VLMaxSentinel) {
// If we know the exact VLEN, see if we can use the constant encoding
// for the VLMAX instead. This reduces register pressure slightly.
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index fe593a3cabad73a..7df04fc225b0ba7 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -170,10 +170,10 @@ struct BlockData {
// Indicates if the block uses VXRM. Uninitialized means no use.
VXRMInfo VXRMUse;
- // Indicates the VXRM output from the block. Unitialized means transparent.
+ // Indicates the VXRM output from the block. Uninitialized means transparent.
VXRMInfo VXRMOut;
- // Keeps track of the available VXRM value at the start of the basic bloc.
+ // Keeps track of the available VXRM value at the start of the basic block.
VXRMInfo AvailableIn;
// Keeps track of the available VXRM value at the end of the basic block.
@@ -384,8 +384,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
PInfo.AvailableOut.getVXRMImm() ==
BBInfo.AnticipatedIn.getVXRMImm())
continue;
- // If the predecessor anticipates this value for all its succesors,
- // then a write to VXRM would have already occured before this block is
+ // If the predecessor anticipates this value for all its successors,
+ // then a write to VXRM would have already occurred before this block is
// executed.
if (PInfo.AnticipatedOut.isStatic() &&
PInfo.AnticipatedOut.getVXRMImm() ==
@@ -429,7 +429,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
// If all our successors anticipate a value, do the insert.
// NOTE: It's possible that not all predecessors of our successor provide the
// correct value. This can occur on critical edges. If we don't split the
- // critical edge we'll also have a write vxrm in the succesor that is
+ // critical edge we'll also have a write vxrm in the successor that is
// redundant with this one.
if (PendingInsert ||
(BBInfo.AnticipatedOut.isStatic() &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 013c26c72bfd554..cea28bdce284cbd 1006...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Found using https://github.com/codespell-project/codespell ``` codespell RISCV --write-changes \ --ignore-words-list=FPR,fpr,VAs,ORE,WorstCase,hart,sie,MIs,FLE,fle,CarryIn,vor,OLT,VILL,vill,bu,pass-thru ```
Found using https://github.com/codespell-project/codespell