Skip to content

Commit 2cb99df

Browse files
[mlir][sparse] Fix typos (#67859)
1 parent d2e8517 commit 2cb99df

File tree

3 files changed

+19
-19
lines changed

3 files changed

+19
-19
lines changed

mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@ Value LoopEmitter::genSegmentHigh(OpBuilder &builder, Location loc,
242242
{
243243
OpBuilder::InsertionGuard guard(builder);
244244
// Load the next coordinates only when inbound (to avoid OOB
245-
// acccesses).
245+
// accesses).
246246
builder.setInsertionPointToStart(ifInBound.thenBlock());
247247
Value crd = genIndexLoad(builder, loc, coordinates, pos);
248248
Value isSameCrd = builder.create<arith::CmpIOp>(
@@ -651,7 +651,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
651651
// expression on init vals will be moved into scf.reduce and replaced with
652652
// the block arguments when exiting the loop (see exitForLoop). This is
653653
// needed as we can not build the actual reduction block and get the actual
654-
// reduction varaible before users fill parallel loop body.
654+
// reduction variable before users fill parallel loop body.
655655
for (int i = 0, e = reduc.size(); i < e; i++)
656656
reduc[i] = parOp.getInitVals()[i];
657657
loop = parOp;
@@ -882,7 +882,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(
882882

883883
// The set of induction variables for the while loop.
884884
SmallVector<Value> ivs;
885-
// Segement sizes for induction variables used for different kinds of loop
885+
// Segment sizes for induction variables used for different kinds of loop
886886
// conditions.
887887
SmallVector<unsigned> opSegSize;
888888

@@ -1077,7 +1077,7 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
10771077
needsUniv = !spConds.empty() && needsUniv;
10781078
// The TensorLevel used for loop conditions.
10791079
// If there is any sparse level, we need to use the sparse condition.
1080-
// If all levels are dense, we can pick arbitary one (dense slice-driven loop
1080+
// If all levels are dense, we can pick arbitrary one (dense slice-driven loop
10811081
// can be generated using a simple ForOp as well).
10821082
Operation *l = nullptr;
10831083
Value iv = nullptr;
@@ -1700,7 +1700,7 @@ std::pair<Operation *, ValueRange> LoopEmitter::genSliceLvlTraverseLoop(
17001700
// Delegates to users' callback.
17011701
bodyBuilder(builder, loc, iv, ifRet);
17021702
}
1703-
// Marks this speical ifOp to avoid sparisification finalizing it.
1703+
// Marks this special ifOp to avoid sparsification finalizing it.
17041704
ifOp->setAttr(getLoopEmitterLoopAttrName(),
17051705
StringAttr::get(builder.getContext(), "slice"));
17061706
// Insertion point restored to after ifOp.
@@ -1741,7 +1741,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
17411741
Value pos = c0;
17421742
OpBuilder::InsertPoint ip;
17431743
SmallVector<Value> innerArgs(userReduc.begin(), userReduc.end());
1744-
scf::ForOp outerMost = nullptr; // the outtermost loop.
1744+
scf::ForOp outerMost = nullptr; // the outermost loop.
17451745

17461746
// Wraps body builder and inserts a extra counting instruction at the end.
17471747
auto wrapped = [bodyBuilder](OpBuilder &builder, Location loc, Value iv,
@@ -1842,7 +1842,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
18421842
OpBuilder &builder, Location loc, ValueRange ivs,
18431843
ValueRange iterArgs) -> scf::ValueVector {
18441844
for (auto em : llvm::enumerate(ivs)) {
1845-
// Linearizes postion: pos = (pos * lvlsize) +
1845+
// Linearizes position: pos = (pos * lvlsize) +
18461846
// iv;
18471847
pos = MULI(pos, lvlSzs[em.index()]);
18481848
pos = ADDI(pos, em.value());
@@ -2072,7 +2072,7 @@ bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
20722072
assert(isOrderedDLT(lvlType));
20732073
if (isSingletonDLT(lvlType)) {
20742074
llvm_unreachable("TODO: dense level should be easy to support, while "
2075-
"singleton level requres more efforts");
2075+
"singleton level requires more effort");
20762076
}
20772077

20782078
assert(!dependentLvlMap[tid][lvl].empty());

mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
250250
}
251251

252252
if (auto binOp = a.dyn_cast<AffineBinaryOpExpr>()) {
253-
// We do not set dim level format for affine expresssion like d0 + d1 on
253+
// We do not set dim level format for affine expression like d0 + d1 on
254254
// either loop index at d0 or d1.
255255
// We continue the recursion merely to check whether current affine is
256256
// admissible or not.
@@ -309,7 +309,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
309309
if (merger.hasDependentLvl(ldx, tensor)) {
310310
// TODO: This can be supported by coiterate slices if the loop idx is
311311
// appeared on affine index for different tensor, or take slice on
312-
// mulitple dimensions when it is on the same tensor.
312+
// multiple dimensions when it is on the same tensor.
313313
// E.g.,
314314
// `d0 + d1` for indexing t0[lvl0] and `d0 + d2` for indexing t1[lvl0]
315315
// d0_1 = getNextSliceOffset t0 along lvl0
@@ -357,7 +357,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
357357
/// indexing-expression is `d0 + d1`)
358358
static unsigned getNumNonTrivialIdxExpOnSparseLvls(AffineMap map,
359359
Value tensor) {
360-
// The `tensor` is not guaranted to have `RankedTensorType`, therefore
360+
// The `tensor` is not guaranteed to have `RankedTensorType`, therefore
361361
// we can't use `getRankedTensorType`/`getSparseTensorType` here.
362362
// However, we don't need to handle `StorageSpecifierType`, so we
363363
// can use `SparseTensorType` once we guard against non-tensors.
@@ -636,7 +636,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
636636

637637
// Each tensor expression and optional dimension ordering (row-major
638638
// by default) puts an ordering constraint on the loop indices. For
639-
// example, the tensor expresion A_ijk forces the ordering i < j < k
639+
// example, the tensor expression A_ijk forces the ordering i < j < k
640640
// on the loop indices if no explicit dimension ordering is given.
641641
const Level lvlRank = map.getNumResults();
642642
assert(!enc || lvlRank == enc.getLvlRank());
@@ -668,7 +668,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
668668

669669
// Applying order constraints on every pair of dimExpr between two
670670
// compound affine expressions can sometimes be too strict:
671-
// E.g, for [dense, dense] -> (d0 + d1, d2 + d3).
671+
// E.g., for [dense, dense] -> (d0 + d1, d2 + d3).
672672
// It is totally fine to have loop sequence d0->d2->d1->d3 instead of
673673
// requiring d0 < d2, d1 < d2, d0 < d3, d1 < d3.
674674
// We also relax the affine constraint when use slice-based algorithm
@@ -1316,7 +1316,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
13161316
return; // not needed at this level
13171317
assert(!env.isReduc());
13181318
// Generate start or end of an expanded access pattern. Note that because
1319-
// an expension does not rely on the ongoing contents of the sparse storage
1319+
// an expansion does not rely on the ongoing contents of the sparse storage
13201320
// scheme, we can use the original tensor as incoming SSA value (which
13211321
// simplifies codegen a bit). If expansion on the actual contents is ever
13221322
// needed, we will need to use the SSA value in the insertion chain instead.
@@ -2007,9 +2007,9 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
20072007
bool isAdmissible = false;
20082008
bool hasCycle = true;
20092009

2010-
// An const list of all masks that we used for interation graph
2010+
// A const list of all masks that we used for iteration graph
20112011
// computation. Must be ordered from more strict to less strict.
2012-
// Ideally (though might not be guaranteed), the eariler a constraint mask
2012+
// Ideally (though might not be guaranteed), the earlier a constraint mask
20132013
// can be satisfied, the faster the generated kernel will be.
20142014
const auto allMasks = {
20152015
SortMask::kIncludeAll, SortMask::kIncludeDense,
@@ -2038,7 +2038,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
20382038
env.startEmit();
20392039
genBuffers(env, rewriter);
20402040
// TODO: Constant affine expression should be handled differently when using
2041-
// slice-based codegen, it does not matter now becasue we already reject the
2041+
// slice-based codegen, it does not matter now because we already reject the
20422042
// constant expression at an earlier stage.
20432043
genInitConstantDenseAddress(env, rewriter);
20442044
genStmt(env, rewriter, env.getExprId(), 0);

mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1101,7 +1101,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
11011101
}
11021102
case TensorExp::Kind::kCmpF:
11031103
case TensorExp::Kind::kCmpI:
1104-
// An comparison operation needs to be performed
1104+
// A comparison operation needs to be performed
11051105
// for the disjunction of sparse iteration spaces.
11061106
//
11071107
// x < y | !y | y |
@@ -1118,7 +1118,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
11181118
case TensorExp::Kind::kShlI:
11191119
// A shift operation by an invariant amount (viz. tensor expressions
11201120
// can only occur at the left-hand-side of the operator) can be handled
1121-
// with the conjuction rule.
1121+
// with the conjunction rule.
11221122
{
11231123
const ExprId e0 = expr.children.e0;
11241124
const ExprId e1 = expr.children.e1;

0 commit comments

Comments
 (0)