@@ -250,7 +250,7 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
}
if (auto binOp = a.dyn_cast<AffineBinaryOpExpr>()) {
- // We do not set dim level format for affine expresssion like d0 + d1 on
+ // We do not set dim level format for affine expression like d0 + d1 on
// either loop index at d0 or d1.
// We continue the recursion merely to check whether current affine is
// admissible or not.
@@ -309,7 +309,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
if (merger.hasDependentLvl(ldx, tensor)) {
// TODO: This can be supported by coiterate slices if the loop idx is
// appeared on affine index for different tensor, or take slice on
- // mulitple dimensions when it is on the same tensor.
+ // multiple dimensions when it is on the same tensor.
// E.g.,
// `d0 + d1` for indexing t0[lvl0] and `d0 + d2` for indexing t1[lvl0]
// d0_1 = getNextSliceOffset t0 along lvl0
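
(Editorial aside, not part of the patch: the `d0 + d1` indexing mentioned in the TODO above is a compound affine index on a single tensor level. A minimal dense-code sketch of what such an index means is shown below; every name and size in it is hypothetical.)

    // Minimal sketch, assuming a dense 2-D input `a` and a 1-D output `t0`
    // whose only level is addressed by the compound expression d0 + d1.
    // Illustrative only; not taken from the patch or from MLIR itself.
    #include <cstdio>
    #include <vector>

    int main() {
      constexpr int N0 = 3, N1 = 4;
      // t0[lvl0] is indexed by d0 + d1, so it needs N0 + N1 - 1 positions.
      std::vector<double> t0(N0 + N1 - 1, 0.0);
      std::vector<double> a(N0 * N1, 1.0);
      for (int d0 = 0; d0 < N0; ++d0)
        for (int d1 = 0; d1 < N1; ++d1)
          t0[d0 + d1] += a[d0 * N1 + d1]; // one output level, two loop indices
      for (double v : t0)
        std::printf("%g ", v);
      std::printf("\n");
      return 0;
    }
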
@@ -357,7 +357,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
/// indexing-expression is `d0 + d1`)
static unsigned getNumNonTrivialIdxExpOnSparseLvls(AffineMap map,
Value tensor) {
- // The `tensor` is not guaranted to have `RankedTensorType`, therefore
+ // The `tensor` is not guaranteed to have `RankedTensorType`, therefore
// we can't use `getRankedTensorType`/`getSparseTensorType` here.
// However, we don't need to handle `StorageSpecifierType`, so we
// can use `SparseTensorType` once we guard against non-tensors.
@@ -636,7 +636,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
// Each tensor expression and optional dimension ordering (row-major
// by default) puts an ordering constraint on the loop indices. For
- // example, the tensor expresion A_ijk forces the ordering i < j < k
+ // example, the tensor expression A_ijk forces the ordering i < j < k
// on the loop indices if no explicit dimension ordering is given.
const Level lvlRank = map.getNumResults();
assert(!enc || lvlRank == enc.getLvlRank());
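
(Editorial aside, not part of the patch: the comment in this hunk says a tensor expression A_ijk forces the loop ordering i < j < k under the default row-major ordering. A minimal sketch of that natural loop nest is below; names and sizes are hypothetical.)

    // Minimal sketch of the row-major loop nest implied by A_ijk: the loops
    // appear in the order i, then j, then k, so the innermost loop walks
    // contiguous memory. Illustrative only; not taken from the patch.
    #include <cstdio>
    #include <vector>

    int main() {
      constexpr int I = 2, J = 3, K = 4;
      std::vector<double> A(I * J * K);
      for (int i = 0; i < I; ++i)
        for (int j = 0; j < J; ++j)
          for (int k = 0; k < K; ++k)
            A[(i * J + j) * K + k] = i + j + k; // row-major linearization
      std::printf("A[1][2][3] = %g\n", A[(1 * J + 2) * K + 3]);
      return 0;
    }
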
@@ -668,7 +668,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,
// Applying order constraints on every pair of dimExpr between two
// compound affine expressions can sometime too strict:
- // E.g, for [dense, dense] -> (d0 + d1, d2 + d3).
+ // E.g., for [dense, dense] -> (d0 + d1, d2 + d3).
// It is totally fine to have loop sequence d0->d2->d1->d3 instead of
// requiring d0 < d2, d1 < d2, d0 < d3, d1 < d3.
// We also relax the affine constraint when use slice-based algorithm
@@ -1316,7 +1316,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
return; // not needed at this level
assert(!env.isReduc());
// Generate start or end of an expanded access pattern. Note that because
- // an expension does not rely on the ongoing contents of the sparse storage
+ // an expansion does not rely on the ongoing contents of the sparse storage
// scheme, we can use the original tensor as incoming SSA value (which
// simplifies codegen a bit). If expansion on the actual contents is ever
// needed, we will need to use the SSA value in the insertion chain instead.
@@ -2007,9 +2007,9 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
bool isAdmissible = false;
bool hasCycle = true;
- // An const list of all masks that we used for interation graph
+ // A const list of all masks that we used for iteration graph
// computation. Must be ordered from more strict to less strict.
- // Ideally (though might not be guaranteed), the eariler a constraint mask
+ // Ideally (though might not be guaranteed), the earlier a constraint mask
// can be satisfied, the faster the generated kernel will be.
const auto allMasks = {
SortMask::kIncludeAll, SortMask::kIncludeDense,
@@ -2038,7 +2038,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
env.startEmit();
genBuffers(env, rewriter);
// TODO: Constant affine expression should be handled differently when using
- // slice-based codegen, it does not matter now becasue we already reject the
+ // slice-based codegen, it does not matter now because we already reject the
// constant expression at a earlier stage.
genInitConstantDenseAddress(env, rewriter);
genStmt(env, rewriter, env.getExprId(), 0);