
[mlir][sparse] Fix typos #67859


Merged
merged 1 commit into from
Oct 2, 2023
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -242,7 +242,7 @@ Value LoopEmitter::genSegmentHigh(OpBuilder &builder, Location loc,
{
OpBuilder::InsertionGuard guard(builder);
// Load the next coordinates only when inbound (to avoid OOB
- // acccesses).
+ // accesses).
builder.setInsertionPointToStart(ifInBound.thenBlock());
Value crd = genIndexLoad(builder, loc, coordinates, pos);
Value isSameCrd = builder.create<arith::CmpIOp>(
@@ -651,7 +651,7 @@ std::pair<Operation *, Value> LoopEmitter::emitForLoopOverTensorAtLvl(
// expression on init vals will be moved into scf.reduce and replaced with
// the block arguments when exiting the loop (see exitForLoop). This is
// needed as we can not build the actual reduction block and get the actual
- // reduction varaible before users fill parallel loop body.
+ // reduction variable before users fill parallel loop body.
for (int i = 0, e = reduc.size(); i < e; i++)
reduc[i] = parOp.getInitVals()[i];
loop = parOp;
@@ -882,7 +882,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(

// The set of induction variables for the while loop.
SmallVector<Value> ivs;
- // Segement sizes for induction variables used for different kinds of loop
+ // Segment sizes for induction variables used for different kinds of loop
// conditions.
SmallVector<unsigned> opSegSize;

@@ -1077,7 +1077,7 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
needsUniv = !spConds.empty() && needsUniv;
// The TensorLevel used for loop conditions.
// If there is any sparse level, we need to use the sparse condition.
- // If all levels are dense, we can pick arbitary one (dense slice-driven loop
+ // If all levels are dense, we can pick arbitrary one (dense slice-driven loop
// can be generated using a simple ForOp as well).
Operation *l = nullptr;
Value iv = nullptr;
@@ -1700,7 +1700,7 @@ std::pair<Operation *, ValueRange> LoopEmitter::genSliceLvlTraverseLoop(
// Delegates to users' callback.
bodyBuilder(builder, loc, iv, ifRet);
}
- // Marks this speical ifOp to avoid sparisification finalizing it.
+ // Marks this special ifOp to avoid sparisification finalizing it.
ifOp->setAttr(getLoopEmitterLoopAttrName(),
StringAttr::get(builder.getContext(), "slice"));
// Insertion point restored to after ifOp.
@@ -1741,7 +1741,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
Value pos = c0;
OpBuilder::InsertPoint ip;
SmallVector<Value> innerArgs(userReduc.begin(), userReduc.end());
- scf::ForOp outerMost = nullptr; // the outtermost loop.
+ scf::ForOp outerMost = nullptr; // the outermost loop.

// Wraps body builder and inserts a extra counting instruction at the end.
auto wrapped = [bodyBuilder](OpBuilder &builder, Location loc, Value iv,
@@ -1842,7 +1842,7 @@ ValueRange LoopEmitter::genUnResolvedSliceTreeTraverse(
OpBuilder &builder, Location loc, ValueRange ivs,
ValueRange iterArgs) -> scf::ValueVector {
for (auto em : llvm::enumerate(ivs)) {
- // Linearizes postion: pos = (pos * lvlsize) +
+ // Linearizes position: pos = (pos * lvlsize) +
// iv;
pos = MULI(pos, lvlSzs[em.index()]);
pos = ADDI(pos, em.value());
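
The comment fixed above describes ordinary row-major index folding. As a hedged illustration only (plain C++, made-up names and extents, not code from this patch):

#include <cstdint>
#include <vector>

// Illustration of the linearization in the comment: pos = (pos * lvlSize) + iv,
// applied once per level, yields the row-major offset of a coordinate tuple.
int64_t linearizePosition(const std::vector<int64_t> &coords,
                          const std::vector<int64_t> &lvlSizes) {
  int64_t pos = 0;
  for (size_t i = 0; i < coords.size(); ++i) {
    pos = pos * lvlSizes[i]; // scale by this level's size
    pos = pos + coords[i];   // then add this level's coordinate
  }
  return pos;
}

// E.g., coords (1, 2) with level sizes (3, 4) give 1 * 4 + 2 = 6.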
@@ -2072,7 +2072,7 @@ bool LoopEmitter::genSliceBegin(OpBuilder &builder, Location loc, TensorId tid,
assert(isOrderedDLT(lvlType));
if (isSingletonDLT(lvlType)) {
llvm_unreachable("TODO: dense level should be easy to support, while "
"singleton level requres more efforts");
"singleton level requires more efforts");
}

assert(!dependentLvlMap[tid][lvl].empty());
18 changes: 9 additions & 9 deletions mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -250,7 +250,7 @@ static bool findAffine(Merger &merger, TensorId tid, Level lvl, AffineExpr a,
}

if (auto binOp = a.dyn_cast<AffineBinaryOpExpr>()) {
- // We do not set dim level format for affine expresssion like d0 + d1 on
+ // We do not set dim level format for affine expression like d0 + d1 on
// either loop index at d0 or d1.
// We continue the recursion merely to check whether current affine is
// admissible or not.
@@ -309,7 +309,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
if (merger.hasDependentLvl(ldx, tensor)) {
// TODO: This can be supported by coiterate slices if the loop idx is
// appeared on affine index for different tensor, or take slice on
- // mulitple dimensions when it is on the same tensor.
+ // multiple dimensions when it is on the same tensor.
// E.g.,
// `d0 + d1` for indexing t0[lvl0] and `d0 + d2` for indexing t1[lvl0]
// d0_1 = getNextSliceOffset t0 along lvl0
@@ -357,7 +357,7 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
/// indexing-expression is `d0 + d1`)
static unsigned getNumNonTrivialIdxExpOnSparseLvls(AffineMap map,
Value tensor) {
- // The `tensor` is not guaranted to have `RankedTensorType`, therefore
+ // The `tensor` is not guaranteed to have `RankedTensorType`, therefore
// we can't use `getRankedTensorType`/`getSparseTensorType` here.
// However, we don't need to handle `StorageSpecifierType`, so we
// can use `SparseTensorType` once we guard against non-tensors.
@@ -636,7 +636,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,

// Each tensor expression and optional dimension ordering (row-major
// by default) puts an ordering constraint on the loop indices. For
- // example, the tensor expresion A_ijk forces the ordering i < j < k
+ // example, the tensor expression A_ijk forces the ordering i < j < k
// on the loop indices if no explicit dimension ordering is given.
const Level lvlRank = map.getNumResults();
assert(!enc || lvlRank == enc.getLvlRank());
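
As an aside on the ordering constraint the comment above mentions (illustrative only, not part of this patch): a dense row-major traversal of a tensor expression A_ijk naturally nests its loops i, then j, then k, which is the i < j < k order the constraint encodes. A minimal C++ sketch with made-up extents:

#include <cstdio>

// Hedged sketch: the natural row-major loop nest for A_ijk orders the
// indices as i < j < k.
int main() {
  const int I = 2, J = 3, K = 4;
  double A[2][3][4] = {};
  double sum = 0.0;
  for (int i = 0; i < I; ++i)
    for (int j = 0; j < J; ++j)
      for (int k = 0; k < K; ++k)
        sum += A[i][j][k]; // innermost index k varies fastest
  std::printf("sum = %f\n", sum);
  return 0;
}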
@@ -668,7 +668,7 @@ static void addFilterLoopBasedConstraints(CodegenEnv &env, OpOperand &t,

// Applying order constraints on every pair of dimExpr between two
// compound affine expressions can sometime too strict:
- // E.g, for [dense, dense] -> (d0 + d1, d2 + d3).
+ // E.g., for [dense, dense] -> (d0 + d1, d2 + d3).
// It is totally fine to have loop sequence d0->d2->d1->d3 instead of
// requiring d0 < d2, d1 < d2, d0 < d3, d1 < d3.
// We also relax the affine constraint when use slice-based algorithm
@@ -1316,7 +1316,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
return; // not needed at this level
assert(!env.isReduc());
// Generate start or end of an expanded access pattern. Note that because
- // an expension does not rely on the ongoing contents of the sparse storage
+ // an expansion does not rely on the ongoing contents of the sparse storage
// scheme, we can use the original tensor as incoming SSA value (which
// simplifies codegen a bit). If expansion on the actual contents is ever
// needed, we will need to use the SSA value in the insertion chain instead.
@@ -2007,9 +2007,9 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
bool isAdmissible = false;
bool hasCycle = true;

- // An const list of all masks that we used for interation graph
+ // A const list of all masks that we used for iteration graph
// computation. Must be ordered from more strict to less strict.
- // Ideally (though might not be guaranteed), the eariler a constraint mask
+ // Ideally (though might not be guaranteed), the earlier a constraint mask
// can be satisfied, the faster the generated kernel will be.
const auto allMasks = {
SortMask::kIncludeAll, SortMask::kIncludeDense,
@@ -2038,7 +2038,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
env.startEmit();
genBuffers(env, rewriter);
// TODO: Constant affine expression should be handled differently when using
- // slice-based codegen, it does not matter now becasue we already reject the
+ // slice-based codegen, it does not matter now because we already reject the
// constant expression at a earlier stage.
genInitConstantDenseAddress(env, rewriter);
genStmt(env, rewriter, env.getExprId(), 0);
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -1101,7 +1101,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
}
case TensorExp::Kind::kCmpF:
case TensorExp::Kind::kCmpI:
- // An comparison operation needs to be performed
+ // A comparison operation needs to be performed
// for the disjunction of sparse iteration spaces.
//
// x < y | !y | y |
@@ -1118,7 +1118,7 @@ LatSetId Merger::buildLattices(ExprId e, LoopId i) {
case TensorExp::Kind::kShlI:
// A shift operation by an invariant amount (viz. tensor expressions
// can only occur at the left-hand-side of the operator) can be handled
- // with the conjuction rule.
+ // with the conjunction rule.
{
const ExprId e0 = expr.children.e0;
const ExprId e1 = expr.children.e1;
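
For context on the conjunction rule named in the comment (illustration only, not part of this patch): a shift by an invariant amount c preserves implicit zeros, since 0 << c == 0, so only stored nonzeros of the shifted operand can produce nonzeros. A minimal C++ sketch over a map-based sparse vector:

#include <cstdint>
#include <map>

// Hedged sketch: because 0 << c == 0, a shift by an invariant amount c only
// needs to visit the stored nonzeros (the "conjunction" of iteration spaces).
std::map<int64_t, int64_t> shiftSparse(const std::map<int64_t, int64_t> &x,
                                       int64_t c) {
  std::map<int64_t, int64_t> out;
  for (const auto &[coord, val] : x) // iterate stored entries only
    out[coord] = val << c;
  return out;
}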