Skip to content

Commit 5b0db27

Browse files
authored
[mlir][sparse] remove LoopOrd type (#74540)
Rationale: We no longer perform a topological sort during sparsification, so LoopId == LoopOrd for all methods. This first revision removes the LoopOrd type. A follow-up revision will simplify some other remaining constructs that deal with loop order (e.g. `at` and `ldx`).
1 parent 341a51a commit 5b0db27

File tree

4 files changed

+19
-52
lines changed

4 files changed

+19
-52
lines changed

mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -206,9 +206,8 @@ void CodegenEnv::updateInsertionChain(Value chain) {
206206
insChain = chain;
207207
}
208208

209-
// FIXME: clarify what this "rank" is really supposed to mean/be.
210-
bool CodegenEnv::atExpandLevel(OpOperand *o, unsigned rank, LoopOrd n) const {
211-
return sparseOut == o && outerParNest == static_cast<LoopOrd>(rank - 1) &&
209+
bool CodegenEnv::atExpandLevel(OpOperand *o, unsigned rank, LoopId n) const {
210+
return sparseOut == o && outerParNest == static_cast<LoopId>(rank - 1) &&
212211
outerParNest == n;
213212
}
214213

mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -118,9 +118,7 @@ class CodegenEnv {
118118
/// It also sets the sparseOut if the output tensor is sparse.
119119
bool isAdmissibleTensorExp(ExprId e);
120120

121-
/// Returns the induction-variable for the loop identified by the given
122-
/// `LoopId`. This method handles application of the topological sort
123-
/// in order to convert the `LoopId` into the corresponding `LoopOrd`.
121+
/// Returns the induction-variable for the given loop.
124122
Value getLoopVar(LoopId i) const;
125123

126124
//
@@ -133,8 +131,7 @@ class CodegenEnv {
133131
Value getInsertionChain() const { return insChain; }
134132
void updateInsertionChain(Value chain);
135133

136-
// FIXME: clarify what this "rank" is really supposed to mean/be.
137-
bool atExpandLevel(OpOperand *o, unsigned rank, LoopOrd n) const;
134+
bool atExpandLevel(OpOperand *o, unsigned rank, LoopId n) const;
138135
void startExpand(Value values, Value filled, Value added, Value count);
139136
bool isExpand() const { return expValues != nullptr; }
140137
void updateExpandCount(Value count);
@@ -180,7 +177,7 @@ class CodegenEnv {
180177
// expansion in the innermost loop nest (`expValues` through `expCount`).
181178
OpOperand *sparseOut;
182179
// The count of outer non-filter loops, as defined by `isAdmissibleTopoOrder`.
183-
LoopOrd outerParNest;
180+
LoopId outerParNest;
184181
Value insChain;
185182
Value expValues;
186183
Value expFilled;

mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h

Lines changed: 7 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -19,31 +19,9 @@
1919
namespace mlir {
2020
namespace sparse_tensor {
2121

22-
//===----------------------------------------------------------------------===//
23-
/// The position of a loop in the loop-stack, or the position of a
24-
/// `LoopId` in a topologically-sorted list of `LoopId`s.
25-
///
26-
/// Although this type may have the same cardinality as `LoopId`, it must
27-
/// not be confused with that type. The `LoopId` type is used by the `Merger`
28-
/// as a unique identifier for loop-variables, regardless of the ordering
29-
/// of those loops. Whereas the `LoopOrd` type is used by the `LoopEmitter`
30-
/// (and `CodegenEnv`) to refer to the actual order in which loops are
31-
/// generated.
32-
///
33-
/// TODO: further explicate the correspondences between these various
34-
/// types. In particular, since the `$dim` argument to `linalg::IndexOp`
35-
/// is a De Bruijn index, it seems like that should correspond to `LoopOrd`,
36-
/// and yet the `Merger` has that correspond with `LoopId` instead.
37-
/// In addition `LoopEmitter::genAffine` has `AffineDimExpr::position`
38-
/// correspond to `LoopId`, however it is unclear what the providence
39-
/// of those `AffineDimExpr` is.
40-
//
41-
// TODO: use a struct/class rather than a typedef, so that we can actually
42-
// typecheck this to avoid mixups in the code.
43-
using LoopOrd = unsigned;
44-
4522
// A compressed <tensor id, level> pair.
4623
using TensorLevel = unsigned;
24+
4725
//===----------------------------------------------------------------------===//
4826
// SparseTensorLoopEmiter class, manages sparse tensors and helps to
4927
// generate loop structure to (co)-iterate sparse tensors.
@@ -108,9 +86,7 @@ class LoopEmitter {
10886
/// to the position of that tensor `Value` in the array). Setting
10987
/// `isSparseOut` indicates that the sparse output tensor is empty,
11088
/// so the loop emitter will generate loops over it according to the
111-
/// level-sizes. The `topSort` array specifies the actual order in
112-
/// which loops are generated, thus providing a mapping from `LoopOrd`
113-
/// to `LoopId`.
89+
/// level-sizes.
11490
void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
11591
bool hasOutput = false, bool isSparseOut = false,
11692
unsigned numLoops = 0, DependentLvlGetter getter = nullptr);
@@ -193,21 +169,16 @@ class LoopEmitter {
193169
}
194170

195171
/// Fills the out-parameter with the loop induction variables for all
196-
/// loops in the current loop-stack. The variables are given in the
197-
/// same order as the loop-stack, hence `ivs` should be indexed into
198-
/// by `LoopOrd` (not `LoopId`).
172+
/// loops in the current loop-stack.
199173
SmallVector<Value> getLoopIVs() const {
200174
return llvm::to_vector(getLoopIVsRange());
201175
}
202176

203-
/// Gets the current depth of the loop-stack. The result is given
204-
/// the type `LoopOrd` for the same reason as one-past-the-end iterators.
205-
LoopOrd getCurrentDepth() const {
206-
return llvm::range_size(getLoopIVsRange());
207-
}
177+
/// Gets the current depth of the loop-stack.
178+
LoopId getCurrentDepth() const { return llvm::range_size(getLoopIVsRange()); }
208179

209-
/// Gets loop induction variable for the given `LoopOrd`.
210-
Value getLoopIV(LoopOrd n) const {
180+
/// Gets loop induction variable for the given loop
181+
Value getLoopIV(LoopId n) const {
211182
if (n >= getCurrentDepth())
212183
return Value();
213184
auto it = getLoopIVsRange().begin();

mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -411,7 +411,7 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
411411
Location loc = op.getLoc();
412412
// Direct insertion in lexicographic coordinate order.
413413
if (!env.isExpand()) {
414-
const LoopOrd numLoops = op.getRank(t);
414+
const LoopId numLoops = op.getRank(t);
415415
// Retrieves the first `numLoop` induction variables.
416416
SmallVector<Value> ivs = llvm::to_vector(
417417
llvm::drop_end(env.emitter().getLoopIVsRange(),
@@ -713,7 +713,7 @@ static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
713713
}
714714

715715
/// Generates an expanded access pattern in innermost dimension.
716-
static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
716+
static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId at,
717717
bool atStart) {
718718
linalg::GenericOp op = env.op();
719719
OpOperand *lhs = op.getDpsInitOperand(0);
@@ -740,7 +740,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
740740
r.getResult(3));
741741
} else {
742742
SmallVector<Value> indices;
743-
for (LoopOrd i = 0; i < at; i++)
743+
for (LoopId i = 0; i < at; i++)
744744
indices.push_back(env.emitter().getLoopIV(i));
745745
Value values = env.getExpandValues();
746746
Value filled = env.getExpandFilled();
@@ -815,7 +815,7 @@ static Operation *genCoIteration(CodegenEnv &env, OpBuilder &builder,
815815

816816
/// Generates a for-loop or a while-loop, depending on whether it implements
817817
/// singleton iteration or co-iteration over the given conjunction.
818-
static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
818+
static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopId at,
819819
bool needsUniv, ArrayRef<TensorLevel> tidLvls) {
820820
bool tryParallel = shouldTryParallize(env, at, at == 0, tidLvls);
821821
return genCoIteration(env, builder, at, tidLvls, tryParallel, needsUniv);
@@ -943,7 +943,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
943943
/// Starts a loop sequence at given level. Returns true if
944944
/// the universal loop index must be maintained at this level.
945945
static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
946-
LoopOrd idx, LoopId ldx, LatSetId lts) {
946+
LoopId idx, LoopId ldx, LatSetId lts) {
947947
assert(!env.getLoopVar(idx));
948948
// Emit invariants at this loop sequence level.
949949
genInvariants(env, builder, exp, ldx, /*atStart=*/true);
@@ -1127,7 +1127,7 @@ static bool translateBitsToTidLvlPairs(
11271127

11281128
/// Starts a single loop in current sequence.
11291129
static std::pair<Operation *, bool> startLoop(CodegenEnv &env,
1130-
OpBuilder &builder, LoopOrd at,
1130+
OpBuilder &builder, LoopId at,
11311131
LatPointId li, bool needsUniv) {
11321132
// The set of tensors + lvls to generate loops on
11331133
SmallVector<TensorLevel> tidLvls;
@@ -1199,7 +1199,7 @@ static void endLoopSeq(CodegenEnv &env, OpBuilder &builder, unsigned exp,
11991199
/// to manage the complexity of implementing co-iteration over unions
12001200
/// and intersections of sparse iterations spaces.
12011201
static void genStmt(CodegenEnv &env, RewriterBase &rewriter, ExprId exp,
1202-
LoopOrd at) {
1202+
LoopId at) {
12031203
// At each leaf, assign remaining tensor (sub)expression to output tensor.
12041204
if (at == env.getLoopNum()) {
12051205
Value rhs = genExp(env, rewriter, exp);

0 commit comments

Comments
 (0)