
Commit a7bf2e5

Author: Peiming Liu
[mlir][sparse] refactoring isAdmissibleTensorExp into codegen
This patch moves some utils into the CodegenEnv class. It should make the code easier to follow, and it eliminates several indirect value assignments that use `ptr**`.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D142040
Parent: c549da9
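
A rough caller-side sketch of the new flow is shown below. This is illustrative only: the actual driver changes live in Sparsification.cpp (the third changed file, not reproduced here), and the helper name `sparsifyKernel` and its control flow are hypothetical; only the CodegenEnv methods it calls appear in the diff.

// Illustrative sketch (hypothetical caller, not part of this commit): with the
// refactoring, admissibility results are cached inside CodegenEnv instead of
// being returned through pointer-to-pointer out-parameters.
#include "CodegenEnv.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

static LogicalResult sparsifyKernel(CodegenEnv &env) {
  // Builds the tensor expression for the linalg op and verifies it is
  // admissible; on success the expression and (for a sparse output) the
  // sparseOut operand are recorded in the environment.
  if (failed(env.initTensorExp()))
    return failure();

  // ... compute a topological loop order for the iteration graph ...

  // For a sparse output, the loop order must still admit insertions in
  // lexicographic index order; outerParNest is recorded on success.
  if (!env.isAdmissibleTopoOrder())
    return failure();

  // Emission reads sparseOut/outerParNest from the environment, so
  // startEmit() no longer takes them as arguments.
  env.startEmit();

  // ... recursively generate loops/statements for env.getTensorExp() ...
  return success();
}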

3 files changed: 199 additions & 169 deletions


mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp

Lines changed: 91 additions & 5 deletions

@@ -7,11 +7,25 @@
 //===----------------------------------------------------------------------===//
 
 #include "CodegenEnv.h"
+
+#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/Dialect/Linalg/Utils/Utils.h"
+#include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include <optional>
 
 using namespace mlir;
 using namespace mlir::sparse_tensor;
 
+//===----------------------------------------------------------------------===//
+// Code generation environment helper functions
+//===----------------------------------------------------------------------===//
+
+/// Returns true if tensor materializes uninitialized into the computation.
+static bool isMaterializing(Value val) {
+  return val.getDefiningOp<tensor::EmptyOp>() ||
+         val.getDefiningOp<bufferization::AllocTensorOp>();
+}
+
 //===----------------------------------------------------------------------===//
 // Code generation environment constructor and general methods
 //===----------------------------------------------------------------------===//
@@ -25,11 +39,18 @@ CodegenEnv::CodegenEnv(linalg::GenericOp linop, SparsificationOptions opts,
       expFilled(), expAdded(), expCount(), redVal(), redExp(-1u),
       redCustom(-1u) {}
 
-void CodegenEnv::startEmit(OpOperand *so, unsigned lv) {
-  assert(sparseOut == nullptr && insChain == nullptr &&
-         "must only start emitting once");
-  sparseOut = so;
-  outerParNest = lv;
+LogicalResult CodegenEnv::initTensorExp() {
+  // Builds the tensor expression for the Linalg operation in SSA form.
+  std::optional<unsigned> optExp = latticeMerger.buildTensorExpFromLinalg(op());
+  if (!optExp || !isAdmissibleTensorExp(*optExp))
+    return failure();
+
+  tensorExp = *optExp;
+  return success();
+}
+
+void CodegenEnv::startEmit() {
+  assert(insChain == nullptr && "must only start emitting once");
   if (sparseOut) {
     insChain = sparseOut->get();
     latticeMerger.setHasSparseOut(true);
@@ -66,6 +87,71 @@ std::optional<Operation *> CodegenEnv::genLoopBoundary(
   return r;
 }
 
+//===----------------------------------------------------------------------===//
+// Code generation environment verify functions.
+//===----------------------------------------------------------------------===//
+
+bool CodegenEnv::isAdmissibleTensorExp(unsigned exp) {
+  // We reject any expression that makes a reduction from `-outTensor`, as those
+  // expressions create a dependency between the current iteration (i) and the
+  // previous iteration (i-1). It would require iterating over the whole
+  // coordinate space, which prevent exploiting sparsity for faster code.
+  for (utils::IteratorType it : linalgOp.getIteratorTypesArray()) {
+    if (it == utils::IteratorType::reduction) {
+      if (latticeMerger.hasNegateOnOut(exp))
+        return false;
+      break;
+    }
+  }
+
+  OpOperand *lhs = linalgOp.getDpsInitOperand(0);
+  unsigned tensor = lhs->getOperandNumber();
+  auto enc = getSparseTensorEncoding(lhs->get().getType());
+  // An non-annotated output tensor is assumed dense, and becomes a random
+  // access n-dim memref. Admissible since insertions cannot occur.
+  if (!enc || enc.isAllDense())
+    return true;
+
+  // A tensor expression with a sparse output tensor that changes its values
+  // but not its nonzero structure, an operation called "simply dynamic" in
+  // [Bik96,Ch9], is also admissible without special env.
+  if (latticeMerger.isSingleCondition(tensor, exp))
+    return true;
+
+  // Accept "truly dynamic" if the output tensor materializes uninitialized
+  // into the computation and insertions occur in lexicographic index order.
+  sparseOut = lhs;
+  return isMaterializing(lhs->get());
+}
+
+bool CodegenEnv::isAdmissibleTopoOrder() {
+  if (!hasSparseOutput())
+    return true;
+
+  OpOperand *lhs = linalgOp.getDpsInitOperand(0);
+  // Accept "truly dynamic" if the output tensor materializes uninitialized
+  // into the computation and insertions occur in lexicographic index order.
+  unsigned nest = 0;
+  auto iteratorTypes = linalgOp.getIteratorTypesArray();
+  for (unsigned i = 0, e = latticeMerger.getNumLoops(); i < e; i++) {
+    if (!latticeMerger.isFilterLoop(topSortAt(i))) {
+      // We only count non-filter loops as filter loops should be considered
+      // as a special type of parallel loops.
+      if (linalg::isReductionIterator(iteratorTypes[topSortAt(i)]))
+        break; // terminate at first reduction
+      nest++;
+    }
+  }
+  // Determine admissible dynamic insertion situations:
+  // (1) fully injective, since there are no reductions,
+  // (2) admissible 1-d expansion in innermost dimension.
+  if (nest >= linalgOp.getRank(lhs) - 1) {
+    outerParNest = nest;
+    return true;
+  }
+  return false;
+}
+
 //===----------------------------------------------------------------------===//
 // Code generation environment topological sort methods
 //===----------------------------------------------------------------------===//

mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h

Lines changed: 19 additions & 1 deletion

@@ -44,12 +44,15 @@ class CodegenEnv {
   // General methods.
   //
 
+  LogicalResult initTensorExp();
+  unsigned getTensorExp() const { return tensorExp; }
+
   linalg::GenericOp op() const { return linalgOp; }
   const SparsificationOptions &options() const { return sparseOptions; }
   Merger &merger() { return latticeMerger; }
   LoopEmitter &emitter() { return loopEmitter; }
 
-  void startEmit(OpOperand *so, unsigned lv);
+  void startEmit();
 
   /// Generates loop boundary statements (entering/exiting loops). The function
   /// passes and updates the passed-in parameters.
@@ -72,6 +75,18 @@ class CodegenEnv {
     return latticeMerger.getDimLevelType(b);
   }
 
+  //
+  // Code generation environment verify functions.
+  //
+
+  /// Whether the tensor expression is admissible for codegen.
+  /// It also sets the sparseOut if the output tensor is sparse.
+  bool isAdmissibleTensorExp(unsigned exp);
+
+  /// Whether the iteration graph is sorted in admissible topoOrder.
+  /// Sets outerParNest on success with sparse output
+  bool isAdmissibleTopoOrder();
+
   //
   // Topological delegate and sort methods.
   //
@@ -156,6 +171,9 @@ class CodegenEnv {
   Value redVal;
   unsigned redExp;
   unsigned redCustom;
+
+  // The root tensor expression of the kernel.
+  unsigned tensorExp;
 };
 
 } // namespace sparse_tensor
