Skip to content

[mlir][sparse] Support pretty print to debug sparse iteration. #80207

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Feb 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -95,13 +95,15 @@ struct SparsificationPass
SparsificationPass(const SparsificationPass &pass) = default;
// Constructs the pass from user-facing options, copying each strategy/flag
// into the corresponding pass parameter so runOnOperation() can rebuild a
// SparsificationOptions from them (pass parameters, not the options struct,
// are what the pass infrastructure serializes).
SparsificationPass(const SparsificationOptions &options) {
parallelization = options.parallelizationStrategy;
// Newly threaded-through option: controls functional vs. debug (pretty
// printed) sparse iteration code emission.
sparseEmitStrategy = options.sparseEmitStrategy;
enableRuntimeLibrary = options.enableRuntimeLibrary;
}

void runOnOperation() override {
auto *ctx = &getContext();
// Translate strategy flags to strategy options.
SparsificationOptions options(parallelization, enableRuntimeLibrary);
SparsificationOptions options(parallelization, sparseEmitStrategy,
enableRuntimeLibrary);
// Apply sparsification and cleanup rewriting.
RewritePatternSet patterns(ctx);
populateSparsificationPatterns(patterns, options);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1369,7 +1369,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
return failure();

// Recursively generates code if admissible.
env.startEmit();
env.startEmit(options.sparseEmitStrategy);
genBuffers(env, rewriter);
// TODO: Constant affine expression should be handled differently when using
// slice-based codegen, it does not matter now because we already reject the
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ LogicalResult CodegenEnv::initTensorExp() {
return success();
}

void CodegenEnv::startEmit() {
void CodegenEnv::startEmit(SparseEmitStrategy emitStrategy) {
assert(insChain == nullptr && "must only start emitting once");
if (sparseOut) {
insChain = sparseOut->get();
Expand Down Expand Up @@ -96,7 +96,8 @@ void CodegenEnv::startEmit() {
/*dependentLvlGetter=*/
[this](TensorId t, Level lvl) -> std::vector<LoopCoeffPair> {
return merger().getDependentLoops(t, lvl);
});
},
emitStrategy);
}

std::optional<Operation *> CodegenEnv::genLoopBoundary(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class CodegenEnv {
Merger &merger() { return latticeMerger; }
LoopEmitter &emitter() { return loopEmitter; }

void startEmit();
void startEmit(SparseEmitStrategy emitStrategy);

/// Generates loop boundary statements (entering/exiting loops). The function
/// passes and updates the passed-in parameters.
Expand Down
13 changes: 8 additions & 5 deletions mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,17 +81,20 @@ static Value genSliceStride(OpBuilder &builder, Location loc, Value tensor,

/// Delegating constructor: forwards all arguments to initialize().
///
/// NOTE(fix): the original body dropped `emitStrategy` on the floor —
/// initialize() was called without it, so it always fell back to its
/// default (SparseEmitStrategy::kFunctional) and a strategy passed via
/// this constructor had no effect. Forward it explicitly.
LoopEmitter::LoopEmitter(ValueRange tensors, StringAttr loopTag, bool hasOutput,
                         bool isSparseOut, unsigned numLoops,
                         DependentLvlGetter dimGetter,
                         SparseEmitStrategy emitStrategy) {
  initialize(tensors, loopTag, hasOutput, isSparseOut, numLoops, dimGetter,
             emitStrategy);
}

void LoopEmitter::initialize(ValueRange ts, StringAttr loopTag, bool hasOutput,
bool isSparseOut, unsigned numLoops,
DependentLvlGetter dimGetter) {
DependentLvlGetter dimGetter,
SparseEmitStrategy emitStrategy) {
// First initialize the top-level type of the fields.
this->loopTag = loopTag;
this->hasOutput = hasOutput;
this->isSparseOut = isSparseOut;
SparseIterator::setSparseEmitStrategy(emitStrategy);

const unsigned numManifestTensors = ts.size();
const unsigned synTensorId = numManifestTensors;
Expand Down Expand Up @@ -169,7 +172,7 @@ LoopEmitter::makeLevelIterator(OpBuilder &builder, Location loc, TensorId t,
Value offset = genSliceOffset(builder, loc, tensors[t], l);
Value stride = genSliceStride(builder, loc, tensors[t], l);
auto slicedIt = makeSlicedLevelIterator(std::move(it), offset, stride,
lvls[t][l]->size());
lvls[t][l]->getSize());
return slicedIt;
}
return it;
Expand Down Expand Up @@ -465,7 +468,7 @@ std::pair<Operation *, Value> LoopEmitter::emitWhileLoopOverTensorsAtLvls(

// Construct the while-loop with a parameter for each coordinate.
for (SparseIterator *it : spIters) {
ValueRange itVals = it->getItVals();
ValueRange itVals = it->getCursor();
ivs.append(itVals.begin(), itVals.end());
}

Expand Down Expand Up @@ -724,7 +727,7 @@ void LoopEmitter::exitWhileLoop(OpBuilder &builder, Location loc,
// Forward the sparse iterator.
Value cmp = CMPI(eq, it.getCrd(), iv);
it.forwardIf(builder, loc, cmp);
operands.append(it.getItVals().begin(), it.getItVals().end());
operands.append(it.getCursor().begin(), it.getCursor().end());
// const Value newPos = whileOp->getResult(o++);
// Following loops continue iteration from the break point of the
// current while loop.
Expand Down
20 changes: 12 additions & 8 deletions mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/Utils/Merger.h"
#include "mlir/IR/PatternMatch.h"

Expand Down Expand Up @@ -84,14 +85,17 @@ class LoopEmitter {
/// `isSparseOut` indicates that the sparse output tensor is empty,
/// so the loop emitter will generate loops over it according to the
/// level-sizes.
void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
bool hasOutput = false, bool isSparseOut = false,
unsigned numLoops = 0, DependentLvlGetter getter = nullptr);

explicit LoopEmitter(ValueRange tensors, StringAttr loopTag = nullptr,
bool hasOutput = false, bool isSparseOut = false,
unsigned numLoops = 0,
DependentLvlGetter getter = nullptr);
void
initialize(ValueRange tensors, StringAttr loopTag = nullptr,
bool hasOutput = false, bool isSparseOut = false,
unsigned numLoops = 0, DependentLvlGetter getter = nullptr,
SparseEmitStrategy emitStrategy = SparseEmitStrategy::kFunctional);

explicit LoopEmitter(
ValueRange tensors, StringAttr loopTag = nullptr, bool hasOutput = false,
bool isSparseOut = false, unsigned numLoops = 0,
DependentLvlGetter getter = nullptr,
SparseEmitStrategy emitStrategy = SparseEmitStrategy::kFunctional);

/// Starts a loop emitting session by generating all the buffers needed
/// for iterating over the tensors.
Expand Down
Loading