Skip to content

[mlir][sparse] remove filter-loop based algorithm support to handle a… #71840

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Nov 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 2 additions & 9 deletions mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,13 +53,6 @@ struct SparseCompilerOptions
"any-storage-any-loop",
"Enable sparse parallelization for any storage and loop."))};

PassOptions::Option<bool> enableIndexReduction{
*this, "enable-index-reduction",
desc("Enable dependent index reduction based algorithm to handle "
"non-trivial index expressions on sparse inputs (experimental "
"features)"),
init(false)};

PassOptions::Option<bool> enableRuntimeLibrary{
*this, "enable-runtime-library",
desc("Enable runtime library for manipulating sparse tensors"),
Expand Down Expand Up @@ -151,8 +144,8 @@ struct SparseCompilerOptions

/// Projects out the options for `createSparsificationPass`.
SparsificationOptions sparsificationOptions() const {
return SparsificationOptions(parallelization, enableIndexReduction,
enableGPULibgen, enableRuntimeLibrary);
return SparsificationOptions(parallelization, enableGPULibgen,
enableRuntimeLibrary);
}

/// Projects out the options for `createConvertVectorToLLVMPass`.
Expand Down
11 changes: 5 additions & 6 deletions mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,15 +74,14 @@ std::unique_ptr<Pass> createPreSparsificationRewritePass();

/// Options for the Sparsification pass.
struct SparsificationOptions {
SparsificationOptions(SparseParallelizationStrategy p, bool idxReduc,
bool gpuLibgen, bool enableRT)
: parallelizationStrategy(p), enableIndexReduction(idxReduc),
enableGPULibgen(gpuLibgen), enableRuntimeLibrary(enableRT) {}
SparsificationOptions(SparseParallelizationStrategy p, bool gpuLibgen,
bool enableRT)
: parallelizationStrategy(p), enableGPULibgen(gpuLibgen),
enableRuntimeLibrary(enableRT) {}
SparsificationOptions()
: SparsificationOptions(SparseParallelizationStrategy::kNone, false,
false, true) {}
true) {}
SparseParallelizationStrategy parallelizationStrategy;
bool enableIndexReduction;
bool enableGPULibgen;
bool enableRuntimeLibrary;
};
Expand Down
3 changes: 0 additions & 3 deletions mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
Original file line number Diff line number Diff line change
Expand Up @@ -114,9 +114,6 @@ def SparsificationPass : Pass<"sparsification", "ModuleOp"> {
];
// TODO(57514): These enum options are duplicated in Passes.h.
let options = [
Option<"enableIndexReduction", "enable-index-reduction", "bool",
"false",
"Enable dependent index reduction based algorithm to handle non-trivial index expressions on sparse inputs (experimental features)">,
Option<"parallelization", "parallelization-strategy", "mlir::SparseParallelizationStrategy",
"mlir::SparseParallelizationStrategy::kNone",
"Set the parallelization strategy", [{llvm::cl::values(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,16 +82,15 @@ struct SparsificationPass
SparsificationPass(const SparsificationPass &pass) = default;
SparsificationPass(const SparsificationOptions &options) {
parallelization = options.parallelizationStrategy;
enableIndexReduction = options.enableIndexReduction;
enableGPULibgen = options.enableGPULibgen;
enableRuntimeLibrary = options.enableRuntimeLibrary;
}

void runOnOperation() override {
auto *ctx = &getContext();
// Translate strategy flags to strategy options.
SparsificationOptions options(parallelization, enableIndexReduction,
enableGPULibgen, enableRuntimeLibrary);
SparsificationOptions options(parallelization, enableGPULibgen,
enableRuntimeLibrary);
// Apply GPU libgen (if requested), sparsification, and cleanup rewriting.
RewritePatternSet patterns(ctx);
if (enableGPULibgen)
Expand Down
8 changes: 6 additions & 2 deletions mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -323,12 +323,16 @@ static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
return true;
}
case AffineExprKind::Constant:
// TODO: Support Constant AffineExp for slice-based codegen
case AffineExprKind::Mul: {
// TODO: Support index expressions like `2 * d0`; we currently only support
// more complicated cases like `2 * d0 + d1`.
if (!isSubExp)
return false;

// TODO: Support Constant AffineExp for slice-based codegen
if (a.isa<AffineConstantExpr>())
llvm_unreachable("Not yet implemented");

auto binOp = a.cast<AffineBinaryOpExpr>();
auto lhs = binOp.getLHS(), rhs = binOp.getRHS();
if (rhs.isa<AffineConstantExpr>())
Expand Down Expand Up @@ -1953,7 +1957,7 @@ struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
const unsigned numFilterLoops = getNumNonTrivialIdxExpOnSparseLvls(op);
// TODO: we should probably always use slice-based codegen whenever
// possible, we can even intermix slice-based and filter-loop based codegen.
bool idxReducBased = options.enableIndexReduction && numFilterLoops != 0;
bool idxReducBased = numFilterLoops != 0;
// If we have an indexing map like (d0) -> (0, d0), there might be more
// levels than loops because of the constant index, which means we cannot
// use numLoops as the upper bound for the ranks of all tensors.
Expand Down
Loading