[mlir][sparse] code cleanup using the assumption that dim2lvl maps ar… #72894

Merged (2 commits) on Nov 20, 2023
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp: 34 changes (15 additions, 19 deletions)
@@ -745,8 +745,8 @@ class SparseTensorAllocConverter
     const auto resType = getSparseTensorType(op);
     if (!resType.hasEncoding())
       return failure();
-    Location loc = op.getLoc();
 
+    Location loc = op.getLoc();
     // Deal with copy.
     if (op.getCopy()) {
       auto desc = getDescriptorFromTensorTuple(adaptor.getCopy());
@@ -768,16 +768,14 @@ class SparseTensorAllocConverter
       return success();
     }
 
-    // Construct the dim/lvl sizes and the (unused) dim2lvl/lvl2dim buffers.
-    SmallVector<Value> dimSizesValues;
+    if (!resType.isIdentity()) {
+      return rewriter.notifyMatchFailure(
+          op, "try run --sparse-reinterpret-map before codegen");
+    }
+    // Level size equals to dimension size since lvl2dim map is an identity map.
     SmallVector<Value> lvlSizesValues;
-    Value dimSizesBuffer;
-    Value dim2lvlBuffer;
-    Value lvl2dimBuffer;
     createDimSizes(rewriter, loc, resType, adaptor.getDynamicSizes(),
-                   dimSizesValues);
-    genMapBuffers(rewriter, loc, resType, dimSizesValues, dimSizesBuffer,
-                  lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
+                   /*dimSizesValues=*/lvlSizesValues);
 
     // Construct allocation for each field.
     Value sizeHint = op.getSizeHint();
@@ -809,19 +809,17 @@ class SparseTensorEmptyConverter : public OpConversionPattern<tensor::EmptyOp> {
     const auto resType = getSparseTensorType(op);
     if (!resType.hasEncoding())
       return failure();
-    Location loc = op.getLoc();
 
-    // Construct the dim/lvl sizes and the (unused) dim2lvl/lvl2dim buffers.
-    SmallVector<Value> dimSizesValues;
+    if (!resType.isIdentity()) {
+      return rewriter.notifyMatchFailure(
+          op, "try run --sparse-reinterpret-map before codegen");
+    }
+
+    Location loc = op.getLoc();
+    // Level size equals to dimension size since lvl2dim map is an identity map.
     SmallVector<Value> lvlSizesValues;
-    Value dimSizesBuffer;
-    Value dim2lvlBuffer;
-    Value lvl2dimBuffer;
     createDimSizes(rewriter, loc, resType, adaptor.getDynamicSizes(),
-                   dimSizesValues);
-    genMapBuffers(rewriter, loc, resType, dimSizesValues, dimSizesBuffer,
-                  lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
-
+                   /*dimSizesValues=*/lvlSizesValues);
     // Construct allocation for each field.
     Value sizeHint; // none
     SmallVector<Value> fields;
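For illustration only (not part of this change), here is a minimal MLIR sketch of the case the simplified code path above targets: the #CSR encoding uses an identity dim2lvl map, so the level sizes handed to createDimSizes are just the dimension sizes and no dim2lvl/lvl2dim buffers are needed. The encoding and function name are made up for this example.

// Hypothetical example: identity dim2lvl map, so level sizes equal dimension sizes.
#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

func.func @alloc_csr(%d0: index, %d1: index) -> tensor<?x?xf64, #CSR> {
  // Dynamic sizes feed the converter above; each storage field is sized
  // directly from %d0 and %d1.
  %t = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #CSR>
  %r = sparse_tensor.load %t : tensor<?x?xf64, #CSR>
  return %r : tensor<?x?xf64, #CSR>
}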
mlir/test/Dialect/SparseTensor/codegen.mlir: 2 changes (1 addition, 1 deletion)
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s --lower-sparse-ops-to-foreach --lower-sparse-foreach-to-scf --sparse-tensor-codegen --canonicalize -cse | FileCheck %s
+// RUN: mlir-opt %s --lower-sparse-ops-to-foreach --lower-sparse-foreach-to-scf --sparse-reinterpret-map --sparse-tensor-codegen --canonicalize -cse | FileCheck %s
 
 #SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
 
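As a usage sketch (hypothetical, not taken from the test file): a non-identity dim2lvl map such as the block-sparse encoding below is exactly what --sparse-tensor-codegen now rejects with "try run --sparse-reinterpret-map before codegen", which is why the RUN line above gains the --sparse-reinterpret-map pass. Names and shapes here are invented for illustration.

// RUN: mlir-opt %s --sparse-reinterpret-map --sparse-tensor-codegen | FileCheck %s

// Hypothetical BSR-style encoding: dim2lvl maps (d0, d1) to
// (d0 floordiv 2, d1 floordiv 2, d0 mod 2, d1 mod 2), i.e. not an identity map.
#BSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 floordiv 2 : dense,
                     d1 floordiv 2 : compressed,
                     d0 mod 2 : dense,
                     d1 mod 2 : dense)
}>

func.func @empty_bsr() -> tensor<8x8xf64, #BSR> {
  // Without --sparse-reinterpret-map in the pipeline, codegen bails out here.
  %t = tensor.empty() : tensor<8x8xf64, #BSR>
  return %t : tensor<8x8xf64, #BSR>
}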