[mlir][sparse] Add has_runtime_library test op #85355

Merged: 1 commit, Mar 15, 2024

13 changes: 12 additions & 1 deletion mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -1419,7 +1419,7 @@ def SparseTensor_ForeachOp : SparseTensor_Op<"foreach",
}

//===----------------------------------------------------------------------===//
// Sparse Tensor Debugging Operations.
// Sparse Tensor Debugging and Test-Only Operations.
//===----------------------------------------------------------------------===//

def SparseTensor_PrintOp : SparseTensor_Op<"print">,
@@ -1440,4 +1440,15 @@ def SparseTensor_PrintOp : SparseTensor_Op<"print">,
let assemblyFormat = "$tensor attr-dict `:` type($tensor)";
}

def SparseTensor_HasRuntimeLibraryOp
    : SparseTensor_Op<"has_runtime_library", []>, Results<(outs I1:$result)> {
  string summary = "Indicates whether running in runtime/codegen mode";
  string description = [{
    Returns a boolean value that indicates whether the sparse compiler runs in
    runtime library mode or not. For testing only: this op is useful for writing
    test cases that require different IR depending on runtime/codegen mode.
  }];
  let assemblyFormat = "attr-dict";
}
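
A usage sketch (illustration only, not part of the diff; the pass flags below are assumed to be the usual sparse-tensor test passes, and the folded constants follow the two conversion patterns added in this PR):

  %has_runtime = sparse_tensor.has_runtime_library
  scf.if %has_runtime {
    // IR that is only needed in runtime-library mode.
  }

  // After --sparse-tensor-conversion (runtime-library path):
  //   %has_runtime = arith.constant true
  // After --sparse-tensor-codegen (direct codegen path):
  //   %has_runtime = arith.constant false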
Collaborator:
I'm a bit concerned about "test only" ops being mixed into the non-test dialects, and about "test patterns" like this ending up in the normal compilation flow.

Is the TODO in the test indicative that this will be reverted soon?

Member (Author):
I plan to revert this once the buffer deallocation pass is working properly with the sparse compiler. The plan is to fix all memory leaks first, so that I can easily spot issues/regressions while working on the buffer deallocation pass.

I could move the op to Dialect/Test/TestOps.td in the meantime and rename it to test.sparse_tensor.has_runtime_library. But so far we don't seem to have dialect-specific test ops in there. Alternatively, I could also rename it to sparse_tensor.test.has_runtime_library.


#endif // SPARSETENSOR_OPS
43 changes: 28 additions & 15 deletions mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1561,6 +1561,19 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
}
};

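// Test-only lowering: the direct codegen path never uses the runtime library,
// so sparse_tensor.has_runtime_library folds to constant false here.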
struct SparseHasRuntimeLibraryConverter
    : public OpConversionPattern<HasRuntimeLibraryOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(HasRuntimeLibraryOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto i1Type = rewriter.getI1Type();
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(
        op, i1Type, rewriter.getIntegerAttr(i1Type, 0));
    return success();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
@@ -1572,21 +1585,21 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
void mlir::populateSparseTensorCodegenPatterns(
TypeConverter &typeConverter, RewritePatternSet &patterns,
bool createSparseDeallocs, bool enableBufferInitialization) {
patterns.add<SparseAssembleOpConverter, SparseDisassembleOpConverter,
SparseReturnConverter, SparseCallConverter, SparseLvlOpConverter,
SparseCastConverter, SparseExtractSliceConverter,
SparseTensorLoadConverter, SparseExpandConverter,
SparseCompressConverter, SparseInsertConverter,
SparseReorderCOOConverter, SparseReMapConverter,
SparseSliceGetterOpConverter<ToSliceOffsetOp,
StorageSpecifierKind::DimOffset>,
SparseSliceGetterOpConverter<ToSliceStrideOp,
StorageSpecifierKind::DimStride>,
SparseToPositionsConverter, SparseToCoordinatesConverter,
SparseToCoordinatesBufferConverter, SparseToValuesConverter,
SparseConvertConverter, SparseNewConverter,
SparseNumberOfEntriesConverter>(typeConverter,
patterns.getContext());
patterns.add<
SparseAssembleOpConverter, SparseDisassembleOpConverter,
SparseReturnConverter, SparseCallConverter, SparseLvlOpConverter,
SparseCastConverter, SparseExtractSliceConverter,
SparseTensorLoadConverter, SparseExpandConverter, SparseCompressConverter,
SparseInsertConverter, SparseReorderCOOConverter, SparseReMapConverter,
SparseSliceGetterOpConverter<ToSliceOffsetOp,
StorageSpecifierKind::DimOffset>,
SparseSliceGetterOpConverter<ToSliceStrideOp,
StorageSpecifierKind::DimStride>,
SparseToPositionsConverter, SparseToCoordinatesConverter,
SparseToCoordinatesBufferConverter, SparseToValuesConverter,
SparseConvertConverter, SparseNewConverter,
SparseNumberOfEntriesConverter, SparseHasRuntimeLibraryConverter>(
typeConverter, patterns.getContext());
patterns.add<SparseTensorDeallocConverter>(
typeConverter, patterns.getContext(), createSparseDeallocs);
patterns.add<SparseTensorAllocConverter, SparseTensorEmptyConverter>(
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -840,6 +840,19 @@ class SparseTensorDisassembleConverter
}
};

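// Test-only lowering: the conversion path always targets the runtime library,
// so sparse_tensor.has_runtime_library folds to constant true here.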
struct SparseHasRuntimeLibraryConverter
    : public OpConversionPattern<HasRuntimeLibraryOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(HasRuntimeLibraryOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto i1Type = rewriter.getI1Type();
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(
        op, i1Type, rewriter.getIntegerAttr(i1Type, 1));
    return success();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
@@ -868,6 +881,7 @@ void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter,
SparseTensorToValuesConverter, SparseNumberOfEntriesConverter,
SparseTensorLoadConverter, SparseTensorInsertConverter,
SparseTensorExpandConverter, SparseTensorCompressConverter,
SparseTensorAssembleConverter, SparseTensorDisassembleConverter>(
typeConverter, patterns.getContext());
SparseTensorAssembleConverter, SparseTensorDisassembleConverter,
SparseHasRuntimeLibraryConverter>(typeConverter,
patterns.getContext());
}
@@ -140,10 +140,19 @@ module {
sparse_tensor.print %s1 : tensor<4x3x2xf32, #BatchedCSR>
sparse_tensor.print %s2 : tensor<4x3x2xf32, #CSRDense>

// FIXME: doing this explicitly crashes runtime
// bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
// bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>
// bufferization.dealloc_tensor %s2 : tensor<4x3x2xf32, #CSRDense>
// TODO: This check can be removed once the codegen path uses the buffer
// deallocation pass; "dealloc_tensor" turns into a no-op in the codegen
// path.
%has_runtime = sparse_tensor.has_runtime_library
scf.if %has_runtime {
  // sparse_tensor.assemble copies buffers when running with the runtime
  // library, so the results must be deallocated. Deallocations are not
  // needed when running in codegen mode.
  bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
  bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>
  bufferization.dealloc_tensor %s2 : tensor<4x3x2xf32, #CSRDense>
}

return
}
}