[mlir][sparse] Add more tests and verification for n:m #81186

Merged: 9 commits, Feb 9, 2024

10 changes: 10 additions & 0 deletions mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -84,6 +84,16 @@ mlirSparseTensorEncodingAttrGetPosWidth(MlirAttribute attr);
MLIR_CAPI_EXPORTED int
mlirSparseTensorEncodingAttrGetCrdWidth(MlirAttribute attr);

MLIR_CAPI_EXPORTED unsigned
mlirSparseTensorEncodingAttrGetStructuredN(MlirSparseTensorLevelType lvlType);

MLIR_CAPI_EXPORTED unsigned
mlirSparseTensorEncodingAttrGetStructuredM(MlirSparseTensorLevelType lvlType);

MLIR_CAPI_EXPORTED MlirSparseTensorLevelType
mlirSparseTensorEncodingAttrBuildLvlType(
enum MlirBaseSparseTensorLevelType lvlType, unsigned n, unsigned m);

#ifdef __cplusplus
}
#endif
38 changes: 37 additions & 1 deletion mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -60,6 +60,15 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
py::arg("lvl_to_dim"), py::arg("pos_width"), py::arg("crd_width"),
py::arg("context") = py::none(),
"Gets a sparse_tensor.encoding from parameters.")
.def_classmethod(
"build_level_type",
[](py::object cls, MlirBaseSparseTensorLevelType lvlType, unsigned n,
unsigned m) {
return mlirSparseTensorEncodingAttrBuildLvlType(lvlType, n, m);
},
py::arg("cls"), py::arg("lvl_type"), py::arg("n") = 0,
py::arg("m") = 0,
"Builds a sparse_tensor.encoding.level_type from parameters.")
.def_property_readonly(
"lvl_types",
[](MlirAttribute self) {
@@ -89,7 +98,34 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
.def_property_readonly("pos_width",
mlirSparseTensorEncodingAttrGetPosWidth)
.def_property_readonly("crd_width",
mlirSparseTensorEncodingAttrGetCrdWidth);
mlirSparseTensorEncodingAttrGetCrdWidth)
.def_property_readonly(
"structured_n",
[](MlirAttribute self) -> unsigned {
const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
return mlirSparseTensorEncodingAttrGetStructuredN(
mlirSparseTensorEncodingAttrGetLvlType(self, lvlRank - 1));
})
.def_property_readonly(
"structured_m",
[](MlirAttribute self) -> unsigned {
const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
return mlirSparseTensorEncodingAttrGetStructuredM(
mlirSparseTensorEncodingAttrGetLvlType(self, lvlRank - 1));
})
.def_property_readonly("lvl_types_enum", [](MlirAttribute self) {
const int lvlRank = mlirSparseTensorEncodingGetLvlRank(self);
std::vector<MlirBaseSparseTensorLevelType> ret;
ret.reserve(lvlRank);
for (int l = 0; l < lvlRank; l++) {
// Convert level type to 32 bits to ignore n and m for n_out_of_m
// format.
ret.push_back(
static_cast<MlirBaseSparseTensorLevelType>(static_cast<uint32_t>(
mlirSparseTensorEncodingAttrGetLvlType(self, l))));
}
return ret;
});
}

PYBIND11_MODULE(_mlirDialectsSparseTensor, m) {
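Not part of the diff: a minimal sketch of how the new Python bindings might be exercised, assuming the upstream package layout (`mlir.ir`, `mlir.dialects.sparse_tensor`) and that the base level-type enum is exposed as `LevelType` with an `n_out_of_m` member. The encoding string reuses the 2:4 map from the integration test below; `build_level_type` wraps the new C API entry point `mlirSparseTensorEncodingAttrBuildLvlType`.

```python
# Minimal sketch (not part of this patch); module paths and the LevelType enum
# member name are assumptions based on the upstream Python binding layout.
from mlir import ir
from mlir.dialects import sparse_tensor as st

with ir.Context():
    # 2:4 encoding: the last level is structured[2, 4], all earlier levels dense.
    enc = st.EncodingAttr(ir.Attribute.parse(
        "#sparse_tensor.encoding<{"
        " map = (i, j) -> (i : dense, j floordiv 4 : dense,"
        " j mod 4 : structured[2, 4]) }>"))
    print(enc.structured_n)    # reads n from the last level type -> 2
    print(enc.structured_m)    # reads m from the last level type -> 4
    print(enc.lvl_types_enum)  # base level types, with n and m masked off

    # Build a 2:4 level type directly from the base enum.
    lvl = st.EncodingAttr.build_level_type(st.LevelType.n_out_of_m, 2, 4)
```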
18 changes: 18 additions & 0 deletions mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -94,3 +94,21 @@ int mlirSparseTensorEncodingAttrGetPosWidth(MlirAttribute attr) {
int mlirSparseTensorEncodingAttrGetCrdWidth(MlirAttribute attr) {
return cast<SparseTensorEncodingAttr>(unwrap(attr)).getCrdWidth();
}

MlirSparseTensorLevelType
mlirSparseTensorEncodingAttrBuildLvlType(MlirBaseSparseTensorLevelType lvlType,
unsigned n, unsigned m) {
LevelType lt = static_cast<LevelType>(lvlType);
return static_cast<MlirSparseTensorLevelType>(*buildLevelType(
*getLevelFormat(lt), isOrderedLT(lt), isUniqueLT(lt), n, m));
}

unsigned
mlirSparseTensorEncodingAttrGetStructuredN(MlirSparseTensorLevelType lvlType) {
return getN(static_cast<LevelType>(lvlType));
}

unsigned
mlirSparseTensorEncodingAttrGetStructuredM(MlirSparseTensorLevelType lvlType) {
return getM(static_cast<LevelType>(lvlType));
}
34 changes: 21 additions & 13 deletions mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -35,14 +35,22 @@ FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
ERROR_IF(failed(parser.parseOptionalKeyword(&base)),
"expected valid level format (e.g. dense, compressed or singleton)")
uint64_t properties = 0;
SmallVector<unsigned> structure;
SmallVector<unsigned> structured;

if (base.compare("structured") == 0) {
ParseResult res = parser.parseCommaSeparatedList(
mlir::OpAsmParser::Delimiter::OptionalSquare,
[&]() -> ParseResult { return parseStructure(parser, &structure); },
" in block n out of m");
[&]() -> ParseResult { return parseStructured(parser, &structured); },
" in structured n out of m");
FAILURE_IF_FAILED(res)
if (structured.size() != 2) {
parser.emitError(loc, "expected exactly 2 structured sizes");
return failure();
}
if (structured[0] > structured[1]) {
parser.emitError(loc, "expected n <= m in n_out_of_m");
return failure();
}
}

ParseResult res = parser.parseCommaSeparatedList(
@@ -57,12 +65,8 @@ FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
} else if (base.compare("compressed") == 0) {
properties |= static_cast<uint64_t>(LevelFormat::Compressed);
} else if (base.compare("structured") == 0) {
if (structure.size() != 2) {
parser.emitError(loc, "expected exactly 2 structure sizes");
return failure();
}
properties |= static_cast<uint64_t>(LevelFormat::NOutOfM);
properties |= nToBits(structure[0]) | mToBits(structure[1]);
properties |= nToBits(structured[0]) | mToBits(structured[1]);
} else if (base.compare("loose_compressed") == 0) {
properties |= static_cast<uint64_t>(LevelFormat::LooseCompressed);
} else if (base.compare("singleton") == 0) {
@@ -95,20 +99,24 @@ ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
}

ParseResult
LvlTypeParser::parseStructure(AsmParser &parser,
SmallVector<unsigned> *structure) const {
LvlTypeParser::parseStructured(AsmParser &parser,
SmallVector<unsigned> *structured) const {
int intVal;
auto loc = parser.getCurrentLocation();
OptionalParseResult intValParseResult = parser.parseOptionalInteger(intVal);
if (intValParseResult.has_value()) {
if (failed(*intValParseResult)) {
parser.emitError(loc, "failed to parse block size");
parser.emitError(loc, "failed to parse structured size");
return failure();
}
if (intVal < 0) {
parser.emitError(loc, "expected structured size to be >= 0");
return failure();
}
structure->push_back(intVal);
structured->push_back(intVal);
return success();
}
parser.emitError(loc, "expected valid integer for block size");
parser.emitError(loc, "expected valid integer for structured size");
return failure();
}

4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h
@@ -22,8 +22,8 @@ class LvlTypeParser {

private:
ParseResult parseProperty(AsmParser &parser, uint64_t *properties) const;
ParseResult parseStructure(AsmParser &parser,
SmallVector<unsigned> *structure) const;
ParseResult parseStructured(AsmParser &parser,
SmallVector<unsigned> *structured) const;
};

} // namespace ir_detail
31 changes: 31 additions & 0 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -657,6 +657,37 @@ LogicalResult SparseTensorEncodingAttr::verify(
return emitError() << "expected all singleton lvlTypes "
"following a singleton level";
}
// TODO: audit formats that actually are supported by backend.
if (auto it = std::find_if(lvlTypes.begin(), lvlTypes.end(), isNOutOfMLT);
it != std::end(lvlTypes)) {
if (it != lvlTypes.end() - 1)
return emitError() << "expected n_out_of_m to be the last level type";
if (!std::all_of(lvlTypes.begin(), it,
[](LevelType i) { return isDenseLT(i); }))
return emitError() << "expected all dense lvlTypes "
"before a n_out_of_m level";
if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
if (!isBlockSparsity(dimToLvl)) {
return emitError()
<< "expected 1xm block structure for n_out_of_m level";
}
auto sizes = getBlockSize(dimToLvl);
unsigned coefficient = 0;
for (const auto &elem : sizes) {
if (elem != 0) {
if (elem != coefficient && coefficient != 0) {
return emitError() << "expected only one blocked level "
"with the same coefficients";
}
coefficient = elem;
}
}
if (coefficient != getM(*it)) {
return emitError() << "expected coeffiencts of Affine expressions "
"to be equal to m of n_out_of_m level";
}
}
}
// Before we can check that the level-rank is consistent/coherent
// across all fields, we need to define it. The source-of-truth for
// the `getLvlRank` method is the length of the level-types array,
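Not part of the diff: a small Python sketch (same assumed binding layout as above, and assuming parse failures surface as `ir.MLIRError`) of what the new parser and verifier rules accept and reject. The valid map is the corrected form of the first two invalid cases in the test file that follows; the quoted diagnostic comes from the parser change above.

```python
# Minimal sketch (not part of this patch); module paths as assumed above.
from mlir import ir
from mlir.dialects import sparse_tensor as st

VALID_2_OF_4 = """#sparse_tensor.encoding<{
  map = (i, j, k) -> (i : dense,
                      k floordiv 4 : dense,
                      j : dense,
                      k mod 4 : structured[2, 4])
}>"""
# Same map, but n > m; rejected with "expected n <= m in n_out_of_m".
INVALID_5_OF_4 = VALID_2_OF_4.replace("structured[2, 4]", "structured[5, 4]")

with ir.Context():
    # Accepted: n_out_of_m is the last level, all earlier levels are dense,
    # and the floordiv/mod coefficient (4) matches m.
    enc = st.EncodingAttr(ir.Attribute.parse(VALID_2_OF_4))
    assert (enc.structured_n, enc.structured_m) == (2, 4)
    try:
        ir.Attribute.parse(INVALID_5_OF_4)
    except ir.MLIRError as e:
        print(e)  # carries the diagnostic emitted during parsing
```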
106 changes: 106 additions & 0 deletions mlir/test/Dialect/SparseTensor/invalid_encoding.mlir
@@ -315,3 +315,109 @@ func.func private @BSR(%arg0: tensor<?x?xf64, #BSR>) {
func.func private @BSR_explicit(%arg0: tensor<?x?xf64, #BSR_explicit>) {
return
}

// -----

// expected-error@+6 {{expected structured size to be >= 0}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 4 : dense,
j : dense,
k mod 4 : structured[-2, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+6 {{expected n <= m in n_out_of_m}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 4 : dense,
j : dense,
k mod 4 : structured[5, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+1 {{expected all dense lvlTypes before an n_out_of_m level}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 4 : compressed,
j : dense,
k mod 4 : structured[2, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+1 {{expected n_out_of_m to be the last level type}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 4 : structured[2, 4],
j : dense,
k mod 4 : compressed
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+1 {{expected 1xm block structure for n_out_of_m level}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 2 : dense,
j : dense,
k mod 4 : structured[2, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+1 {{expected coefficients of affine expressions to be equal to m of n_out_of_m level}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i : dense,
k floordiv 2 : dense,
j : dense,
k mod 2 : structured[2, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}

// -----

// expected-error@+1 {{expected only one blocked level with the same coefficients}}
#NOutOfM = #sparse_tensor.encoding<{
map = ( i, j, k ) ->
( i floordiv 2 : dense,
i mod 2 : dense,
j : dense,
k floordiv 4 : dense,
k mod 4 : structured[2, 4]
)
}>
func.func private @NOutOfM(%arg0: tensor<?x?x?xf64, #NOutOfM>) {
return
}
22 changes: 22 additions & 0 deletions mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
@@ -45,6 +45,13 @@
crdWidth = 8
}>

#NV_58 = #sparse_tensor.encoding<{
map = ( i, j ) -> ( i : dense,
j floordiv 8 : dense,
j mod 8 : structured[5, 8]),
crdWidth = 8
}>

module {

func.func private @getTensorFilename(index) -> (!Filename)
@@ -65,6 +72,7 @@ module {
%A1 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #CSR>
%A2 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #CSR_hi>
%A3 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #NV_24>
%A4 = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #NV_58>

//
// CSR:
@@ -113,10 +121,24 @@ module {
%vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<12xf64>
vector.print %vecv3 : vector<12xf64>

//
// NV_58
//
// CHECK-NEXT: ( 2, 3, 5, 7, 1, 2, 4, 7, 0, 2, 4, 5 )
// CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 )
//
%crd4 = sparse_tensor.coordinates %A4 {level = 2 : index } : tensor<?x?xf64, #NV_58> to memref<?xi8>
%vecc4 = vector.transfer_read %crd4[%c0], %u0 : memref<?xi8>, vector<12xi8>
vector.print %vecc4 : vector<12xi8>
%val4 = sparse_tensor.values %A4 : tensor<?x?xf64, #NV_58> to memref<?xf64>
%vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<12xf64>
vector.print %vecv4 : vector<12xf64>

// Release the resources.
bufferization.dealloc_tensor %A1: tensor<?x?xf64, #CSR>
bufferization.dealloc_tensor %A2: tensor<?x?xf64, #CSR_hi>
bufferization.dealloc_tensor %A3: tensor<?x?xf64, #NV_24>
bufferization.dealloc_tensor %A4: tensor<?x?xf64, #NV_58>

return
}