
[mlir][sparse] Implement parsing n out of m #79935


Merged (15 commits) on Feb 8, 2024
28 changes: 14 additions & 14 deletions mlir/include/mlir-c/Dialect/SparseTensor.h
@@ -28,20 +28,20 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor);
typedef uint64_t MlirSparseTensorLevelType;

enum MlirBaseSparseTensorLevelType {
MLIR_SPARSE_TENSOR_LEVEL_DENSE = 4, // 0b00001_00
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 8, // 0b00010_00
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 9, // 0b00010_01
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 10, // 0b00010_10
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 11, // 0b00010_11
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 16, // 0b00100_00
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 17, // 0b00100_01
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 18, // 0b00100_10
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 19, // 0b00100_11
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 32, // 0b01000_00
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 33, // 0b01000_01
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 34, // 0b01000_10
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 35, // 0b01000_11
MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR = 64, // 0b10000_00
MLIR_SPARSE_TENSOR_LEVEL_DENSE = 0x000000010000,
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED = 0x000000020000,
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU = 0x000000020001,
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO = 0x000000020002,
MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO = 0x000000020003,
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON = 0x000000040000,
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU = 0x000000040001,
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO = 0x000000040002,
MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO = 0x000000040003,
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED = 0x000000080000,
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU = 0x000000080001,
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO = 0x000000080002,
MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO = 0x000000080003,
MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M = 0x000000100000,
};
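The old 8-bit scheme (a one-hot format bit plus two property bits, shown in the deleted comments above) has no room for n and m, so the level type widens to 64 bits: the low bits keep the per-level properties and the format moves into a higher bit group. Below is a minimal sketch of that split, inferred only from the constants listed above; the 0xFFFF property mask is an assumption, and the authoritative helpers live in Enums.h (whose full diff is not rendered on this page).

// Illustrative sketch (not from this patch): decompose one of the new
// constants into its format and property parts. The 0xFFFF mask is an
// assumption inferred from the values above, not a documented contract.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t lt = 0x000000020003; // MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO
  const uint64_t properties = lt & 0xFFFF;        // 0x3: nonunique | nonordered
  const uint64_t format = lt & ~uint64_t{0xFFFF}; // 0x20000: compressed
  std::printf("format=0x%llx properties=0x%llx\n",
              (unsigned long long)format, (unsigned long long)properties);
  return 0;
}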

//===----------------------------------------------------------------------===//
205 changes: 128 additions & 77 deletions mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h

Large diffs are not rendered by default.

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -145,7 +145,8 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
- **compressed** : only nonzeros along this level are stored
- **loose_compressed** : as compressed, but allows for free space between regions
- **singleton** : a variant of the compressed format, where coordinates have no siblings
- **block2_4** : the compression uses a 2:4 encoding per 1x4 block
- **structured[n, m]** : the compression uses an n:m encoding
  (viz. n out of m consecutive elements are nonzero; see the example below)
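As an illustration of the new surface syntax (a sketch, not text from this patch): a 2:4 matrix can be declared by splitting the column dimension into a dense block level plus a structured[2, 4] level, and the attribute can be parsed through the C API. The map below is an assumed, commonly used 2:4 layout, and mlirAttributeParseGet only succeeds if the sparse_tensor dialect is registered in the context.

// Illustrative sketch (not from this patch): parse an encoding that uses the
// new "structured[2, 4]" level via the MLIR C API. Assumes the sparse_tensor
// dialect is registered in ctx; the map is an assumed 2:4 layout.
#include "mlir-c/Dialect/SparseTensor.h"
#include "mlir-c/IR.h"

static MlirAttribute parseStructured24(MlirContext ctx) {
  const char *text =
      "#sparse_tensor.encoding<{ map = (i, j) -> "
      "(i : dense, j floordiv 4 : dense, j mod 4 : structured[2, 4]) }>";
  return mlirAttributeParseGet(ctx, mlirStringRefCreateFromCString(text));
}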

For a compressed level, each position interval is represented in a compact
way with a lowerbound `pos(i)` and an upperbound `pos(i+1) - 1`, which implies
@@ -374,7 +375,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedLT(getLvlType(l)); }
bool isSingletonLvl(::mlir::sparse_tensor::Level l) const { return isSingletonLT(getLvlType(l)); }
bool isLooseCompressedLvl(::mlir::sparse_tensor::Level l) const { return isLooseCompressedLT(getLvlType(l)); }
bool isTwoOutOfFourLvl(::mlir::sparse_tensor::Level l) const { return is2OutOf4LT(getLvlType(l)); }
bool isNOutOfMLvl(::mlir::sparse_tensor::Level l) const { return isNOutOfMLT(getLvlType(l)); }
bool isOrderedLvl(::mlir::sparse_tensor::Level l) const { return isOrderedLT(getLvlType(l)); }
bool isUniqueLvl(::mlir::sparse_tensor::Level l) const { return isUniqueLT(getLvlType(l)); }

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h
@@ -291,7 +291,7 @@ class SparseTensorType {
return isLooseCompressedLT(getLvlType(l));
}
bool isSingletonLvl(Level l) const { return isSingletonLT(getLvlType(l)); }
bool is2OutOf4Lvl(Level l) const { return is2OutOf4LT(getLvlType(l)); }
bool isNOutOfMLvl(Level l) const { return isNOutOfMLT(getLvlType(l)); }
bool isOrderedLvl(Level l) const { return isOrderedLT(getLvlType(l)); }
bool isUniqueLvl(Level l) const { return isUniqueLT(getLvlType(l)); }
bool isWithPos(Level l) const { return isWithPosLT(getLvlType(l)); }
2 changes: 1 addition & 1 deletion mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -510,7 +510,7 @@ class Merger {
if (isLvlWithNonTrivialIdxExp(b)) {
auto lt = getLoopDependentLevelType(b);
return isCompressedLT(lt) || isSingletonLT(lt) ||
isLooseCompressedLT(lt) || is2OutOf4LT(lt);
isLooseCompressedLT(lt) || isNOutOfMLT(lt);
}
return false;
}
20 changes: 10 additions & 10 deletions mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -123,8 +123,8 @@ class SparseTensorStorageBase {
/// Safely checks if the level uses singleton storage.
bool isSingletonLvl(uint64_t l) const { return isSingletonLT(getLvlType(l)); }

/// Safely checks if the level uses 2 out of 4 storage.
bool is2OutOf4Lvl(uint64_t l) const { return is2OutOf4LT(getLvlType(l)); }
/// Safely checks if the level uses n out of m storage.
bool isNOutOfMLvl(uint64_t l) const { return isNOutOfMLT(getLvlType(l)); }

/// Safely checks if the level is ordered.
bool isOrderedLvl(uint64_t l) const { return isOrderedLT(getLvlType(l)); }
@@ -450,7 +450,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
void appendCrd(uint64_t lvl, uint64_t full, uint64_t crd) {
if (!isDenseLvl(lvl)) {
assert(isCompressedLvl(lvl) || isLooseCompressedLvl(lvl) ||
isSingletonLvl(lvl) || is2OutOf4Lvl(lvl));
isSingletonLvl(lvl) || isNOutOfMLvl(lvl));
coordinates[lvl].push_back(detail::checkOverflowCast<C>(crd));
} else { // Dense level.
assert(crd >= full && "Coordinate was already filled");
@@ -473,7 +473,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
return positions[l][parentSz];
if (isLooseCompressedLvl(l))
return positions[l][2 * parentSz - 1];
if (isSingletonLvl(l) || is2OutOf4Lvl(l))
if (isSingletonLvl(l) || isNOutOfMLvl(l))
return parentSz; // new size same as the parent
assert(isDenseLvl(l));
return parentSz * getLvlSize(l);
@@ -527,7 +527,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
uint64_t pos = coordinates[l].size();
positions[l].insert(positions[l].end(), 2 * count,
detail::checkOverflowCast<P>(pos));
} else if (isSingletonLvl(l) || is2OutOf4Lvl(l)) {
} else if (isSingletonLvl(l) || isNOutOfMLvl(l)) {
return; // Nothing to finalize.
} else { // Dense dimension.
assert(isDenseLvl(l));
@@ -624,7 +624,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
lvlCursor[l] = static_cast<uint64_t>(coordinatesL[pos]);
toCOO(pos, l + 1, dimCoords);
}
} else if (isSingletonLvl(l) || is2OutOf4Lvl(l)) {
} else if (isSingletonLvl(l) || isNOutOfMLvl(l)) {
assert(parentPos < coordinates[l].size());
lvlCursor[l] = static_cast<uint64_t>(coordinates[l][parentPos]);
toCOO(parentPos, l + 1, dimCoords);
@@ -721,8 +721,8 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
} else if (isSingletonLvl(l)) {
coordinates[l].reserve(sz);
sz = 1;
} else if (is2OutOf4Lvl(l)) {
assert(l == lvlRank - 1 && "unexpected 2:4 usage");
} else if (isNOutOfMLvl(l)) {
assert(l == lvlRank - 1 && "unexpected n:m usage");
sz = detail::checkedMul(sz, lvlSizes[l]) / 2;
coordinates[l].reserve(sz);
values.reserve(sz);
@@ -791,8 +791,8 @@
}
} else if (isSingletonLvl(l)) {
assert(0 && "general singleton not supported yet");
} else if (is2OutOf4Lvl(l)) {
assert(0 && "2Out4 not supported yet");
} else if (isNOutOfMLvl(l)) {
assert(0 && "n ouf of m not supported yet");
} else {
assert(isDenseLvl(l));
}
2 changes: 1 addition & 1 deletion mlir/lib/Bindings/Python/DialectSparseTensor.cpp
@@ -25,7 +25,7 @@ using namespace mlir::python::adaptors;
static void populateDialectSparseTensorSubmodule(const py::module &m) {
py::enum_<MlirBaseSparseTensorLevelType>(m, "LevelType", py::module_local())
.value("dense", MLIR_SPARSE_TENSOR_LEVEL_DENSE)
.value("compressed24", MLIR_SPARSE_TENSOR_LEVEL_TWO_OUT_OF_FOUR)
.value("n_out_of_m", MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M)
.value("compressed", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED)
.value("compressed_nu", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU)
.value("compressed_no", MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO)
49 changes: 30 additions & 19 deletions mlir/lib/CAPI/Dialect/SparseTensor.cpp
@@ -20,25 +20,36 @@ MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(SparseTensor, sparse_tensor,
mlir::sparse_tensor::SparseTensorDialect)

// Ensure the C-API enums are int-castable to C++ equivalents.
static_assert(static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_DENSE) ==
static_cast<int>(LevelType::Dense) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED) ==
static_cast<int>(LevelType::Compressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU) ==
static_cast<int>(LevelType::CompressedNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO) ==
static_cast<int>(LevelType::CompressedNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO) ==
static_cast<int>(LevelType::CompressedNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON) ==
static_cast<int>(LevelType::Singleton) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU) ==
static_cast<int>(LevelType::SingletonNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO) ==
static_cast<int>(LevelType::SingletonNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO) ==
static_cast<int>(LevelType::SingletonNuNo),
"MlirSparseTensorLevelType (C-API) and LevelType (C++) mismatch");
static_assert(
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_DENSE) ==
static_cast<int>(LevelType::Dense) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED) ==
static_cast<int>(LevelType::Compressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU) ==
static_cast<int>(LevelType::CompressedNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NO) ==
static_cast<int>(LevelType::CompressedNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_COMPRESSED_NU_NO) ==
static_cast<int>(LevelType::CompressedNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON) ==
static_cast<int>(LevelType::Singleton) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU) ==
static_cast<int>(LevelType::SingletonNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NO) ==
static_cast<int>(LevelType::SingletonNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_SINGLETON_NU_NO) ==
static_cast<int>(LevelType::SingletonNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED) ==
static_cast<int>(LevelType::LooseCompressed) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU) ==
static_cast<int>(LevelType::LooseCompressedNu) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NO) ==
static_cast<int>(LevelType::LooseCompressedNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_LOOSE_COMPRESSED_NU_NO) ==
static_cast<int>(LevelType::LooseCompressedNuNo) &&
static_cast<int>(MLIR_SPARSE_TENSOR_LEVEL_N_OUT_OF_M) ==
static_cast<int>(LevelType::NOutOfM),
"MlirSparseTensorLevelType (C-API) and LevelType (C++) mismatch");

bool mlirAttributeIsASparseTensorEncodingAttr(MlirAttribute attr) {
return isa<SparseTensorEncodingAttr>(unwrap(attr));
54 changes: 43 additions & 11 deletions mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp
@@ -29,12 +29,21 @@ using namespace mlir::sparse_tensor::ir_detail;
// `LvlTypeParser` implementation.
//===----------------------------------------------------------------------===//

FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
FailureOr<uint64_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
StringRef base;
const auto loc = parser.getCurrentLocation();
ERROR_IF(failed(parser.parseOptionalKeyword(&base)),
"expected valid level format (e.g. dense, compressed or singleton)")
uint8_t properties = 0;
uint64_t properties = 0;
SmallVector<unsigned> structure;

if (base.compare("structured") == 0) {
ParseResult res = parser.parseCommaSeparatedList(
mlir::OpAsmParser::Delimiter::OptionalSquare,
[&]() -> ParseResult { return parseStructure(parser, &structure); },
" in block n out of m");
FAILURE_IF_FAILED(res)
}

ParseResult res = parser.parseCommaSeparatedList(
mlir::OpAsmParser::Delimiter::OptionalParen,
@@ -44,15 +53,20 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {

// Set the base bit for properties.
if (base.compare("dense") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Dense);
properties |= static_cast<uint64_t>(LevelFormat::Dense);
} else if (base.compare("compressed") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Compressed);
} else if (base.compare("block2_4") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::TwoOutOfFour);
properties |= static_cast<uint64_t>(LevelFormat::Compressed);
} else if (base.compare("structured") == 0) {
if (structure.size() != 2) {
parser.emitError(loc, "expected exactly 2 structure sizes");
return failure();
}
properties |= static_cast<uint64_t>(LevelFormat::NOutOfM);
properties |= nToBits(structure[0]) | mToBits(structure[1]);
} else if (base.compare("loose_compressed") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::LooseCompressed);
properties |= static_cast<uint64_t>(LevelFormat::LooseCompressed);
} else if (base.compare("singleton") == 0) {
properties |= static_cast<uint8_t>(LevelFormat::Singleton);
properties |= static_cast<uint64_t>(LevelFormat::Singleton);
} else {
parser.emitError(loc, "unknown level format: ") << base;
return failure();
@@ -64,20 +78,38 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
}

ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
uint8_t *properties) const {
uint64_t *properties) const {
StringRef strVal;
auto loc = parser.getCurrentLocation();
ERROR_IF(failed(parser.parseOptionalKeyword(&strVal)),
"expected valid level property (e.g. nonordered, nonunique or high)")
if (strVal.compare("nonunique") == 0) {
*properties |= static_cast<uint8_t>(LevelPropertyNondefault::Nonunique);
*properties |= static_cast<uint64_t>(LevelPropertyNondefault::Nonunique);
} else if (strVal.compare("nonordered") == 0) {
*properties |= static_cast<uint8_t>(LevelPropertyNondefault::Nonordered);
*properties |= static_cast<uint64_t>(LevelPropertyNondefault::Nonordered);
} else {
parser.emitError(loc, "unknown level property: ") << strVal;
return failure();
}
return success();
}

ParseResult
LvlTypeParser::parseStructure(AsmParser &parser,
SmallVector<unsigned> *structure) const {
int intVal;
auto loc = parser.getCurrentLocation();
OptionalParseResult intValParseResult = parser.parseOptionalInteger(intVal);
if (intValParseResult.has_value()) {
if (failed(*intValParseResult)) {
parser.emitError(loc, "failed to parse block size");
return failure();
}
structure->push_back(intVal);
return success();
}
parser.emitError(loc, "expected valid integer for block size");
return failure();
}
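Putting the pieces together: for "structured[2, 4]" the parser sets the NOutOfM format bit plus the encoded n and m values. The following round-trip sketch uses only helpers visible in this patch (LevelFormat::NOutOfM, nToBits/mToBits above, isNOutOfMLT, and the getN/getM accessors used by the printer later); it assumes LevelType is the 64-bit enum from Enums.h, whose exact bit positions are not shown in this diff, and it is not the parser's literal code path.

// Illustrative sketch (not from this patch): assemble and query an n:m level
// type. Assumes nToBits/mToBits, getN/getM, and isNOutOfMLT live in
// mlir::sparse_tensor (Enums.h) with the signatures implied by their uses above.
#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include <cassert>
#include <cstdint>

using namespace mlir::sparse_tensor;

void roundTripStructured24() {
  uint64_t bits = static_cast<uint64_t>(LevelFormat::NOutOfM) |
                  nToBits(2) | mToBits(4);
  auto lt = static_cast<LevelType>(bits);
  assert(isNOutOfMLT(lt));                // same predicate the codegen changes use
  assert(getN(lt) == 2 && getM(lt) == 4); // what getNOutOfMString reads back
}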

//===----------------------------------------------------------------------===//
6 changes: 4 additions & 2 deletions mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.h
@@ -18,10 +18,12 @@ namespace ir_detail {
class LvlTypeParser {
public:
LvlTypeParser() = default;
FailureOr<uint8_t> parseLvlType(AsmParser &parser) const;
FailureOr<uint64_t> parseLvlType(AsmParser &parser) const;

private:
ParseResult parseProperty(AsmParser &parser, uint8_t *properties) const;
ParseResult parseProperty(AsmParser &parser, uint64_t *properties) const;
ParseResult parseStructure(AsmParser &parser,
SmallVector<unsigned> *structure) const;
};

} // namespace ir_detail
16 changes: 14 additions & 2 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -613,16 +613,28 @@ void SparseTensorEncodingAttr::printDimensions(
}
}

std::string getNOutOfMString(LevelType lt) {
if (isNOutOfMLT(lt)) {
unsigned n = getN(lt);
unsigned m = getM(lt);
auto output = "[" + std::to_string(n) + ", " + std::to_string(m) + "]";
return output;
}
return "";
}
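In other words, for an n:m level this helper produces the "[n, m]" suffix the parser accepts, so an encoding declared with structured[2, 4] prints back (via printLevels below) as "... : structured[2, 4]" and round-trips; for every other level type it contributes an empty string and the printed form is unchanged. This assumes toMLIRString, defined in Enums.h and not shown in this diff, prints the "structured" base for n:m levels.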

void SparseTensorEncodingAttr::printLevels(AffineMap &map, AsmPrinter &printer,
ArrayRef<LevelType> lvlTypes) const {
for (unsigned i = 0, n = map.getNumResults() - 1; i < n; i++) {
map.getResult(i).print(printer.getStream());
printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
printer << " : " << toMLIRString(lvlTypes[i])
<< getNOutOfMString(lvlTypes[i]) << ", ";
}
if (map.getNumResults() >= 1) {
auto lastIndex = map.getNumResults() - 1;
map.getResult(lastIndex).print(printer.getStream());
printer << " : " << toMLIRString(lvlTypes[lastIndex]);
printer << " : " << toMLIRString(lvlTypes[lastIndex])
<< getNOutOfMString(lvlTypes[lastIndex]);
}
}

mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -451,7 +451,7 @@ static bool isAdmissibleBSR(SparseTensorType &aTp) {
/// Test for 2:4 matrix with suitable metadata.
static bool isAdmissible24(SparseTensorType &aTp) {
return aTp.getDimRank() == 2 && aTp.getLvlRank() == 3 && aTp.isDenseLvl(0) &&
aTp.isDenseLvl(1) && aTp.is2OutOf4Lvl(2) && isAdmissibleMetaData(aTp);
aTp.isDenseLvl(1) && aTp.isNOutOfMLvl(2) && isAdmissibleMetaData(aTp);
}

/// Test for conversion into 2:4 matrix.
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -130,7 +130,7 @@ static void allocSchemeForRank(OpBuilder &builder, Location loc,
createPushback(builder, loc, desc, SparseTensorFieldKind::PosMemRef, lvl,
/*value=*/posZero, /*repeat=*/linear);
return;
} else if (isSingletonLT(lt) || is2OutOf4LT(lt)) {
} else if (isSingletonLT(lt) || isNOutOfMLT(lt)) {
return; // nothing to do
}
// Keep compounding the size, but nothing needs to be initialized
@@ -409,7 +409,7 @@ static void genEndInsert(OpBuilder &builder, Location loc,
}
} else {
assert(isDenseLT(lt) || isLooseCompressedLT(lt) || isSingletonLT(lt) ||
is2OutOf4LT(lt));
isNOutOfMLT(lt));
}
}
}
@@ -488,7 +488,7 @@ class SparseInsertGenerator
}
parentPos =
genCompressed(builder, loc, desc, coords, value, parentPos, lvl);
} else if (isSingletonLT(lt) || is2OutOf4LT(lt)) {
} else if (isSingletonLT(lt) || isNOutOfMLT(lt)) {
// Create:
// coordinates[lvl].push_back(coords[lvl])
// positions[lvl] = positions[lvl-1]
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -891,7 +891,7 @@ static scf::IfOp genIf(CodegenEnv &env, OpBuilder &builder, LoopId curr,
assert(curr == env.merger().loop(b));
Value clause;
if (isCompressedLT(lt) || isSingletonLT(lt) ||
isLooseCompressedLT(lt) || is2OutOf4LT(lt)) {
isLooseCompressedLT(lt) || isNOutOfMLT(lt)) {
assert(lvl.has_value());
const Value crd = env.emitter().getCoord(tid, *lvl);
const Value lvar = env.getLoopVar(curr);