Skip to content

Commit 1dd387e

Browse files
authored
[mlir][sparse] change dim level type -> level type (#73058)
The "dimension" before "level" does not really make sense. Note that renaming the actual type DimLevelType to LevelType is still TBD, since this is an externally visible change (e.g. visible to Python API).
1 parent e07fec1 commit 1dd387e

File tree

18 files changed

+361
-365
lines changed

18 files changed

+361
-365
lines changed

mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h

Lines changed: 161 additions & 163 deletions
Large diffs are not rendered by default.

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -370,13 +370,13 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
370370
/// are always all-dense.)
371371
::mlir::sparse_tensor::DimLevelType getLvlType(::mlir::sparse_tensor::Level l) const;
372372

373-
bool isDenseLvl(::mlir::sparse_tensor::Level l) const { return isDenseDLT(getLvlType(l)); }
374-
bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedDLT(getLvlType(l)); }
375-
bool isSingletonLvl(::mlir::sparse_tensor::Level l) const { return isSingletonDLT(getLvlType(l)); }
376-
bool isLooseCompressedLvl(::mlir::sparse_tensor::Level l) const { return isLooseCompressedDLT(getLvlType(l)); }
377-
bool isTwoOutOfFourLvl(::mlir::sparse_tensor::Level l) const { return is2OutOf4DLT(getLvlType(l)); }
378-
bool isOrderedLvl(::mlir::sparse_tensor::Level l) const { return isOrderedDLT(getLvlType(l)); }
379-
bool isUniqueLvl(::mlir::sparse_tensor::Level l) const { return isUniqueDLT(getLvlType(l)); }
373+
bool isDenseLvl(::mlir::sparse_tensor::Level l) const { return isDenseLT(getLvlType(l)); }
374+
bool isCompressedLvl(::mlir::sparse_tensor::Level l) const { return isCompressedLT(getLvlType(l)); }
375+
bool isSingletonLvl(::mlir::sparse_tensor::Level l) const { return isSingletonLT(getLvlType(l)); }
376+
bool isLooseCompressedLvl(::mlir::sparse_tensor::Level l) const { return isLooseCompressedLT(getLvlType(l)); }
377+
bool isTwoOutOfFourLvl(::mlir::sparse_tensor::Level l) const { return is2OutOf4LT(getLvlType(l)); }
378+
bool isOrderedLvl(::mlir::sparse_tensor::Level l) const { return isOrderedLT(getLvlType(l)); }
379+
bool isUniqueLvl(::mlir::sparse_tensor::Level l) const { return isUniqueLT(getLvlType(l)); }
380380

381381
/// Returns true if every level is dense. Also returns true for
382382
/// the null encoding (since dense-tensors are always all-dense).

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ class StorageLayout {
126126
void foreachField(
127127
llvm::function_ref<bool(
128128
FieldIndex /*fieldIdx*/, SparseTensorFieldKind /*fieldKind*/,
129-
Level /*lvl (if applicable)*/, DimLevelType /*DLT (if applicable)*/)>)
129+
Level /*lvl (if applicable)*/, DimLevelType /*LT (if applicable)*/)>)
130130
const;
131131

132132
/// Gets the field index for required field.

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorType.h

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -293,17 +293,17 @@ class SparseTensorType {
293293

294294
// We can't just delegate these, since we want to use this class's
295295
// `getLvlType` method instead of STEA's.
296-
bool isDenseLvl(Level l) const { return isDenseDLT(getLvlType(l)); }
297-
bool isCompressedLvl(Level l) const { return isCompressedDLT(getLvlType(l)); }
296+
bool isDenseLvl(Level l) const { return isDenseLT(getLvlType(l)); }
297+
bool isCompressedLvl(Level l) const { return isCompressedLT(getLvlType(l)); }
298298
bool isLooseCompressedLvl(Level l) const {
299-
return isLooseCompressedDLT(getLvlType(l));
299+
return isLooseCompressedLT(getLvlType(l));
300300
}
301-
bool isSingletonLvl(Level l) const { return isSingletonDLT(getLvlType(l)); }
302-
bool is2OutOf4Lvl(Level l) const { return is2OutOf4DLT(getLvlType(l)); }
303-
bool isOrderedLvl(Level l) const { return isOrderedDLT(getLvlType(l)); }
304-
bool isUniqueLvl(Level l) const { return isUniqueDLT(getLvlType(l)); }
305-
bool isWithPos(Level l) const { return isWithPosDLT(getLvlType(l)); }
306-
bool isWithCrd(Level l) const { return isWithCrdDLT(getLvlType(l)); }
301+
bool isSingletonLvl(Level l) const { return isSingletonLT(getLvlType(l)); }
302+
bool is2OutOf4Lvl(Level l) const { return is2OutOf4LT(getLvlType(l)); }
303+
bool isOrderedLvl(Level l) const { return isOrderedLT(getLvlType(l)); }
304+
bool isUniqueLvl(Level l) const { return isUniqueLT(getLvlType(l)); }
305+
bool isWithPos(Level l) const { return isWithPosLT(getLvlType(l)); }
306+
bool isWithCrd(Level l) const { return isWithCrdLT(getLvlType(l)); }
307307

308308
/// Returns the coordinate-overhead bitwidth, defaulting to zero.
309309
unsigned getCrdWidth() const { return enc ? enc.getCrdWidth() : 0; }

mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ using LatPointId = unsigned;
5757
using LatSetId = unsigned;
5858

5959
/// A pair of level and its corresponding DimLevelType of a tensor.
60-
using LvlDLTPair = std::pair<Level, DimLevelType>;
60+
using LvlLTPair = std::pair<Level, DimLevelType>;
6161

6262
/// A pair of loop id and its coefficients. E.g., for affine expression in the
6363
/// affine map `2 * d0`, loop id = 0, coefficient = 2.
@@ -422,9 +422,9 @@ class Merger {
422422

423423
/// Sets the level number and level-type of the `t`th tensor on
424424
/// `i`th loop.
425-
void setLevelAndType(TensorId t, LoopId i, Level lvl, DimLevelType dlt) {
426-
assert(isValidLevel(t, lvl) && isValidLoopId(i) && isValidDLT(dlt));
427-
lvlTypes[t][i] = dlt;
425+
void setLevelAndType(TensorId t, LoopId i, Level lvl, DimLevelType lt) {
426+
assert(isValidLevel(t, lvl) && isValidLoopId(i) && isValidLT(lt));
427+
lvlTypes[t][i] = lt;
428428
loopToLvl[t][i] = lvl;
429429
lvlToLoop[t][lvl] = i;
430430
// TODO: favor a constant loop bound when there are multiple choices.
@@ -467,12 +467,12 @@ class Merger {
467467
/// Sets whether the output tensor is sparse or not.
468468
void setHasSparseOut(bool s) { hasSparseOut = s; }
469469

470-
/// Establishes the two-way map that i <-> <t, lvl, dlt>.
470+
/// Establishes the two-way map that i <-> <t, lvl, lt>.
471471
void setLoopDependentTensorLevel(LoopId i, TensorId t, Level lvl,
472-
DimLevelType dlt, unsigned coefficient) {
472+
DimLevelType lt, unsigned coefficient) {
473473
assert(isValidLoopId(i) && isValidLevel(t, lvl));
474474
assert(!loopToUnresolvedLvls[i][t].has_value()); // must be the first def
475-
loopToUnresolvedLvls[i][t] = std::make_pair(lvl, dlt);
475+
loopToUnresolvedLvls[i][t] = std::make_pair(lvl, lt);
476476
levelToDependentLoop[t][lvl].emplace_back(i, coefficient);
477477
}
478478

@@ -508,9 +508,9 @@ class Merger {
508508
/// non-trivial index expression.
509509
bool isSparseLvlWithNonTrivialIdxExp(TensorLoopId b) const {
510510
if (isLvlWithNonTrivialIdxExp(b)) {
511-
auto dlt = getLoopDependentLevelType(b);
512-
return isCompressedDLT(dlt) || isSingletonDLT(dlt) ||
513-
isLooseCompressedDLT(dlt) || is2OutOf4DLT(dlt);
511+
auto lt = getLoopDependentLevelType(b);
512+
return isCompressedLT(lt) || isSingletonLT(lt) ||
513+
isLooseCompressedLT(lt) || is2OutOf4LT(lt);
514514
}
515515
return false;
516516
}
@@ -647,9 +647,9 @@ class Merger {
647647
/// Map from a loop to its dependencies if any.
648648
/// The dependencies of a loop is a set of (tensor, level) pairs.
649649
/// It is currently only set for non-trivial index expressions.
650-
/// E.g., A[i+j] => i and j will have dependencies {A0, dlt(A0)} to indicate
650+
/// E.g., A[i+j] => i and j will have dependencies {A0, lt(A0)} to indicate
651651
/// that i and j are used in the non-trivial index expression on A0.
652-
std::vector<std::vector<std::optional<LvlDLTPair>>> loopToUnresolvedLvls;
652+
std::vector<std::vector<std::optional<LvlLTPair>>> loopToUnresolvedLvls;
653653

654654
/// The inverse map of ldxToDependencies from tensor level -> dependent loop
655655
/// E.g., A[2i+j], we have A0 => {(2, i), (1, j)}, to indicate that A0 uses

mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -108,31 +108,29 @@ class SparseTensorStorageBase {
108108
}
109109

110110
/// Safely checks if the level uses dense storage.
111-
bool isDenseLvl(uint64_t l) const { return isDenseDLT(getLvlType(l)); }
111+
bool isDenseLvl(uint64_t l) const { return isDenseLT(getLvlType(l)); }
112112

113113
/// Safely checks if the level uses compressed storage.
114114
bool isCompressedLvl(uint64_t l) const {
115-
return isCompressedDLT(getLvlType(l));
115+
return isCompressedLT(getLvlType(l));
116116
}
117117

118118
/// Safely checks if the level uses loose compressed storage.
119119
bool isLooseCompressedLvl(uint64_t l) const {
120-
return isLooseCompressedDLT(getLvlType(l));
120+
return isLooseCompressedLT(getLvlType(l));
121121
}
122122

123123
/// Safely checks if the level uses singleton storage.
124-
bool isSingletonLvl(uint64_t l) const {
125-
return isSingletonDLT(getLvlType(l));
126-
}
124+
bool isSingletonLvl(uint64_t l) const { return isSingletonLT(getLvlType(l)); }
127125

128126
/// Safely checks if the level uses 2 out of 4 storage.
129-
bool is2OutOf4Lvl(uint64_t l) const { return is2OutOf4DLT(getLvlType(l)); }
127+
bool is2OutOf4Lvl(uint64_t l) const { return is2OutOf4LT(getLvlType(l)); }
130128

131129
/// Safely checks if the level is ordered.
132-
bool isOrderedLvl(uint64_t l) const { return isOrderedDLT(getLvlType(l)); }
130+
bool isOrderedLvl(uint64_t l) const { return isOrderedLT(getLvlType(l)); }
133131

134132
/// Safely checks if the level is unique.
135-
bool isUniqueLvl(uint64_t l) const { return isUniqueDLT(getLvlType(l)); }
133+
bool isUniqueLvl(uint64_t l) const { return isUniqueLT(getLvlType(l)); }
136134

137135
/// Gets positions-overhead storage for the given level.
138136
#define DECL_GETPOSITIONS(PNAME, P) \
@@ -296,7 +294,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
296294
void lexInsert(const uint64_t *lvlCoords, V val) final {
297295
assert(lvlCoords);
298296
bool allDense = std::all_of(getLvlTypes().begin(), getLvlTypes().end(),
299-
[](DimLevelType lt) { return isDenseDLT(lt); });
297+
[](DimLevelType lt) { return isDenseLT(lt); });
300298
if (allDense) {
301299
uint64_t lvlRank = getLvlRank();
302300
uint64_t valIdx = 0;

mlir/lib/Dialect/SparseTensor/IR/Detail/DimLvlMap.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ bool DimSpec::isValid(Ranks const &ranks) const {
6565
LvlSpec::LvlSpec(LvlVar var, LvlExpr expr, DimLevelType type)
6666
: var(var), expr(expr), type(type) {
6767
assert(expr);
68-
assert(isValidDLT(type) && !isUndefDLT(type));
68+
assert(isValidLT(type) && !isUndefLT(type));
6969
}
7070

7171
bool LvlSpec::isValid(Ranks const &ranks) const {

mlir/lib/Dialect/SparseTensor/IR/Detail/LvlTypeParser.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ FailureOr<uint8_t> LvlTypeParser::parseLvlType(AsmParser &parser) const {
5858
return failure();
5959
}
6060

61-
ERROR_IF(!isValidDLT(static_cast<DimLevelType>(properties)),
61+
ERROR_IF(!isValidLT(static_cast<DimLevelType>(properties)),
6262
"invalid level type: level format doesn't support the properties");
6363
return properties;
6464
}
@@ -70,9 +70,9 @@ ParseResult LvlTypeParser::parseProperty(AsmParser &parser,
7070
ERROR_IF(failed(parser.parseOptionalKeyword(&strVal)),
7171
"expected valid level property (e.g. nonordered, nonunique or high)")
7272
if (strVal.compare("nonunique") == 0) {
73-
*properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonunique);
73+
*properties |= static_cast<uint8_t>(LevelPropertyNondefault::Nonunique);
7474
} else if (strVal.compare("nonordered") == 0) {
75-
*properties |= static_cast<uint8_t>(LevelNondefaultProperty::Nonordered);
75+
*properties |= static_cast<uint8_t>(LevelPropertyNondefault::Nonordered);
7676
} else {
7777
parser.emitError(loc, "unknown level property: ") << strVal;
7878
return failure();

mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,13 @@ void StorageLayout::foreachField(
7171
FieldIndex fieldIdx = kDataFieldStartingIdx;
7272
// Per-level storage.
7373
for (Level l = 0; l < end; l++) {
74-
const auto dlt = lvlTypes[l];
75-
if (isWithPosDLT(dlt)) {
76-
if (!(callback(fieldIdx++, SparseTensorFieldKind::PosMemRef, l, dlt)))
74+
const auto lt = lvlTypes[l];
75+
if (isWithPosLT(lt)) {
76+
if (!(callback(fieldIdx++, SparseTensorFieldKind::PosMemRef, l, lt)))
7777
return;
7878
}
79-
if (isWithCrdDLT(dlt)) {
80-
if (!(callback(fieldIdx++, SparseTensorFieldKind::CrdMemRef, l, dlt)))
79+
if (isWithCrdLT(lt)) {
80+
if (!(callback(fieldIdx++, SparseTensorFieldKind::CrdMemRef, l, lt)))
8181
return;
8282
}
8383
}
@@ -113,16 +113,16 @@ void sparse_tensor::foreachFieldAndTypeInSparseTensor(
113113
StorageLayout(stt).foreachField(
114114
[specType, posMemType, crdMemType, valMemType,
115115
callback](FieldIndex fieldIdx, SparseTensorFieldKind fieldKind,
116-
Level lvl, DimLevelType dlt) -> bool {
116+
Level lvl, DimLevelType lt) -> bool {
117117
switch (fieldKind) {
118118
case SparseTensorFieldKind::StorageSpec:
119-
return callback(specType, fieldIdx, fieldKind, lvl, dlt);
119+
return callback(specType, fieldIdx, fieldKind, lvl, lt);
120120
case SparseTensorFieldKind::PosMemRef:
121-
return callback(posMemType, fieldIdx, fieldKind, lvl, dlt);
121+
return callback(posMemType, fieldIdx, fieldKind, lvl, lt);
122122
case SparseTensorFieldKind::CrdMemRef:
123-
return callback(crdMemType, fieldIdx, fieldKind, lvl, dlt);
123+
return callback(crdMemType, fieldIdx, fieldKind, lvl, lt);
124124
case SparseTensorFieldKind::ValMemRef:
125-
return callback(valMemType, fieldIdx, fieldKind, lvl, dlt);
125+
return callback(valMemType, fieldIdx, fieldKind, lvl, lt);
126126
};
127127
llvm_unreachable("unrecognized field kind");
128128
});
@@ -167,7 +167,7 @@ StorageLayout::getFieldIndexAndStride(SparseTensorFieldKind kind,
167167
}
168168
foreachField([lvl, kind, &fieldIdx](FieldIndex fIdx,
169169
SparseTensorFieldKind fKind, Level fLvl,
170-
DimLevelType dlt) -> bool {
170+
DimLevelType lt) -> bool {
171171
if ((lvl && fLvl == lvl.value() && kind == fKind) ||
172172
(kind == fKind && fKind == SparseTensorFieldKind::ValMemRef)) {
173173
fieldIdx = fIdx;
@@ -313,15 +313,15 @@ SparseTensorEncodingAttr SparseTensorEncodingAttr::withoutDimSlices() const {
313313
}
314314

315315
bool SparseTensorEncodingAttr::isAllDense() const {
316-
return !getImpl() || llvm::all_of(getLvlTypes(), isDenseDLT);
316+
return !getImpl() || llvm::all_of(getLvlTypes(), isDenseLT);
317317
}
318318

319319
bool SparseTensorEncodingAttr::isCOO() const {
320320
return getImpl() && isCOOType(*this, 0, true);
321321
}
322322

323323
bool SparseTensorEncodingAttr::isAllOrdered() const {
324-
return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT);
324+
return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedLT);
325325
}
326326

327327
bool SparseTensorEncodingAttr::isIdentity() const {
@@ -645,14 +645,14 @@ SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
645645
return emitError() << "unexpected position bitwidth: " << posWidth;
646646
if (!acceptBitWidth(crdWidth))
647647
return emitError() << "unexpected coordinate bitwidth: " << crdWidth;
648-
if (auto it = std::find_if(lvlTypes.begin(), lvlTypes.end(), isSingletonDLT);
648+
if (auto it = std::find_if(lvlTypes.begin(), lvlTypes.end(), isSingletonLT);
649649
it != std::end(lvlTypes)) {
650650
if (it == lvlTypes.begin() ||
651-
(!isCompressedDLT(*(it - 1)) && !isLooseCompressedDLT(*(it - 1))))
651+
(!isCompressedLT(*(it - 1)) && !isLooseCompressedLT(*(it - 1))))
652652
return emitError() << "expected compressed or loose_compressed level "
653653
"before singleton level";
654654
if (!std::all_of(it, lvlTypes.end(),
655-
[](DimLevelType i) { return isSingletonDLT(i); }))
655+
[](DimLevelType i) { return isSingletonLT(i); }))
656656
return emitError() << "expected all singleton lvlTypes "
657657
"following a singleton level";
658658
}
@@ -955,17 +955,17 @@ Level mlir::sparse_tensor::toStoredDim(SparseTensorEncodingAttr enc,
955955
}
956956

957957
/// We normalized sparse tensor encoding attribute by always using
958-
/// ordered/unique DLT such that "compressed_nu_no" and "compressed_nu" (as well
958+
/// ordered/unique LT such that "compressed_nu_no" and "compressed_nu" (as well
959959
/// as other variants) lead to the same storage specifier type, and stripping
960960
/// irrelevant fields that do not alter the sparse tensor memory layout.
961961
static SparseTensorEncodingAttr
962962
getNormalizedEncodingForSpecifier(SparseTensorEncodingAttr enc) {
963-
SmallVector<DimLevelType> dlts;
964-
for (auto dlt : enc.getLvlTypes())
965-
dlts.push_back(*buildLevelType(*getLevelFormat(dlt), true, true));
963+
SmallVector<DimLevelType> lts;
964+
for (auto lt : enc.getLvlTypes())
965+
lts.push_back(*buildLevelType(*getLevelFormat(lt), true, true));
966966

967967
return SparseTensorEncodingAttr::get(
968-
enc.getContext(), dlts,
968+
enc.getContext(), lts,
969969
AffineMap(), // dimToLvl (irrelevant to storage specifier)
970970
AffineMap(), // lvlToDim (irrelevant to storage specifier)
971971
// Always use `index` for memSize and lvlSize instead of reusing
@@ -1070,15 +1070,15 @@ static LogicalResult verifyPackUnPack(Operation *op, bool requiresStaticShape,
10701070
bool misMatch = false;
10711071
layout.foreachField([&idx, &misMatch, stt, valTp,
10721072
lvlTps](FieldIndex fid, SparseTensorFieldKind fKind,
1073-
Level lvl, DimLevelType dlt) -> bool {
1073+
Level lvl, DimLevelType lt) -> bool {
10741074
if (fKind == SparseTensorFieldKind::StorageSpec)
10751075
return true;
10761076

10771077
Type inputTp = nullptr;
10781078
if (fKind == SparseTensorFieldKind::ValMemRef) {
10791079
inputTp = valTp;
10801080
} else {
1081-
assert(fid == idx && stt.getLvlType(lvl) == dlt);
1081+
assert(fid == idx && stt.getLvlType(lvl) == lt);
10821082
inputTp = lvlTps[idx++];
10831083
}
10841084
// The input element type and expected element type should match.

mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,10 +77,10 @@ class CodegenEnv {
7777
const TensorExp &exp(ExprId e) const { return latticeMerger.exp(e); }
7878
const LatPoint &lat(LatPointId l) const { return latticeMerger.lat(l); }
7979
ArrayRef<LatPointId> set(LatSetId s) const { return latticeMerger.set(s); }
80-
DimLevelType dlt(TensorId t, LoopId i) const {
80+
DimLevelType lt(TensorId t, LoopId i) const {
8181
return latticeMerger.getLvlType(t, i);
8282
}
83-
DimLevelType dlt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }
83+
DimLevelType lt(TensorLoopId b) const { return latticeMerger.getLvlType(b); }
8484

8585
unsigned getLoopNum() const { return latticeMerger.getNumLoops(); }
8686

mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -429,8 +429,8 @@ inline Value constantPrimaryTypeEncoding(OpBuilder &builder, Location loc,
429429

430430
/// Generates a constant of the internal dimension level type encoding.
431431
inline Value constantDimLevelTypeEncoding(OpBuilder &builder, Location loc,
432-
DimLevelType dlt) {
433-
return constantI8(builder, loc, static_cast<uint8_t>(dlt));
432+
DimLevelType lt) {
433+
return constantI8(builder, loc, static_cast<uint8_t>(lt));
434434
}
435435

436436
inline bool isZeroRankedTensorOrScalar(Type type) {

0 commit comments

Comments
 (0)