@@ -456,7 +456,7 @@ Operation *LoopEmitter::enterLoopOverTensorAtLvl(
   for (auto [t, l] : llvm::zip(tids, lvls)) {
     // TODO: this check for validity of the (t,l) pairs should be
     // checked/enforced at the callsites, if possible.
-    assert(t < lvlTypes.size() && l < lvlTypes[t].size());
+    assert(isValidLevel(t, l));
     assert(!coords[t][l]); // We cannot re-enter the same level
     const auto lvlTp = lvlTypes[t][l];
     const bool isSparse = isCompressedDLT(lvlTp) || isSingletonDLT(lvlTp);
@@ -572,7 +572,7 @@ Operation *LoopEmitter::enterLoopOverTensorAtLvl(
 Operation *LoopEmitter::enterFilterLoopOverTensorAtLvl(
     OpBuilder &builder, Location loc, TensorId tid, Level lvl,
     AffineExpr affine, MutableArrayRef<Value> reduc) {
-  assert(tid < lvlTypes.size() && lvl < lvlTypes[tid].size());
+  assert(isValidLevel(tid, lvl));
   assert(!affine.isa<AffineDimExpr>() && !isDenseDLT(lvlTypes[tid][lvl]));
   // We can not re-enter the same level.
   assert(!coords[tid][lvl]);
@@ -862,7 +862,7 @@ Operation *LoopEmitter::enterCoIterationOverTensorsAtLvls(
 
 void LoopEmitter::prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc,
                                              TensorId tid, Level dstLvl) {
-  assert(tid < lvlTypes.size() && dstLvl < lvlTypes[tid].size());
+  assert(isValidLevel(tid, dstLvl));
   const auto lvlTp = lvlTypes[tid][dstLvl];
 
   if (isDenseDLT(lvlTp))
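
For reference, the inline bounds checks being removed indicate what the new helper verifies. A minimal sketch of isValidLevel, assuming it is a const member of LoopEmitter (its actual definition is not part of this diff):

  // Hypothetical helper sketch: centralizes the (tid, lvl) bounds check
  // that each call site above previously spelled out inline.
  bool LoopEmitter::isValidLevel(TensorId tid, Level lvl) const {
    return tid < lvlTypes.size() && lvl < lvlTypes[tid].size();
  }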