@@ -303,7 +303,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
     uint64_t lvlRank = getLvlRank();
     uint64_t valIdx = 0;
     // Linearize the address
-    for (size_t lvl = 0; lvl < lvlRank; lvl++)
+    for (uint64_t lvl = 0; lvl < lvlRank; lvl++)
       valIdx = valIdx * getLvlSize(lvl) + lvlCoords[lvl];
     values[valIdx] = val;
     return;
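
The loop above linearizes a level-coordinate tuple into a single offset into the dense values array, i.e. ordinary row-major indexing. A minimal standalone sketch of the same computation (the free-function form and names are illustrative, not the class's API):

#include <cstdint>
#include <vector>

// Row-major linearization: with level sizes {4, 3}, coordinate (2, 1)
// maps to (0 * 4 + 2) * 3 + 1 = 7.
uint64_t linearize(const std::vector<uint64_t> &lvlSizes,
                   const std::vector<uint64_t> &lvlCoords) {
  uint64_t valIdx = 0;
  for (uint64_t lvl = 0; lvl < lvlSizes.size(); lvl++)
    valIdx = valIdx * lvlSizes[lvl] + lvlCoords[lvl];
  return valIdx;
}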
@@ -338,7 +338,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
       values[c] = 0;
       filled[c] = false;
       // Subsequent insertions are quick.
-      for (uint64_t i = 1; i < count; ++i) {
+      for (uint64_t i = 1; i < count; i++) {
         assert(c < added[i] && "non-lexicographic insertion");
         c = added[i];
         assert(c <= expsz);
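
This hunk belongs to the compression of an expanded access pattern: a classic sparse accumulator made of a dense values buffer, a filled mask, and a compact list of added coordinates, drained in sorted order and reset for reuse. A hedged, self-contained sketch of that pattern (the struct and its members are illustrative, not the class's expanded-insertion API):

#include <algorithm>
#include <cstdint>
#include <vector>

struct SparseAccumulator {
  std::vector<double> values;  // Dense scratch values.
  std::vector<bool> filled;    // Marks live coordinates.
  std::vector<uint64_t> added; // Coordinates touched so far.

  explicit SparseAccumulator(uint64_t size) : values(size), filled(size) {}

  // Scatter phase: accumulate into the dense buffer, remembering
  // each coordinate the first time it is touched.
  void accumulate(uint64_t c, double v) {
    if (!filled[c]) {
      filled[c] = true;
      added.push_back(c);
    }
    values[c] += v;
  }

  // Gather phase: emit (coordinate, value) pairs in sorted order and
  // reset the scratch buffers, mirroring the zeroing in the hunk above.
  template <typename Yield> void drain(Yield yield) {
    std::sort(added.begin(), added.end());
    for (uint64_t c : added) {
      yield(c, values[c]);
      values[c] = 0;
      filled[c] = false;
    }
    added.clear();
  }
};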
@@ -394,27 +394,27 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
 
     // In-place permutation.
     auto applyPerm = [this](std::vector<uint64_t> &perm) {
-      size_t length = perm.size();
-      size_t lvlRank = getLvlRank();
+      uint64_t length = perm.size();
+      uint64_t lvlRank = getLvlRank();
       // Cache for the current level coordinates.
       std::vector<P> lvlCrds(lvlRank);
-      for (size_t i = 0; i < length; i++) {
-        size_t current = i;
+      for (uint64_t i = 0; i < length; i++) {
+        uint64_t current = i;
         if (i != perm[current]) {
-          for (size_t l = 0; l < lvlRank; l++)
+          for (uint64_t l = 0; l < lvlRank; l++)
             lvlCrds[l] = coordinates[l][i];
           V val = values[i];
           // Deals with a permutation cycle.
           while (i != perm[current]) {
-            size_t next = perm[current];
+            uint64_t next = perm[current];
             // Swaps the level coordinates and value.
-            for (size_t l = 0; l < lvlRank; l++)
+            for (uint64_t l = 0; l < lvlRank; l++)
               coordinates[l][current] = coordinates[l][next];
             values[current] = values[next];
             perm[current] = current;
             current = next;
           }
-          for (size_t l = 0; l < lvlRank; l++)
+          for (uint64_t l = 0; l < lvlRank; l++)
             coordinates[l][current] = lvlCrds[l];
           values[current] = val;
           perm[current] = current;
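
For reference, applyPerm's strategy is cycle chasing: each element moves along its permutation cycle exactly once, and perm[current] = current marks a slot as settled, so the whole permutation is applied in linear time with only one cached element of extra state. A hedged single-array version (the class applies the same moves to every coordinates[l] array alongside values):

#include <cstdint>
#include <vector>

// Rearranges `values` so that values_new[i] == values_old[perm[i]],
// consuming `perm` (it is rewritten to the identity).
template <typename V>
void applyPermInPlace(std::vector<uint64_t> &perm, std::vector<V> &values) {
  const uint64_t length = perm.size();
  for (uint64_t i = 0; i < length; i++) {
    uint64_t current = i;
    if (i != perm[current]) {
      V val = values[i]; // Cache the head of the cycle.
      while (i != perm[current]) {
        const uint64_t next = perm[current];
        values[current] = values[next];
        perm[current] = current; // Mark this slot as settled.
        current = next;
      }
      values[current] = val; // Close the cycle with the cached head.
      perm[current] = current;
    }
  }
}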
@@ -557,7 +557,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
     const uint64_t lastLvl = lvlRank - 1;
     assert(diffLvl <= lvlRank);
     const uint64_t stop = lvlRank - diffLvl;
-    for (uint64_t i = 0; i < stop; ++i) {
+    for (uint64_t i = 0; i < stop; i++) {
       const uint64_t l = lastLvl - i;
       finalizeSegment(l, lvlCursor[l] + 1);
     }
@@ -569,7 +569,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
                V val) {
     const uint64_t lvlRank = getLvlRank();
     assert(diffLvl <= lvlRank);
-    for (uint64_t l = diffLvl; l < lvlRank; ++l) {
+    for (uint64_t l = diffLvl; l < lvlRank; l++) {
       const uint64_t c = lvlCoords[l];
       appendCrd(l, full, c);
       full = 0;
@@ -582,7 +582,7 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
   /// in the argument differ from those in the current cursor.
   uint64_t lexDiff(const uint64_t *lvlCoords) const {
     const uint64_t lvlRank = getLvlRank();
-    for (uint64_t l = 0; l < lvlRank; ++l) {
+    for (uint64_t l = 0; l < lvlRank; l++) {
       const auto crd = lvlCoords[l];
       const auto cur = lvlCursor[l];
       if (crd > cur || (crd == cur && !isUniqueLvl(l)) ||
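
lexDiff scans levels from outermost to innermost and reports the first level at which the incoming coordinates deviate from the current cursor; the condition truncated by the hunk above also folds in per-level uniqueness and ordering properties. A simplified sketch for the fully ordered, unique case (the free-function form is hypothetical):

#include <cassert>
#include <cstdint>
#include <vector>

// Returns the first level at which `lvlCoords` differs from `cursor`,
// assuming coordinates arrive in strictly increasing lexicographic order.
uint64_t lexDiff(const std::vector<uint64_t> &cursor,
                 const uint64_t *lvlCoords) {
  const uint64_t lvlRank = cursor.size();
  for (uint64_t l = 0; l < lvlRank; l++) {
    if (lvlCoords[l] > cursor[l])
      return l; // First level where the new coordinates advance.
    assert(lvlCoords[l] == cursor[l] && "non-lexicographic insertion");
  }
  assert(false && "duplicate coordinates");
  return lvlRank;
}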
@@ -705,7 +705,7 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
   // really use nnz and dense/sparse distribution.
   bool allDense = true;
   uint64_t sz = 1;
-  for (uint64_t l = 0; l < lvlRank; ++l) {
+  for (uint64_t l = 0; l < lvlRank; l++) {
     const DimLevelType dlt = lvlTypes[l]; // Avoid redundant bounds checking.
     if (isCompressedDLT(dlt)) {
       positions[l].reserve(sz + 1);
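
The surrounding constructor walks the levels once, keeping sz as a running bound on how many subtrees can exist at the current depth, so each compressed level can reserve its positions capacity up front (one entry per possible parent segment, plus one leading offset). A hedged sketch of that capacity estimate (the enum and function are illustrative; the real code also handles singleton levels and uses overflow-checked multiplication):

#include <cstdint>
#include <vector>

enum class LvlType { Dense, Compressed };

// Estimates the positions capacity per compressed level from the
// running product of dense-level sizes above it.
std::vector<uint64_t> positionCapacities(const std::vector<LvlType> &lvlTypes,
                                         const std::vector<uint64_t> &lvlSizes) {
  std::vector<uint64_t> caps(lvlTypes.size(), 0);
  uint64_t sz = 1;
  for (uint64_t l = 0; l < lvlTypes.size(); l++) {
    if (lvlTypes[l] == LvlType::Compressed) {
      caps[l] = sz + 1; // One slot per parent segment, plus the leading 0.
      sz = 1;           // Counting restarts below a compressed level.
    } else {
      sz *= lvlSizes[l]; // A dense level multiplies the subtree count.
    }
  }
  return caps;
}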