@@ -796,7 +796,7 @@ extern "C" {
 
 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \
   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \
-                           index_t d) { \
+                           index_type d) { \
     assert(ref && tensor); \
     std::vector<TYPE> *v; \
     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \
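
For readers who want to see the macro applied, here is a hand expansion of one instantiation that appears later in this file, IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers); the remainder of the macro body falls outside this hunk, so only the lines shown above are expanded:

void _mlir_ciface_sparsePointers64(StridedMemRefType<uint64_t, 1> *ref,
                                   void *tensor, index_type d) {
  assert(ref && tensor);
  std::vector<uint64_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
  // ... remainder of the macro body lies outside this hunk ...
}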
@@ -808,27 +808,28 @@ extern "C" {
 
 #define IMPL_ADDELT(NAME, TYPE) \
   void *_mlir_ciface_##NAME(void *tensor, TYPE value, \
-                            StridedMemRefType<index_t, 1> *iref, \
-                            StridedMemRefType<index_t, 1> *pref) { \
+                            StridedMemRefType<index_type, 1> *iref, \
+                            StridedMemRefType<index_type, 1> *pref) { \
     assert(tensor && iref && pref); \
     assert(iref->strides[0] == 1 && pref->strides[0] == 1); \
     assert(iref->sizes[0] == pref->sizes[0]); \
-    const index_t *indx = iref->data + iref->offset; \
-    const index_t *perm = pref->data + pref->offset; \
+    const index_type *indx = iref->data + iref->offset; \
+    const index_type *perm = pref->data + pref->offset; \
     uint64_t isize = iref->sizes[0]; \
-    std::vector<index_t> indices(isize); \
+    std::vector<index_type> indices(isize); \
     for (uint64_t r = 0; r < isize; r++) \
       indices[perm[r]] = indx[r]; \
     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value); \
     return tensor; \
   }
 
 #define IMPL_GETNEXT(NAME, V) \
-  bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *iref, \
+  bool _mlir_ciface_##NAME(void *tensor, \
+                           StridedMemRefType<index_type, 1> *iref, \
                            StridedMemRefType<V, 0> *vref) { \
     assert(tensor && iref && vref); \
     assert(iref->strides[0] == 1); \
-    index_t *indx = iref->data + iref->offset; \
+    index_type *indx = iref->data + iref->offset; \
     V *value = vref->data + vref->offset; \
     const uint64_t isize = iref->sizes[0]; \
     auto iter = static_cast<SparseTensorCOO<V> *>(tensor); \
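
The only nontrivial logic in IMPL_ADDELT is the scatter through the permutation memref: coordinate r of the incoming element lands at slot perm[r] of the stored index vector. A minimal standalone sketch of just that step, with invented values and no dependence on the runtime classes:

#include <cstdint>
#include <iostream>
#include <vector>

using index_type = uint64_t;

int main() {
  std::vector<index_type> indx = {3, 1, 4}; // incoming coordinates
  std::vector<index_type> perm = {2, 0, 1}; // dimension permutation
  // Scatter exactly as the macro does: indices[perm[r]] = indx[r].
  std::vector<index_type> indices(indx.size());
  for (uint64_t r = 0; r < indx.size(); r++)
    indices[perm[r]] = indx[r];
  for (index_type i : indices)
    std::cout << i << ' '; // prints: 1 4 3
  std::cout << '\n';
  return 0;
}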
@@ -844,30 +845,30 @@ extern "C" {
   }
 
 #define IMPL_LEXINSERT(NAME, V) \
-  void _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *cref, \
-                           V val) { \
+  void _mlir_ciface_##NAME(void *tensor, \
+                           StridedMemRefType<index_type, 1> *cref, V val) { \
     assert(tensor && cref); \
     assert(cref->strides[0] == 1); \
-    index_t *cursor = cref->data + cref->offset; \
+    index_type *cursor = cref->data + cref->offset; \
     assert(cursor); \
     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val); \
   }
 
 #define IMPL_EXPINSERT(NAME, V) \
   void _mlir_ciface_##NAME( \
-      void *tensor, StridedMemRefType<index_t, 1> *cref, \
+      void *tensor, StridedMemRefType<index_type, 1> *cref, \
       StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \
-      StridedMemRefType<index_t, 1> *aref, index_t count) { \
+      StridedMemRefType<index_type, 1> *aref, index_type count) { \
     assert(tensor && cref && vref && fref && aref); \
     assert(cref->strides[0] == 1); \
     assert(vref->strides[0] == 1); \
     assert(fref->strides[0] == 1); \
     assert(aref->strides[0] == 1); \
     assert(vref->sizes[0] == fref->sizes[0]); \
-    index_t *cursor = cref->data + cref->offset; \
+    index_type *cursor = cref->data + cref->offset; \
     V *values = vref->data + vref->offset; \
     bool *filled = fref->data + fref->offset; \
-    index_t *added = aref->data + aref->offset; \
+    index_type *added = aref->data + aref->offset; \
     static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \
         cursor, values, filled, added, count); \
   }
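
These insertion wrappers are likewise stamped out once per value type. As a sketch, expanding a hypothetical instantiation IMPL_LEXINSERT(lexInsertF64, double) by hand gives:

void _mlir_ciface_lexInsertF64(void *tensor,
                               StridedMemRefType<index_type, 1> *cref,
                               double val) {
  assert(tensor && cref);
  assert(cref->strides[0] == 1);
  index_type *cursor = cref->data + cref->offset;
  assert(cursor);
  static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val);
}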
@@ -883,11 +884,11 @@ extern "C" {
     delete coo; \
   }
 
-// Assume index_t is in fact uint64_t, so that _mlir_ciface_newSparseTensor
+// Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
 // can safely rewrite kIndex to kU64. We make this assertion to guarantee
 // that this file cannot get out of sync with its header.
-static_assert(std::is_same<index_t, uint64_t>::value,
-              "Expected index_t == uint64_t");
+static_assert(std::is_same<index_type, uint64_t>::value,
+              "Expected index_type == uint64_t");
 
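A standalone illustration of what the assertion buys (the enumerator list here is assumed for the sketch): once index_type is pinned to uint64_t, the MLIR index overhead type can be folded into the 64-bit case instead of adding a new arm to every switch:

#include <cstdint>
#include <type_traits>

using index_type = uint64_t; // mirrors the header-side typedef

// Enumerators assumed for this sketch only.
enum class OverheadType : uint32_t { kIndex, kU64, kU32, kU16, kU8 };

static_assert(std::is_same<index_type, uint64_t>::value,
              "Expected index_type == uint64_t");

// Safe precisely because of the static_assert above.
constexpr OverheadType normalize(OverheadType tp) {
  return tp == OverheadType::kIndex ? OverheadType::kU64 : tp;
}

static_assert(normalize(OverheadType::kIndex) == OverheadType::kU64,
              "kIndex must fold into kU64");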
 /// Constructs a new sparse tensor. This is the "swiss army knife"
 /// method for materializing sparse tensors into the computation.
@@ -901,17 +902,17 @@ static_assert(std::is_same<index_t, uint64_t>::value,
 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
 void *
 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
-                             StridedMemRefType<index_t, 1> *sref,
-                             StridedMemRefType<index_t, 1> *pref,
+                             StridedMemRefType<index_type, 1> *sref,
+                             StridedMemRefType<index_type, 1> *pref,
                              OverheadType ptrTp, OverheadType indTp,
                              PrimaryType valTp, Action action, void *ptr) {
   assert(aref && sref && pref);
   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const DimLevelType *sparsity = aref->data + aref->offset;
-  const index_t *sizes = sref->data + sref->offset;
-  const index_t *perm = pref->data + pref->offset;
+  const index_type *sizes = sref->data + sref->offset;
+  const index_type *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
 
   // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
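
All of the asserts above read the standard rank-1 memref descriptor fields (data, offset, sizes, strides). For readers unfamiliar with the MLIR C interface, a sketch of filling such a descriptor by hand; the struct layout follows StridedMemRefType from mlir/ExecutionEngine/CRunnerUtils.h, and the shape values are invented:

#include <cstdint>

template <typename T, int N>
struct StridedMemRefType {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[N];
  int64_t strides[N];
};

using index_type = uint64_t;

int main() {
  index_type shape[2] = {4, 8}; // sizes of a hypothetical 4x8 tensor
  StridedMemRefType<index_type, 1> sref;
  sref.basePtr = sref.data = shape;
  sref.offset = 0;
  sref.sizes[0] = 2;   // length of the shape array, i.e. the tensor rank
  sref.strides[0] = 1; // the wrappers assert unit stride
  return 0;
}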
@@ -1010,14 +1011,14 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
 }
 
 /// Methods that provide direct access to pointers.
-IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
+IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
 
 /// Methods that provide direct access to indices.
-IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
+IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
@@ -1092,15 +1093,15 @@ IMPL_OUT(outSparseTensorI8, int8_t)
 
 /// Helper method to read a sparse tensor filename from the environment,
 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-char *getTensorFilename(index_t id) {
+char *getTensorFilename(index_type id) {
   char var[80];
   sprintf(var, "TENSOR%" PRIu64, id);
   char *env = getenv(var);
   return env;
 }
 
 /// Returns size of sparse tensor in given dimension.
-index_t sparseDimSize(void *tensor, index_t d) {
+index_type sparseDimSize(void *tensor, index_type d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
 