Skip to content

Commit d2215e7

Browse files
committed
[mlir][sparse] Rename index_t to index_type again
While testing LLVM 14.0.0 rc1 on Solaris, I ran into a compile failure: from /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp:22: /usr/include/sys/types.h:103:16: error: conflicting declaration ‘typedef short int index_t’ 103 | typedef short index_t; | ^~~~~~~ In file included from /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp:17: /var/llvm/llvm-14.0.0-rc1/rc1/llvm-project/mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h:26:7: note: previous declaration as ‘using index_t = uint64_t’ 26 | using index_t = uint64_t; | ^~~~~~~ The same issue had already occurred in the past and was fixed in D72619 <https://reviews.llvm.org/D72619>. A more detailed explanation can also be found there. Tested on `amd64-pc-solaris2.11` and `sparcv9-solaris2.11`. Differential Revision: https://reviews.llvm.org/D119323
1 parent d39f4ac commit d2215e7

File tree

2 files changed

+28
-27
lines changed

2 files changed

+28
-27
lines changed

mlir/include/mlir/ExecutionEngine/SparseTensorUtils.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ extern "C" {
2323
/// type is 64-bit, but targets with different "index" bit widths should link
2424
/// with an alternatively built runtime support library.
2525
// TODO: support such targets?
26-
using index_t = uint64_t;
26+
using index_type = uint64_t;
2727

2828
/// Encoding of overhead types (both pointer overhead and indices
2929
/// overhead), for "overloading" @newSparseTensor.

mlir/lib/ExecutionEngine/SparseTensorUtils.cpp

Lines changed: 27 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -796,7 +796,7 @@ extern "C" {
796796

797797
#define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \
798798
void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \
799-
index_t d) { \
799+
index_type d) { \
800800
assert(ref &&tensor); \
801801
std::vector<TYPE> *v; \
802802
static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \
@@ -808,27 +808,28 @@ extern "C" {
808808

809809
#define IMPL_ADDELT(NAME, TYPE) \
810810
void *_mlir_ciface_##NAME(void *tensor, TYPE value, \
811-
StridedMemRefType<index_t, 1> *iref, \
812-
StridedMemRefType<index_t, 1> *pref) { \
811+
StridedMemRefType<index_type, 1> *iref, \
812+
StridedMemRefType<index_type, 1> *pref) { \
813813
assert(tensor &&iref &&pref); \
814814
assert(iref->strides[0] == 1 && pref->strides[0] == 1); \
815815
assert(iref->sizes[0] == pref->sizes[0]); \
816-
const index_t *indx = iref->data + iref->offset; \
817-
const index_t *perm = pref->data + pref->offset; \
816+
const index_type *indx = iref->data + iref->offset; \
817+
const index_type *perm = pref->data + pref->offset; \
818818
uint64_t isize = iref->sizes[0]; \
819-
std::vector<index_t> indices(isize); \
819+
std::vector<index_type> indices(isize); \
820820
for (uint64_t r = 0; r < isize; r++) \
821821
indices[perm[r]] = indx[r]; \
822822
static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value); \
823823
return tensor; \
824824
}
825825

826826
#define IMPL_GETNEXT(NAME, V) \
827-
bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *iref, \
827+
bool _mlir_ciface_##NAME(void *tensor, \
828+
StridedMemRefType<index_type, 1> *iref, \
828829
StridedMemRefType<V, 0> *vref) { \
829830
assert(tensor &&iref &&vref); \
830831
assert(iref->strides[0] == 1); \
831-
index_t *indx = iref->data + iref->offset; \
832+
index_type *indx = iref->data + iref->offset; \
832833
V *value = vref->data + vref->offset; \
833834
const uint64_t isize = iref->sizes[0]; \
834835
auto iter = static_cast<SparseTensorCOO<V> *>(tensor); \
@@ -844,30 +845,30 @@ extern "C" {
844845
}
845846

846847
#define IMPL_LEXINSERT(NAME, V) \
847-
void _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *cref, \
848-
V val) { \
848+
void _mlir_ciface_##NAME(void *tensor, \
849+
StridedMemRefType<index_type, 1> *cref, V val) { \
849850
assert(tensor &&cref); \
850851
assert(cref->strides[0] == 1); \
851-
index_t *cursor = cref->data + cref->offset; \
852+
index_type *cursor = cref->data + cref->offset; \
852853
assert(cursor); \
853854
static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val); \
854855
}
855856

856857
#define IMPL_EXPINSERT(NAME, V) \
857858
void _mlir_ciface_##NAME( \
858-
void *tensor, StridedMemRefType<index_t, 1> *cref, \
859+
void *tensor, StridedMemRefType<index_type, 1> *cref, \
859860
StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \
860-
StridedMemRefType<index_t, 1> *aref, index_t count) { \
861+
StridedMemRefType<index_type, 1> *aref, index_type count) { \
861862
assert(tensor &&cref &&vref &&fref &&aref); \
862863
assert(cref->strides[0] == 1); \
863864
assert(vref->strides[0] == 1); \
864865
assert(fref->strides[0] == 1); \
865866
assert(aref->strides[0] == 1); \
866867
assert(vref->sizes[0] == fref->sizes[0]); \
867-
index_t *cursor = cref->data + cref->offset; \
868+
index_type *cursor = cref->data + cref->offset; \
868869
V *values = vref->data + vref->offset; \
869870
bool *filled = fref->data + fref->offset; \
870-
index_t *added = aref->data + aref->offset; \
871+
index_type *added = aref->data + aref->offset; \
871872
static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \
872873
cursor, values, filled, added, count); \
873874
}
@@ -883,11 +884,11 @@ extern "C" {
883884
delete coo; \
884885
}
885886

886-
// Assume index_t is in fact uint64_t, so that _mlir_ciface_newSparseTensor
887+
// Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
887888
// can safely rewrite kIndex to kU64. We make this assertion to guarantee
888889
// that this file cannot get out of sync with its header.
889-
static_assert(std::is_same<index_t, uint64_t>::value,
890-
"Expected index_t == uint64_t");
890+
static_assert(std::is_same<index_type, uint64_t>::value,
891+
"Expected index_type == uint64_t");
891892

892893
/// Constructs a new sparse tensor. This is the "swiss army knife"
893894
/// method for materializing sparse tensors into the computation.
@@ -901,17 +902,17 @@ static_assert(std::is_same<index_t, uint64_t>::value,
901902
/// kToIterator = returns iterator from storage in ptr (call getNext() to use)
902903
void *
903904
_mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
904-
StridedMemRefType<index_t, 1> *sref,
905-
StridedMemRefType<index_t, 1> *pref,
905+
StridedMemRefType<index_type, 1> *sref,
906+
StridedMemRefType<index_type, 1> *pref,
906907
OverheadType ptrTp, OverheadType indTp,
907908
PrimaryType valTp, Action action, void *ptr) {
908909
assert(aref && sref && pref);
909910
assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
910911
pref->strides[0] == 1);
911912
assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
912913
const DimLevelType *sparsity = aref->data + aref->offset;
913-
const index_t *sizes = sref->data + sref->offset;
914-
const index_t *perm = pref->data + pref->offset;
914+
const index_type *sizes = sref->data + sref->offset;
915+
const index_type *perm = pref->data + pref->offset;
915916
uint64_t rank = aref->sizes[0];
916917

917918
// Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
@@ -1010,14 +1011,14 @@ _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
10101011
}
10111012

10121013
/// Methods that provide direct access to pointers.
1013-
IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
1014+
IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
10141015
IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
10151016
IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
10161017
IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
10171018
IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
10181019

10191020
/// Methods that provide direct access to indices.
1020-
IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
1021+
IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
10211022
IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
10221023
IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
10231024
IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
@@ -1092,15 +1093,15 @@ IMPL_OUT(outSparseTensorI8, int8_t)
10921093

10931094
/// Helper method to read a sparse tensor filename from the environment,
10941095
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
1095-
char *getTensorFilename(index_t id) {
1096+
char *getTensorFilename(index_type id) {
10961097
char var[80];
10971098
sprintf(var, "TENSOR%" PRIu64, id);
10981099
char *env = getenv(var);
10991100
return env;
11001101
}
11011102

11021103
/// Returns size of sparse tensor in given dimension.
1103-
index_t sparseDimSize(void *tensor, index_t d) {
1104+
index_type sparseDimSize(void *tensor, index_type d) {
11041105
return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
11051106
}
11061107

0 commit comments

Comments
 (0)