@@ -108,112 +108,6 @@ class SparseTensorIterator final {
   const typename SparseTensorCOO<V>::const_iterator end;
 };
 
-// TODO: When using this library from MLIR, the `toMLIRSparseTensor`/
-// `IMPL_CONVERTTOMLIRSPARSETENSOR` and `fromMLIRSparseTensor`/
-// `IMPL_CONVERTFROMMLIRSPARSETENSOR` constructs will be codegened away;
-// therefore, these functions are only used by PyTACO, one place in the
-// Python integration tests, and possibly by out-of-tree projects.
-// This is notable because neither function can be easily generalized
-// to handle non-permutations. In particular, while we could adjust
-// the functions to take all the arguments they'd need, that would just
-// push the problem into client code. So if we want to generalize these
-// functions to support non-permutations, we'll need to figure out how
-// to do so without putting undue burden on clients.
-
-/// Initializes sparse tensor from an external COO-flavored format.
-/// The `rank` argument is both dimension-rank and level-rank, and the
-/// `dim2lvl` argument must be a permutation.
-/// Used by `IMPL_CONVERTTOMLIRSPARSETENSOR`.
-//
-// TODO: generalize beyond 64-bit overhead types.
-template <typename V>
-static SparseTensorStorage<uint64_t, uint64_t, V> *
-toMLIRSparseTensor(uint64_t rank, uint64_t nse, const uint64_t *dimSizes,
-                   const V *values, const uint64_t *dimCoordinates,
-                   const uint64_t *dim2lvl, const DimLevelType *lvlTypes) {
-#ifndef NDEBUG
-  // Verify that the sparsity values are supported.
-  // TODO: update this check to match what we actually support.
-  for (uint64_t i = 0; i < rank; ++i)
-    if (lvlTypes[i] != DimLevelType::Dense &&
-        lvlTypes[i] != DimLevelType::Compressed)
-      MLIR_SPARSETENSOR_FATAL("unsupported level type: %d\n",
-                              static_cast<uint8_t>(lvlTypes[i]));
-#endif
-  // Verify that `dim2lvl` is a permutation of `[0..(rank-1)]`.
-  // NOTE: The construction of `lvlSizes` and `lvl2dim` don't generalize
-  // to arbitrary `dim2lvl` mappings. Whereas constructing `lvlCoords` from
-  // `dimCoords` does (though the details would have to be updated, just
-  // like for `IMPL_ADDELT`).
-  const detail::PermutationRef d2l(rank, dim2lvl);
-  // Convert external format to internal COO.
-  const auto lvlSizes = d2l.pushforward(rank, dimSizes);
-  auto *lvlCOO = new SparseTensorCOO<V>(lvlSizes, nse);
-  std::vector<uint64_t> lvlCoords(rank);
-  const uint64_t *dimCoords = dimCoordinates;
-  for (uint64_t i = 0; i < nse; ++i) {
-    d2l.pushforward(rank, dimCoords, lvlCoords.data());
-    lvlCOO->add(lvlCoords, values[i]);
-    dimCoords += rank;
-  }
-  // Return sparse tensor storage format as opaque pointer.
-  const auto lvl2dim = d2l.inverse();
-  auto *tensor = SparseTensorStorage<uint64_t, uint64_t, V>::newFromCOO(
-      rank, dimSizes, rank, lvlTypes, lvl2dim.data(), *lvlCOO);
-  delete lvlCOO;
-  return tensor;
-}
-
-/// Converts a sparse tensor to an external COO-flavored format.
-/// Used by `IMPL_CONVERTFROMMLIRSPARSETENSOR`.
-//
-// TODO: Currently, values are copied from SparseTensorStorage to
-// SparseTensorCOO, then to the output. We may want to reduce the number
-// of copies.
-//
-// TODO: generalize beyond 64-bit overhead types, no dim ordering,
-// all dimensions compressed
-template <typename V>
-static void
-fromMLIRSparseTensor(const SparseTensorStorage<uint64_t, uint64_t, V> *tensor,
-                     uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,
-                     V **pValues, uint64_t **pCoordinates) {
-  assert(tensor && "Received nullptr for tensor");
-  const uint64_t dimRank = tensor->getDimRank();
-  const auto &dimSizes = tensor->getDimSizes();
-  std::vector<uint64_t> identityPerm(dimRank);
-  std::iota(identityPerm.begin(), identityPerm.end(), 0);
-  SparseTensorCOO<V> *coo =
-      tensor->toCOO(dimRank, dimSizes.data(), dimRank, identityPerm.data());
-
-  const std::vector<Element<V>> &elements = coo->getElements();
-  const uint64_t nse = elements.size();
-
-  const auto &cooSizes = coo->getDimSizes();
-  assert(cooSizes.size() == dimRank && "Rank mismatch");
-  uint64_t *dimShape = new uint64_t[dimRank];
-  std::memcpy(static_cast<void *>(dimShape),
-              static_cast<const void *>(cooSizes.data()),
-              sizeof(uint64_t) * dimRank);
-
-  V *values = new V[nse];
-  uint64_t *coordinates = new uint64_t[dimRank * nse];
-
-  for (uint64_t i = 0, base = 0; i < nse; ++i) {
-    values[i] = elements[i].value;
-    for (uint64_t d = 0; d < dimRank; ++d)
-      coordinates[base + d] = elements[i].coords[d];
-    base += dimRank;
-  }
-
-  delete coo;
-  *pRank = dimRank;
-  *pNse = nse;
-  *pShape = dimShape;
-  *pValues = values;
-  *pCoordinates = coordinates;
-}
-
 //===----------------------------------------------------------------------===//
 //
 // Utilities for manipulating `StridedMemRefType`.
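For orientation, the two helpers deleted above formed a matched import/export pair: `toMLIRSparseTensor` pushed an external COO buffer through the `dim2lvl` permutation into a `SparseTensorStorage`, and `fromMLIRSparseTensor` exported the storage back into freshly allocated COO-style buffers owned by the caller. Below is a minimal round-trip sketch, not part of this patch; it only reuses the removed signatures, while the 2x3 example data, the identity permutation, the level types, and the cleanup choices are illustrative assumptions (and, since both helpers were `static`, such a sketch could only live in the same translation unit).

// Hypothetical round-trip through the removed helpers (illustration only).
template <typename V>
void roundTripSketch() {
  // A 2x3 matrix with two stored entries, given in external COO form.
  const uint64_t rank = 2, nse = 2;
  const uint64_t dimSizes[] = {2, 3};
  const V values[] = {1, 2};
  const uint64_t dimCoordinates[] = {0, 1,  // coordinates of values[0]
                                     1, 2}; // coordinates of values[1]
  const uint64_t dim2lvl[] = {0, 1};        // identity permutation
  const DimLevelType lvlTypes[] = {DimLevelType::Dense,
                                   DimLevelType::Compressed};
  // Import: external COO -> internal sparse-tensor storage.
  auto *tensor = toMLIRSparseTensor<V>(rank, nse, dimSizes, values,
                                       dimCoordinates, dim2lvl, lvlTypes);
  // Export: internal storage -> caller-owned COO-flavored buffers.
  uint64_t outRank, outNse;
  uint64_t *shape, *coords;
  V *vals;
  fromMLIRSparseTensor<V>(tensor, &outRank, &outNse, &shape, &vals, &coords);
  // The export allocates with new[], so the caller releases the buffers.
  delete[] shape;
  delete[] vals;
  delete[] coords;
  // (Releasing `tensor` itself is omitted from this sketch.)
}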
@@ -863,29 +757,6 @@ void readSparseTensorShape(char *filename, std::vector<uint64_t> *out) {
   out->assign(dimSizes, dimSizes + dimRank);
 }
 
-// We can't use `static_cast` here because `DimLevelType` is an enum-class.
-#define IMPL_CONVERTTOMLIRSPARSETENSOR(VNAME, V)                              \
-  void *convertToMLIRSparseTensor##VNAME(                                     \
-      uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values,             \
-      uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes) {       \
-    return toMLIRSparseTensor<V>(rank, nse, dimSizes, values, dimCoordinates, \
-                                 dim2lvl,                                     \
-                                 reinterpret_cast<DimLevelType *>(lvlTypes)); \
-  }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
-#undef IMPL_CONVERTTOMLIRSPARSETENSOR
-
-#define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V)                            \
-  void convertFromMLIRSparseTensor##VNAME(                                    \
-      void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,       \
-      V **pValues, uint64_t **pCoordinates) {                                 \
-    fromMLIRSparseTensor<V>(                                                  \
-        static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor),    \
-        pRank, pNse, pShape, pValues, pCoordinates);                          \
-  }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
-#undef IMPL_CONVERTFROMMLIRSPARSETENSOR
-
 index_type getSparseTensorReaderRank(void *p) {
   return static_cast<SparseTensorReader *>(p)->getRank();
 }
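The macros removed in this hunk stamped out one pair of C-callable wrappers per value type in `MLIR_SPARSETENSOR_FOREVERY_V`; these are the symbols that PyTACO and the Python integration tests resolved by name. As a reference only, the sketch below spells out what that expansion looks like when the value type is `double`; the `F64` suffix for that instantiation is an assumption here, not something stated in this patch.

// Hypothetical spelled-out expansion of the removed wrapper macros for
// VNAME = F64, V = double (the F64/double pairing is assumed).
void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse,
                                   uint64_t *dimSizes, double *values,
                                   uint64_t *dimCoordinates, uint64_t *dim2lvl,
                                   uint8_t *lvlTypes) {
  // The uint8_t sparsity codes supplied by the client are reinterpreted as
  // DimLevelType pointers, since an enum-class cannot be static_cast here.
  return toMLIRSparseTensor<double>(
      rank, nse, dimSizes, values, dimCoordinates, dim2lvl,
      reinterpret_cast<DimLevelType *>(lvlTypes));
}

void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
                                    uint64_t *pNse, uint64_t **pShape,
                                    double **pValues, uint64_t **pCoordinates) {
  // The opaque handle produced by the converter above is cast back to its
  // concrete storage type before exporting to COO-flavored buffers.
  fromMLIRSparseTensor<double>(
      static_cast<SparseTensorStorage<uint64_t, uint64_t, double> *>(tensor),
      pRank, pNse, pShape, pValues, pCoordinates);
}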