Skip to content

Commit 156a4ba

Browse files
authored
[mlir][sparse] deprecate the convert{To,From}MLIRSparseTensor methods (#66304)
Rationale: These libraries provided COO input and output at external boundaries which, since then, has been generalized to the much more powerful pack and unpack operations of the sparse tensor dialect.
1 parent 9dfc6d3 commit 156a4ba

File tree

3 files changed

+3
-197
lines changed

3 files changed

+3
-197
lines changed

mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -88,29 +88,13 @@ class PermutationRef final {
8888
return out;
8989
}
9090

91-
// NOTE: This form of the method is required by `toMLIRSparseTensor`,
92-
// so it can reuse the `out` buffer for each iteration of a loop.
9391
template <typename T>
9492
inline void pushforward(uint64_t size, const T *values, T *out) const {
9593
assert(size == permSize && "size mismatch");
9694
for (uint64_t i = 0; i < permSize; ++i)
9795
out[perm[i]] = values[i];
9896
}
9997

100-
// NOTE: this is only needed by `toMLIRSparseTensor`, which in
101-
// turn only needs it as a vector to hand off to `newSparseTensor`.
102-
// Otherwise we would want the result to be an owning-permutation,
103-
// to retain the knowledge that `isPermutation` is true.
104-
//
105-
/// Constructs the inverse permutation. This is equivalent to calling
106-
/// `pushforward` with `std::iota` for the values.
107-
inline std::vector<uint64_t> inverse() const {
108-
std::vector<uint64_t> out(permSize);
109-
for (uint64_t i = 0; i < permSize; ++i)
110-
out[perm[i]] = i;
111-
return out;
112-
}
113-
11498
/// Constructs a permuted array of values. This method is the inverse
11599
/// of `pushforward` in the sense that for all `p` and `xs` we have:
116100
/// * `p.permute(p.pushforward(xs)) == xs`

mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h

Lines changed: 3 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -227,54 +227,6 @@ MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
227227
MLIR_CRUNNERUTILS_EXPORT void readSparseTensorShape(char *filename,
228228
std::vector<uint64_t> *out);
229229

230-
/// Initializes sparse tensor from a COO-flavored format expressed using
231-
/// C-style data structures. The expected parameters are:
232-
///
233-
/// rank: rank of tensor
234-
/// nse: number of specified elements (usually the nonzeros)
235-
/// shape: array with dimension size for each rank
236-
/// values: a "nse" array with values for all specified elements
237-
/// coordinates: a flat "nse * rank" array with coordinates for all
238-
/// specified elements
239-
/// perm: the permutation of the levels in the storage
240-
/// sparse: the sparsity for the levels
241-
///
242-
/// For example, the sparse matrix
243-
/// | 1.0 0.0 0.0 |
244-
/// | 0.0 5.0 3.0 |
245-
/// can be passed as
246-
/// rank = 2
247-
/// nse = 3
248-
/// shape = [2, 3]
249-
/// values = [1.0, 5.0, 3.0]
250-
/// coordinates = [ 0, 0, 1, 1, 1, 2]
251-
#define DECL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \
252-
MLIR_CRUNNERUTILS_EXPORT void *convertToMLIRSparseTensor##VNAME( \
253-
uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \
254-
uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes);
255-
MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
256-
#undef DECL_CONVERTTOMLIRSPARSETENSOR
257-
258-
/// Converts a sparse tensor to COO-flavored format expressed using
259-
/// C-style data structures. The expected output parameters are pointers
260-
/// for these values:
261-
///
262-
/// rank: rank of tensor
263-
/// nse: number of specified elements (usually the nonzeros)
264-
/// shape: array with size for each dimension
265-
/// values: a "nse" array with values for all specified elements
266-
/// coordinates: a flat "nse * rank" array with coordinates for all
267-
/// specified elements
268-
///
269-
/// The input is a pointer to `SparseTensorStorage<P, C, V>`, typically
270-
/// returned from `convertToMLIRSparseTensor`.
271-
#define DECL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \
272-
MLIR_CRUNNERUTILS_EXPORT void convertFromMLIRSparseTensor##VNAME( \
273-
void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \
274-
V **pValues, uint64_t **pCoordinates);
275-
MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
276-
#undef DECL_CONVERTFROMMLIRSPARSETENSOR
277-
278230
/// Returns the rank of the sparse tensor being read.
279231
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);
280232

@@ -292,10 +244,9 @@ MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p,
292244
/// the reader.
293245
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);
294246

295-
/// Creates a SparseTensorWriter for outputing a sparse tensor to a file with
296-
/// the given file name. When the file name is empty, std::cout is used.
297-
//
298-
// Only the extended FROSTT format is supported currently.
247+
/// Creates a SparseTensorWriter for outputting a sparse tensor to a file
248+
/// with the given file name. When the file name is empty, std::cout is used.
249+
/// Only the extended FROSTT format is supported currently.
299250
MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorWriter(char *filename);
300251

301252
/// Finalizes the outputting of a sparse tensor to a file and releases the

mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp

Lines changed: 0 additions & 129 deletions
Original file line numberDiff line numberDiff line change
@@ -108,112 +108,6 @@ class SparseTensorIterator final {
108108
const typename SparseTensorCOO<V>::const_iterator end;
109109
};
110110

111-
// TODO: When using this library from MLIR, the `toMLIRSparseTensor`/
112-
// `IMPL_CONVERTTOMLIRSPARSETENSOR` and `fromMLIRSparseTensor`/
113-
// `IMPL_CONVERTFROMMLIRSPARSETENSOR` constructs will be codegened away;
114-
// therefore, these functions are only used by PyTACO, one place in the
115-
// Python integration tests, and possibly by out-of-tree projects.
116-
// This is notable because neither function can be easily generalized
117-
// to handle non-permutations. In particular, while we could adjust
118-
// the functions to take all the arguments they'd need, that would just
119-
// push the problem into client code. So if we want to generalize these
120-
// functions to support non-permutations, we'll need to figure out how
121-
// to do so without putting undue burden on clients.
122-
123-
/// Initializes sparse tensor from an external COO-flavored format.
124-
/// The `rank` argument is both dimension-rank and level-rank, and the
125-
/// `dim2lvl` argument must be a permutation.
126-
/// Used by `IMPL_CONVERTTOMLIRSPARSETENSOR`.
127-
//
128-
// TODO: generalize beyond 64-bit overhead types.
129-
template <typename V>
130-
static SparseTensorStorage<uint64_t, uint64_t, V> *
131-
toMLIRSparseTensor(uint64_t rank, uint64_t nse, const uint64_t *dimSizes,
132-
const V *values, const uint64_t *dimCoordinates,
133-
const uint64_t *dim2lvl, const DimLevelType *lvlTypes) {
134-
#ifndef NDEBUG
135-
// Verify that the sparsity values are supported.
136-
// TODO: update this check to match what we actually support.
137-
for (uint64_t i = 0; i < rank; ++i)
138-
if (lvlTypes[i] != DimLevelType::Dense &&
139-
lvlTypes[i] != DimLevelType::Compressed)
140-
MLIR_SPARSETENSOR_FATAL("unsupported level type: %d\n",
141-
static_cast<uint8_t>(lvlTypes[i]));
142-
#endif
143-
// Verify that `dim2lvl` is a permutation of `[0..(rank-1)]`.
144-
// NOTE: The construction of `lvlSizes` and `lvl2dim` don't generalize
145-
// to arbitrary `dim2lvl` mappings. Whereas constructing `lvlCoords` from
146-
// `dimCoords` does (though the details would have to be updated, just
147-
// like for `IMPL_ADDELT`).
148-
const detail::PermutationRef d2l(rank, dim2lvl);
149-
// Convert external format to internal COO.
150-
const auto lvlSizes = d2l.pushforward(rank, dimSizes);
151-
auto *lvlCOO = new SparseTensorCOO<V>(lvlSizes, nse);
152-
std::vector<uint64_t> lvlCoords(rank);
153-
const uint64_t *dimCoords = dimCoordinates;
154-
for (uint64_t i = 0; i < nse; ++i) {
155-
d2l.pushforward(rank, dimCoords, lvlCoords.data());
156-
lvlCOO->add(lvlCoords, values[i]);
157-
dimCoords += rank;
158-
}
159-
// Return sparse tensor storage format as opaque pointer.
160-
const auto lvl2dim = d2l.inverse();
161-
auto *tensor = SparseTensorStorage<uint64_t, uint64_t, V>::newFromCOO(
162-
rank, dimSizes, rank, lvlTypes, lvl2dim.data(), *lvlCOO);
163-
delete lvlCOO;
164-
return tensor;
165-
}
166-
167-
/// Converts a sparse tensor to an external COO-flavored format.
168-
/// Used by `IMPL_CONVERTFROMMLIRSPARSETENSOR`.
169-
//
170-
// TODO: Currently, values are copied from SparseTensorStorage to
171-
// SparseTensorCOO, then to the output. We may want to reduce the number
172-
// of copies.
173-
//
174-
// TODO: generalize beyond 64-bit overhead types, no dim ordering,
175-
// all dimensions compressed
176-
template <typename V>
177-
static void
178-
fromMLIRSparseTensor(const SparseTensorStorage<uint64_t, uint64_t, V> *tensor,
179-
uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,
180-
V **pValues, uint64_t **pCoordinates) {
181-
assert(tensor && "Received nullptr for tensor");
182-
const uint64_t dimRank = tensor->getDimRank();
183-
const auto &dimSizes = tensor->getDimSizes();
184-
std::vector<uint64_t> identityPerm(dimRank);
185-
std::iota(identityPerm.begin(), identityPerm.end(), 0);
186-
SparseTensorCOO<V> *coo =
187-
tensor->toCOO(dimRank, dimSizes.data(), dimRank, identityPerm.data());
188-
189-
const std::vector<Element<V>> &elements = coo->getElements();
190-
const uint64_t nse = elements.size();
191-
192-
const auto &cooSizes = coo->getDimSizes();
193-
assert(cooSizes.size() == dimRank && "Rank mismatch");
194-
uint64_t *dimShape = new uint64_t[dimRank];
195-
std::memcpy(static_cast<void *>(dimShape),
196-
static_cast<const void *>(cooSizes.data()),
197-
sizeof(uint64_t) * dimRank);
198-
199-
V *values = new V[nse];
200-
uint64_t *coordinates = new uint64_t[dimRank * nse];
201-
202-
for (uint64_t i = 0, base = 0; i < nse; ++i) {
203-
values[i] = elements[i].value;
204-
for (uint64_t d = 0; d < dimRank; ++d)
205-
coordinates[base + d] = elements[i].coords[d];
206-
base += dimRank;
207-
}
208-
209-
delete coo;
210-
*pRank = dimRank;
211-
*pNse = nse;
212-
*pShape = dimShape;
213-
*pValues = values;
214-
*pCoordinates = coordinates;
215-
}
216-
217111
//===----------------------------------------------------------------------===//
218112
//
219113
// Utilities for manipulating `StridedMemRefType`.
@@ -863,29 +757,6 @@ void readSparseTensorShape(char *filename, std::vector<uint64_t> *out) {
863757
out->assign(dimSizes, dimSizes + dimRank);
864758
}
865759

866-
// We can't use `static_cast` here because `DimLevelType` is an enum-class.
867-
#define IMPL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \
868-
void *convertToMLIRSparseTensor##VNAME( \
869-
uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \
870-
uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes) { \
871-
return toMLIRSparseTensor<V>(rank, nse, dimSizes, values, dimCoordinates, \
872-
dim2lvl, \
873-
reinterpret_cast<DimLevelType *>(lvlTypes)); \
874-
}
875-
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
876-
#undef IMPL_CONVERTTOMLIRSPARSETENSOR
877-
878-
#define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \
879-
void convertFromMLIRSparseTensor##VNAME( \
880-
void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \
881-
V **pValues, uint64_t **pCoordinates) { \
882-
fromMLIRSparseTensor<V>( \
883-
static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor), \
884-
pRank, pNse, pShape, pValues, pCoordinates); \
885-
}
886-
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
887-
#undef IMPL_CONVERTFROMMLIRSPARSETENSOR
888-
889760
index_type getSparseTensorReaderRank(void *p) {
890761
return static_cast<SparseTensorReader *>(p)->getRank();
891762
}

0 commit comments

Comments (0)