Skip to content

[mlir][sparse] deprecate the convert{To,From}MLIRSparseTensor methods #66304

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Sep 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 0 additions & 16 deletions mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,29 +88,13 @@ class PermutationRef final {
return out;
}

/// Applies the permutation to `values`, writing the permuted result into
/// the caller-provided `out` buffer. This out-parameter form exists so a
/// caller can reuse a single buffer across iterations of a loop.
template <typename T>
inline void pushforward(uint64_t size, const T *values, T *out) const {
  assert(size == permSize && "size mismatch");
  uint64_t k = 0;
  while (k < permSize) {
    out[perm[k]] = values[k];
    ++k;
  }
}

// NOTE: the result is returned as a plain vector (rather than an
// owning-permutation that would retain the `isPermutation` knowledge)
// because the sole consumer only needs a vector to hand off.
//
/// Constructs the inverse permutation. This is equivalent to calling
/// `pushforward` with `std::iota` for the values.
inline std::vector<uint64_t> inverse() const {
  std::vector<uint64_t> inv(permSize);
  uint64_t j = 0;
  while (j < permSize) {
    inv[perm[j]] = j;
    ++j;
  }
  return inv;
}

/// Constructs a permuted array of values. This method is the inverse
/// of `pushforward` in the sense that for all `p` and `xs` we have:
/// * `p.permute(p.pushforward(xs)) == xs`
Expand Down
55 changes: 3 additions & 52 deletions mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
Original file line number Diff line number Diff line change
Expand Up @@ -227,54 +227,6 @@ MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
MLIR_CRUNNERUTILS_EXPORT void readSparseTensorShape(char *filename,
std::vector<uint64_t> *out);

/// Initializes sparse tensor from a COO-flavored format expressed using
/// C-style data structures. The expected parameters are:
///
///   rank:           rank of tensor
///   nse:            number of specified elements (usually the nonzeros)
///   dimSizes:       array with dimension size for each rank
///   values:         a "nse" array with values for all specified elements
///   dimCoordinates: a flat "nse * rank" array with coordinates for all
///                   specified elements
///   dim2lvl:        the permutation from dimensions to storage levels
///   lvlTypes:       the sparsity (one `DimLevelType` per level, passed as
///                   `uint8_t` across the C ABI)
///
/// Returns an opaque pointer to the underlying sparse tensor storage.
///
/// For example, the sparse matrix
///   | 1.0 0.0 0.0 |
///   | 0.0 5.0 3.0 |
/// can be passed as
///   rank = 2
///   nse = 3
///   dimSizes = [2, 3]
///   values = [1.0, 5.0, 3.0]
///   dimCoordinates = [ 0, 0, 1, 1, 1, 2]
#define DECL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \
MLIR_CRUNNERUTILS_EXPORT void *convertToMLIRSparseTensor##VNAME( \
uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \
uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTTOMLIRSPARSETENSOR)
#undef DECL_CONVERTTOMLIRSPARSETENSOR

/// Converts a sparse tensor to COO-flavored format expressed using
/// C-style data structures. The expected output parameters are pointers
/// for these values:
///
///   pRank:        rank of tensor
///   pNse:         number of specified elements (usually the nonzeros)
///   pShape:       array with size for each dimension
///   pValues:      a "nse" array with values for all specified elements
///   pCoordinates: a flat "nse * rank" array with coordinates for all
///                 specified elements
///
/// The output arrays are freshly allocated; ownership passes to the caller.
///
/// The input is a pointer to `SparseTensorStorage<P, C, V>`, typically
/// returned from `convertToMLIRSparseTensor`.
#define DECL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \
MLIR_CRUNNERUTILS_EXPORT void convertFromMLIRSparseTensor##VNAME( \
void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \
V **pValues, uint64_t **pCoordinates); \
MLIR_SPARSETENSOR_FOREVERY_V(DECL_CONVERTFROMMLIRSPARSETENSOR)
#undef DECL_CONVERTFROMMLIRSPARSETENSOR

/// Returns the rank of the sparse tensor being read.
MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);

Expand All @@ -292,10 +244,9 @@ MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderDimSize(void *p,
/// the reader.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorReader(void *p);

/// Creates a SparseTensorWriter for outputing a sparse tensor to a file with
/// the given file name. When the file name is empty, std::cout is used.
//
// Only the extended FROSTT format is supported currently.
/// Creates a SparseTensorWriter for outputting a sparse tensor to a file
/// with the given file name. When the file name is empty, std::cout is used.
/// Only the extended FROSTT format is supported currently.
MLIR_CRUNNERUTILS_EXPORT void *createSparseTensorWriter(char *filename);

/// Finalizes the outputting of a sparse tensor to a file and releases the
Expand Down
129 changes: 0 additions & 129 deletions mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -108,112 +108,6 @@ class SparseTensorIterator final {
const typename SparseTensorCOO<V>::const_iterator end;
};

// TODO: When using this library from MLIR, the `toMLIRSparseTensor`/
// `IMPL_CONVERTTOMLIRSPARSETENSOR` and `fromMLIRSparseTensor`/
// `IMPL_CONVERTFROMMLIRSPARSETENSOR` constructs will be codegened away;
// therefore, these functions are only used by PyTACO, one place in the
// Python integration tests, and possibly by out-of-tree projects.
// This is notable because neither function can be easily generalized
// to handle non-permutations. In particular, while we could adjust
// the functions to take all the arguments they'd need, that would just
// push the problem into client code. So if we want to generalize these
// functions to support non-permutations, we'll need to figure out how
// to do so without putting undue burden on clients.

/// Initializes sparse tensor from an external COO-flavored format.
/// The `rank` argument is both dimension-rank and level-rank, and the
/// `dim2lvl` argument must be a permutation.
/// Used by `IMPL_CONVERTTOMLIRSPARSETENSOR`.
//
// TODO: generalize beyond 64-bit overhead types.
template <typename V>
static SparseTensorStorage<uint64_t, uint64_t, V> *
toMLIRSparseTensor(uint64_t rank, uint64_t nse, const uint64_t *dimSizes,
                   const V *values, const uint64_t *dimCoordinates,
                   const uint64_t *dim2lvl, const DimLevelType *lvlTypes) {
#ifndef NDEBUG
  // Verify that the sparsity values are supported.
  // TODO: update this check to match what we actually support.
  for (uint64_t i = 0; i < rank; ++i)
    if (lvlTypes[i] != DimLevelType::Dense &&
        lvlTypes[i] != DimLevelType::Compressed)
      MLIR_SPARSETENSOR_FATAL("unsupported level type: %d\n",
                              static_cast<uint8_t>(lvlTypes[i]));
#endif
  // Verify that `dim2lvl` is a permutation of `[0..(rank-1)]`.
  // NOTE: The construction of `lvlSizes` and `lvl2dim` don't generalize
  // to arbitrary `dim2lvl` mappings. Whereas constructing `lvlCoords` from
  // `dimCoords` does (though the details would have to be updated, just
  // like for `IMPL_ADDELT`).
  const detail::PermutationRef d2l(rank, dim2lvl);
  // Convert external format to internal COO. The COO object is a stack
  // local (it was previously heap-allocated with a manual delete, which
  // leaked if `newFromCOO` exited abnormally); it is only needed as input
  // to `newFromCOO` below, so automatic lifetime is both safe and simpler.
  const auto lvlSizes = d2l.pushforward(rank, dimSizes);
  SparseTensorCOO<V> lvlCOO(lvlSizes, nse);
  std::vector<uint64_t> lvlCoords(rank);
  const uint64_t *dimCoords = dimCoordinates;
  for (uint64_t i = 0; i < nse; ++i) {
    d2l.pushforward(rank, dimCoords, lvlCoords.data());
    lvlCOO.add(lvlCoords, values[i]);
    dimCoords += rank;
  }
  // Return sparse tensor storage format as opaque pointer.
  const auto lvl2dim = d2l.inverse();
  return SparseTensorStorage<uint64_t, uint64_t, V>::newFromCOO(
      rank, dimSizes, rank, lvlTypes, lvl2dim.data(), lvlCOO);
}

/// Converts a sparse tensor to an external COO-flavored format.
/// Used by `IMPL_CONVERTFROMMLIRSPARSETENSOR`.
//
// TODO: Currently, values are copied from SparseTensorStorage to
// SparseTensorCOO, then to the output. We may want to reduce the number
// of copies.
//
// TODO: generalize beyond 64-bit overhead types, no dim ordering,
// all dimensions compressed
template <typename V>
static void
fromMLIRSparseTensor(const SparseTensorStorage<uint64_t, uint64_t, V> *tensor,
uint64_t *pRank, uint64_t *pNse, uint64_t **pShape,
V **pValues, uint64_t **pCoordinates) {
assert(tensor && "Received nullptr for tensor");
const uint64_t dimRank = tensor->getDimRank();
const auto &dimSizes = tensor->getDimSizes();
std::vector<uint64_t> identityPerm(dimRank);
std::iota(identityPerm.begin(), identityPerm.end(), 0);
SparseTensorCOO<V> *coo =
tensor->toCOO(dimRank, dimSizes.data(), dimRank, identityPerm.data());

const std::vector<Element<V>> &elements = coo->getElements();
const uint64_t nse = elements.size();

const auto &cooSizes = coo->getDimSizes();
assert(cooSizes.size() == dimRank && "Rank mismatch");
uint64_t *dimShape = new uint64_t[dimRank];
std::memcpy(static_cast<void *>(dimShape),
static_cast<const void *>(cooSizes.data()),
sizeof(uint64_t) * dimRank);

V *values = new V[nse];
uint64_t *coordinates = new uint64_t[dimRank * nse];

for (uint64_t i = 0, base = 0; i < nse; ++i) {
values[i] = elements[i].value;
for (uint64_t d = 0; d < dimRank; ++d)
coordinates[base + d] = elements[i].coords[d];
base += dimRank;
}

delete coo;
*pRank = dimRank;
*pNse = nse;
*pShape = dimShape;
*pValues = values;
*pCoordinates = coordinates;
}

//===----------------------------------------------------------------------===//
//
// Utilities for manipulating `StridedMemRefType`.
Expand Down Expand Up @@ -863,29 +757,6 @@ void readSparseTensorShape(char *filename, std::vector<uint64_t> *out) {
out->assign(dimSizes, dimSizes + dimRank);
}

// Instantiates, for every supported value type V, the exported C entry
// point that forwards to `toMLIRSparseTensor<V>` and returns the resulting
// `SparseTensorStorage` as an opaque pointer.
//
// We can't use `static_cast` here because `DimLevelType` is an enum-class.
#define IMPL_CONVERTTOMLIRSPARSETENSOR(VNAME, V) \
void *convertToMLIRSparseTensor##VNAME( \
uint64_t rank, uint64_t nse, uint64_t *dimSizes, V *values, \
uint64_t *dimCoordinates, uint64_t *dim2lvl, uint8_t *lvlTypes) { \
return toMLIRSparseTensor<V>(rank, nse, dimSizes, values, dimCoordinates, \
dim2lvl, \
reinterpret_cast<DimLevelType *>(lvlTypes)); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTTOMLIRSPARSETENSOR)
#undef IMPL_CONVERTTOMLIRSPARSETENSOR

// Instantiates, for every supported value type V, the exported C entry
// point that downcasts the opaque `tensor` pointer back to
// `SparseTensorStorage<uint64_t, uint64_t, V>` and forwards to
// `fromMLIRSparseTensor<V>`.
#define IMPL_CONVERTFROMMLIRSPARSETENSOR(VNAME, V) \
void convertFromMLIRSparseTensor##VNAME( \
void *tensor, uint64_t *pRank, uint64_t *pNse, uint64_t **pShape, \
V **pValues, uint64_t **pCoordinates) { \
fromMLIRSparseTensor<V>( \
static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor), \
pRank, pNse, pShape, pValues, pCoordinates); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_CONVERTFROMMLIRSPARSETENSOR)
#undef IMPL_CONVERTFROMMLIRSPARSETENSOR

index_type getSparseTensorReaderRank(void *p) {
  // `p` is the opaque reader handle; recover the concrete type first.
  auto *reader = static_cast<SparseTensorReader *>(p);
  return reader->getRank();
}
Expand Down