
[mlir][sparse] remove unused sparse tensor iterator #68951


Merged: 2 commits, Oct 13, 2023

Changes from all commits
3 changes: 0 additions & 3 deletions mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -146,11 +146,8 @@ enum class Action : uint32_t {
kEmptyForward = 1,
kFromCOO = 2,
kSparseToSparse = 3,
kFuture = 4, // not used
kToCOO = 5,
kToIterator = 6,
kPack = 7,
// Sort an unordered COO in place.
kSortCOOInPlace = 8,
};
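Because this hunk has no additions, the surviving enumerators keep their original values, leaving gaps where entries were retired. A minimal sketch of that invariant, assuming kFuture (4) and kToIterator (6) are among the removed entries:

static_assert(static_cast<uint32_t>(Action::kSparseToSparse) == 3, "stable ABI value");
static_assert(static_cast<uint32_t>(Action::kToCOO) == 5, "value 4 retired, not reused");
static_assert(static_cast<uint32_t>(Action::kPack) == 7, "value 6 retired, not reused");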

22 changes: 4 additions & 18 deletions mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -39,18 +39,18 @@ extern "C" {

/// This is the "swiss army knife" method for materializing sparse
/// tensors into the computation. The types of the `ptr` argument and
/// the result depend on the action, as explained in the following table
/// (where "STS" means a sparse-tensor-storage object, "COO" means
/// a coordinate-scheme object, and "Iterator" means an iterator object).
/// the result depend on the action, as explained in the following table,
/// where "STS" means a sparse-tensor-storage object and "COO" means
/// a coordinate-scheme object.
///
/// Action: `ptr`: Returns:
/// kEmpty - STS, empty
/// kEmptyForward - STS, empty, with forwarding COO
/// kFromCOO COO STS, copied from the COO source
/// kSparseToSparse STS STS, copied from the STS source
/// kToCOO STS COO, copied from the STS source
/// kToIterator STS Iterator (@getNext/@delSparseTensorIterator)
/// kPack buffers STS, from level buffers
/// kSortCOOInPlace STS STS, sorted in place
MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
StridedMemRefType<index_type, 1> *dimSizesRef,
StridedMemRefType<index_type, 1> *lvlSizesRef,
@@ -90,14 +90,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
#undef DECL_FORWARDINGINSERT

/// Coordinate-scheme method for getting the next element while iterating.
#define DECL_GETNEXT(VNAME, V) \
MLIR_CRUNNERUTILS_EXPORT bool _mlir_ciface_getNext##VNAME( \
void *iter, StridedMemRefType<index_type, 1> *cref, \
StridedMemRefType<V, 0> *vref);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETNEXT)
#undef DECL_GETNEXT
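For context, a minimal sketch of the C-API usage pattern these declarations supported, and which this PR retires. It assumes the iterator was obtained earlier via Action::kToIterator; drainF64 is a hypothetical helper, and the F64 suffix is one of the instantiations generated by MLIR_SPARSETENSOR_FOREVERY_V:

#include <cstdio>
#include <vector>
#include "mlir/ExecutionEngine/SparseTensorRuntime.h"

static void drainF64(void *iter) {
  std::vector<index_type> coords(2); // rank-2 tensor, for illustration
  StridedMemRefType<index_type, 1> cref{coords.data(), coords.data(), 0,
                                        {2}, {1}};
  double value;
  StridedMemRefType<double, 0> vref{&value, &value, 0};
  while (_mlir_ciface_getNextF64(iter, &cref, &vref)) // false when exhausted
    std::printf("(%llu, %llu) = %f\n", (unsigned long long)coords[0],
                (unsigned long long)coords[1], value);
  delSparseTensorIteratorF64(iter); // also frees the underlying COO
}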

/// Tensor-storage method to insert elements in lexicographical
/// level-coordinate order.
#define DECL_LEXINSERT(VNAME, V) \
@@ -201,12 +193,6 @@ MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO)
#undef DECL_DELCOO

/// Releases the memory for an iterator object.
#define DECL_DELITER(VNAME, V) \
MLIR_CRUNNERUTILS_EXPORT void delSparseTensorIterator##VNAME(void *iter);
MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELITER)
#undef DECL_DELITER

/// Helper function to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
109 changes: 3 additions & 106 deletions mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -63,71 +63,18 @@ using namespace mlir::sparse_tensor;

//===----------------------------------------------------------------------===//
//
// Implementation details for public functions, which don't have a good
// place to live in the C++ library this file is wrapping.
// Utilities for manipulating `StridedMemRefType`.
//
//===----------------------------------------------------------------------===//

namespace {

/// Wrapper class to avoid memory leakage issues. The `SparseTensorCOO<V>`
/// class provides a standard C++ iterator interface, where the iterator
/// is implemented as per `std::vector`'s iterator. However, for MLIR's
/// usage we need to have an iterator which also holds onto the underlying
/// `SparseTensorCOO<V>` so that it can be freed whenever the iterator
/// is freed.
//
// We name this `SparseTensorIterator` rather than `SparseTensorCOOIterator`
// for future-proofing, since the use of `SparseTensorCOO` is an
// implementation detail that we eventually want to change (e.g., to
// use `SparseTensorEnumerator` directly, rather than constructing the
// intermediate `SparseTensorCOO` at all).
template <typename V>
class SparseTensorIterator final {
public:
/// This ctor requires `coo` to be a non-null pointer to a dynamically
/// allocated object, and takes ownership of that object. Therefore,
/// callers must not free the underlying COO object, since the iterator's
/// dtor will do so.
explicit SparseTensorIterator(const SparseTensorCOO<V> *coo)
: coo(coo), it(coo->begin()), end(coo->end()) {}

~SparseTensorIterator() { delete coo; }

// Disable copy-ctor and copy-assignment, to prevent double-free.
SparseTensorIterator(const SparseTensorIterator<V> &) = delete;
SparseTensorIterator<V> &operator=(const SparseTensorIterator<V> &) = delete;

/// Gets the next element. If there are no remaining elements, then
/// returns nullptr.
const Element<V> *getNext() { return it < end ? &*it++ : nullptr; }

private:
const SparseTensorCOO<V> *const coo; // Owning pointer.
typename SparseTensorCOO<V>::const_iterator it;
const typename SparseTensorCOO<V>::const_iterator end;
};
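A short usage sketch of the ownership contract described above; processElement is a hypothetical consumer, and the COO must be heap-allocated because the iterator's dtor deletes it:

static void drain(const SparseTensorCOO<double> *coo,
                  void (*processElement)(const Element<double> &)) {
  SparseTensorIterator<double> iter(coo); // takes ownership of coo
  while (const Element<double> *elem = iter.getNext())
    processElement(*elem);
  // iter's dtor runs here and deletes coo; callers must not free it.
}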

//===----------------------------------------------------------------------===//
//
// Utilities for manipulating `StridedMemRefType`.
//
//===----------------------------------------------------------------------===//

// We shouldn't need to use `detail::safelyEQ` here since the `1` is a literal.
#define ASSERT_NO_STRIDE(MEMREF) \
do { \
assert((MEMREF) && "Memref is nullptr"); \
assert(((MEMREF)->strides[0] == 1) && "Memref has non-trivial stride"); \
} while (false)

// All our functions use `uint64_t` for ranks, but `StridedMemRefType::sizes`
// uses `int64_t` on some platforms. So we explicitly cast this lookup to
// ensure we get a consistent type, and we use `checkOverflowCast` rather
// than `static_cast` just to be extremely sure that the casting can't
// go awry. (The cast should always be safe since (1) sizes should never
// be negative, and (2) the maximum `int64_t` is smaller than the maximum
// `uint64_t`. But it's better to be safe than sorry.)
#define MEMREF_GET_USIZE(MEMREF) \
detail::checkOverflowCast<uint64_t>((MEMREF)->sizes[0])

@@ -137,22 +84,13 @@ class SparseTensorIterator final {

#define MEMREF_GET_PAYLOAD(MEMREF) ((MEMREF)->data + (MEMREF)->offset)
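Taken together, these macros give the standard pattern for unpacking a 1-d memref argument, as the implementations below do; a minimal sketch (readCoords is hypothetical, and <cstdio> is assumed):

static void readCoords(StridedMemRefType<index_type, 1> *cref) {
  ASSERT_NO_STRIDE(cref);                        // non-null, unit stride
  const uint64_t rank = MEMREF_GET_USIZE(cref);  // overflow-checked size
  index_type *coords = MEMREF_GET_PAYLOAD(cref); // data adjusted by offset
  for (uint64_t d = 0; d < rank; d++)
    std::printf("%llu ", (unsigned long long)coords[d]);
}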

/// Initializes the memref with the provided size and data pointer. This
/// is designed for functions which want to "return" a memref that aliases
/// into memory owned by some other object (e.g., `SparseTensorStorage`),
/// without doing any actual copying. (The "return" is in scare quotes
/// because the `_mlir_ciface_` calling convention migrates any returned
/// memrefs into an out-parameter passed before all the other function
/// parameters.)
///
/// We make this a function rather than a macro mainly for type safety
/// reasons. This function does not modify the data pointer, but it
/// cannot be marked `const` because it is stored into the (necessarily)
/// non-`const` memref. This function is templated over the `DataSizeT`
/// to work around signedness warnings due to many data types having
/// varying signedness across different platforms. The templating allows
/// this function to ensure that it does the right thing and never
/// introduces errors due to implicit conversions.
template <typename DataSizeT, typename T>
static inline void aliasIntoMemref(DataSizeT size, T *data,
StridedMemRefType<T, 1> &ref) {
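The body is elided in this view; a plausible implementation consistent with the comment above (an assumption, not the verbatim source; <type_traits> assumed):

template <typename DataSizeT, typename T>
static inline void aliasIntoMemref(DataSizeT size, T *data,
                                   StridedMemRefType<T, 1> &ref) {
  ref.basePtr = ref.data = data; // alias the caller's memory, no copy
  ref.offset = 0;
  // Match the platform-dependent type of `sizes[0]` to avoid signedness
  // warnings, with an overflow-checked cast.
  using MemrefSizeT = std::remove_reference_t<decltype(ref.sizes[0])>;
  ref.sizes[0] = detail::checkOverflowCast<MemrefSizeT>(size);
  ref.strides[0] = 1;
}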
@@ -200,20 +138,11 @@ extern "C" {
dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
dimRank, tensor); \
} \
case Action::kFuture: { \
break; \
} \
case Action::kToCOO: { \
assert(ptr && "Received nullptr for SparseTensorStorage object"); \
auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr); \
return tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl, lvl2dim); \
} \
case Action::kToIterator: { \
assert(ptr && "Received nullptr for SparseTensorStorage object"); \
auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr); \
auto *coo = tensor.toCOO(lvlRank, lvlSizes, dimRank, dim2lvl, lvl2dim); \
return new SparseTensorIterator<V>(coo); \
} \
case Action::kPack: { \
assert(ptr && "Received nullptr for SparseTensorStorage object"); \
intptr_t *buffers = static_cast<intptr_t *>(ptr); \
@@ -372,7 +301,6 @@ void *_mlir_ciface_newSparseTensor( // NOLINT
CASE_SECSAME(OverheadType::kU64, PrimaryType::kC32, uint64_t, complex32);

// Unsupported case (add above if needed).
// TODO: better pretty-printing of enum values!
MLIR_SPARSETENSOR_FATAL(
"unsupported combination of types: <P=%d, C=%d, V=%d>\n",
static_cast<int>(posTp), static_cast<int>(crdTp),
@@ -428,29 +356,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
#undef IMPL_FORWARDINGINSERT

// NOTE: the `cref` argument uses the same coordinate-space as the `iter`
// (which can be either dim- or lvl-coords, depending on context).
#define IMPL_GETNEXT(VNAME, V) \
bool _mlir_ciface_getNext##VNAME(void *iter, \
StridedMemRefType<index_type, 1> *cref, \
StridedMemRefType<V, 0> *vref) { \
assert(iter && vref); \
ASSERT_NO_STRIDE(cref); \
index_type *coords = MEMREF_GET_PAYLOAD(cref); \
V *value = MEMREF_GET_PAYLOAD(vref); \
const uint64_t rank = MEMREF_GET_USIZE(cref); \
const Element<V> *elem = \
static_cast<SparseTensorIterator<V> *>(iter)->getNext(); \
if (elem == nullptr) \
return false; \
for (uint64_t d = 0; d < rank; d++) \
coords[d] = elem->coords[d]; \
*value = elem->value; \
return true; \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETNEXT)
#undef IMPL_GETNEXT

#define IMPL_LEXINSERT(VNAME, V) \
void _mlir_ciface_lexInsert##VNAME( \
void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef, \
@@ -636,7 +541,6 @@ void *_mlir_ciface_newSparseTensorFromReader(
CASE_SECSAME(kU64, kC32, uint64_t, complex32);

// Unsupported case (add above if needed).
// TODO: better pretty-printing of enum values!
MLIR_SPARSETENSOR_FATAL(
"unsupported combination of types: <P=%d, C=%d, V=%d>\n",
static_cast<int>(posTp), static_cast<int>(crdTp),
@@ -701,7 +605,7 @@ void endLexInsert(void *tensor) {

#define IMPL_OUTSPARSETENSOR(VNAME, V) \
void outSparseTensor##VNAME(void *coo, void *dest, bool sort) { \
assert(coo && "Got nullptr for COO object"); \
assert(coo); \
auto &coo_ = *static_cast<SparseTensorCOO<V> *>(coo); \
if (sort) \
coo_.sort(); \
@@ -721,13 +625,6 @@ void delSparseTensor(void *tensor) {
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
#undef IMPL_DELCOO

#define IMPL_DELITER(VNAME, V) \
void delSparseTensorIterator##VNAME(void *iter) { \
delete static_cast<SparseTensorIterator<V> *>(iter); \
}
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELITER)
#undef IMPL_DELITER

char *getTensorFilename(index_type id) {
constexpr size_t BUF_SIZE = 80;
char var[BUF_SIZE];
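Usage sketch for this helper: the id selects the environment variable, so id 0 resolves ${TENSOR0} (the path below is hypothetical, and POSIX setenv is assumed):

#include <cstdlib>

int main() {
  setenv("TENSOR0", "/tmp/example.mtx", /*overwrite=*/1); // hypothetical path
  char *filename = getTensorFilename(0); // looks up ${TENSOR0}
}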