
Commit 0500c93

[mlir][sparse] update doc of sparse tensor storage-layout/descriptor (llvm#71249)
This prepares actual "direct IR codegen" for loose compressed and 2:4. Also a bit of cleanup of stale TODOs.
1 parent b53c04a commit 0500c93

File tree: 3 files changed (+29, -44 lines)

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h

Lines changed: 26 additions & 28 deletions
@@ -21,19 +21,22 @@ namespace sparse_tensor {
 
 ///===----------------------------------------------------------------------===//
 /// The sparse tensor storage scheme for a tensor is organized as a single
-/// compound type with the following fields. Note that every memref with ? size
-/// actually behaves as a "vector", i.e. the stored size is the capacity and the
-/// used size resides in the storage_specifier struct.
+/// compound type with the following fields. Note that every memref with `?`
+/// size actually behaves as a "vector", i.e. the stored size is the capacity
+/// and the used size resides in the storage_specifier struct.
 ///
 /// struct {
 ///   ; per-level l:
 ///   ;  if dense:
 ///        <nothing>
-///   ;  if compresed:
-///        memref<? x pos>  positions-l   ; positions for sparse level l
-///        memref<? x crd>  coordinates-l ; coordinates for sparse level l
-///   ;  if singleton:
-///        memref<? x crd>  coordinates-l ; coordinates for singleton level l
+///   ;  if compressed:
+///        memref<? x pos>  positions    ; positions for level l
+///        memref<? x crd>  coordinates  ; coordinates for level l
+///   ;  if loose-compressed:
+///        memref<? x pos>  positions    ; lo/hi position pairs for level l
+///        memref<? x crd>  coordinates  ; coordinates for level l
+///   ;  if singleton/2-out-of-4:
+///        memref<? x crd>  coordinates  ; coordinates for level l
 ///
 ///   memref<? x eltType> values        ; values
 ///
@@ -59,25 +62,25 @@ namespace sparse_tensor {
 /// Examples.
 ///
 /// #CSR storage of 2-dim matrix yields
-///   memref<?xindex> ; positions-1
-///   memref<?xindex> ; coordinates-1
-///   memref<?xf64> ; values
-///   struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes
+///   memref<?xindex>                           ; positions-1
+///   memref<?xindex>                           ; coordinates-1
+///   memref<?xf64>                             ; values
+///   struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes
 ///
 /// #COO storage of 2-dim matrix yields
-///   memref<?xindex>, ; positions-0, essentially
-///   [0,sz] memref<?xindex> ; AOS coordinates storage
-///   memref<?xf64> ; values
-///   struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes
+///   memref<?xindex>,                          ; positions-0, essentially [0,sz]
+///   memref<?xindex>                           ; AOS coordinates storage
+///   memref<?xf64>                             ; values
+///   struct<(array<2 x i64>, array<3 x i64>)>) ; lvl0, lvl1, 3xsizes
 ///
 /// Slice on #COO storage of 2-dim matrix yields
-///   ;; Inherited from the original sparse tensors
-///   memref<?xindex>, ; positions-0, essentially
-///   [0,sz] memref<?xindex> ; AOS coordinates storage
-///   memref<?xf64> ; values
-///   struct<(array<2 x i64>, array<3 x i64>, ; lvl0, lvl1, 3xsizes
-///   ;; Extra slicing-metadata
-///   array<2 x i64>, array<2 x i64>)>) ; dim offset, dim stride.
+///   ;; Inherited from the original sparse tensors
+///   memref<?xindex>,                          ; positions-0, essentially [0,sz]
+///   memref<?xindex>                           ; AOS coordinates storage
+///   memref<?xf64>                             ; values
+///   struct<(array<2 x i64>, array<3 x i64>,   ; lvl0, lvl1, 3xsizes
+///           ;; Extra slicing-metadata
+///           array<2 x i64>, array<2 x i64>)>) ; dim offset, dim stride.
 ///
 ///===----------------------------------------------------------------------===//
 
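The storage scheme documented above is easiest to see on a concrete matrix. The following snippet is not part of the commit; it is a hedged, illustrative sketch in plain C++ of the buffers that a #CSR tensor (dense level 0, compressed level 1) would carry, with made-up values.

#include <cstdint>
#include <vector>

// Illustrative values only (not from the commit): the 3x4 matrix
//   [ 1 0 2 0 ]
//   [ 0 0 0 0 ]
//   [ 0 3 0 4 ]
// stored as #CSR, i.e. a dense level 0 followed by a compressed level 1.
int main() {
  // positions-1: row i owns the entries in [positions[i], positions[i + 1]).
  std::vector<uint64_t> positions = {0, 2, 2, 4};
  // coordinates-1: the column coordinate of each stored entry, row by row.
  std::vector<uint64_t> coordinates = {0, 2, 1, 3};
  // values: the stored entries themselves, in the same order.
  std::vector<double> values = {1.0, 2.0, 3.0, 4.0};
  // In the real scheme the used sizes live in the storage_specifier struct,
  // while the memrefs only carry capacity.
  return 0;
}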

@@ -107,9 +110,6 @@ using FieldIndex = unsigned;
 /// encoding.
 class StorageLayout {
 public:
-  // TODO: Functions/methods marked with [NUMFIELDS] should use
-  // `FieldIndex` for their return type, via the same reasoning for why
-  // `Dimension`/`Level` are used both for identifiers and ranks.
   explicit StorageLayout(const SparseTensorType &stt)
       : StorageLayout(stt.getEncoding()) {}
   explicit StorageLayout(SparseTensorEncodingAttr enc) : enc(enc) {
@@ -154,12 +154,10 @@ class StorageLayout {
 // Wrapper functions to invoke StorageLayout-related method.
 //
 
-// See note [NUMFIELDS].
 inline unsigned getNumFieldsFromEncoding(SparseTensorEncodingAttr enc) {
   return StorageLayout(enc).getNumFields();
 }
 
-// See note [NUMFIELDS].
 inline unsigned getNumDataFieldsFromEncoding(SparseTensorEncodingAttr enc) {
   return StorageLayout(enc).getNumDataFields();
 }
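For context on the wrapper functions kept above, here is a hedged usage sketch; it is not code from the commit and relies only on entry points visible in this diff (StorageLayout, getNumFieldsFromEncoding, getNumDataFieldsFromEncoding), assuming a SparseTensorEncodingAttr obtained elsewhere.

#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
#include <cassert>

// Sketch only: `enc` must be a valid sparse tensor encoding produced elsewhere.
unsigned countMetadataFields(mlir::sparse_tensor::SparseTensorEncodingAttr enc) {
  using namespace mlir::sparse_tensor;
  // All fields of the storage scheme, including the trailing storage specifier.
  unsigned total = getNumFieldsFromEncoding(enc);
  // Only the data memrefs (positions/coordinates/values).
  unsigned data = getNumDataFieldsFromEncoding(enc);
  // Both wrappers simply forward to StorageLayout, so this must agree.
  assert(total == StorageLayout(enc).getNumFields());
  return total - data; // remaining metadata fields, e.g. the storage specifier
}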

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-//===- SparseTensorStorageLayout.cpp --------------------------------------===//
+//===- SparseTensorDescriptor.cpp -----------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.h

Lines changed: 2 additions & 15 deletions
@@ -1,4 +1,4 @@
-//===- SparseTensorDescriptor.h ------------------------------*- C++ -*-===//
+//===- SparseTensorDescriptor.h ---------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -21,13 +21,6 @@
 namespace mlir {
 namespace sparse_tensor {
 
-//===----------------------------------------------------------------------===//
-// SparseTensorDescriptor and helpers that manage the sparse tensor memory
-// layout scheme during "direct code generation" (i.e. when sparsification
-// generates the buffers as part of actual IR, in constrast with the library
-// approach where data structures are hidden behind opaque pointers).
-//===----------------------------------------------------------------------===//
-
 class SparseTensorSpecifier {
 public:
   explicit SparseTensorSpecifier(Value specifier)
@@ -57,9 +50,6 @@ class SparseTensorSpecifier {
 template <typename ValueArrayRef>
 class SparseTensorDescriptorImpl {
 protected:
-  // TODO: Functions/methods marked with [NUMFIELDS] might should use
-  // `FieldIndex` for their return type, via the same reasoning for why
-  // `Dimension`/`Level` are used both for identifiers and ranks.
   SparseTensorDescriptorImpl(SparseTensorType stt, ValueArrayRef fields)
       : rType(stt), fields(fields), layout(stt) {
     assert(layout.getNumFields() == getNumFields());
@@ -76,7 +66,6 @@ class SparseTensorDescriptorImpl {
     return layout.getMemRefFieldIndex(kind, lvl);
   }
 
-  // TODO: See note [NUMFIELDS].
  unsigned getNumFields() const { return fields.size(); }
 
  ///
@@ -140,8 +129,7 @@ class SparseTensorDescriptorImpl {
   }
 
   ValueRange getMemRefFields() const {
-    // Drop the last metadata fields.
-    return fields.drop_back();
+    return fields.drop_back(); // drop the last metadata fields
   }
 
   std::pair<FieldIndex, unsigned> getCrdMemRefIndexAndStride(Level lvl) const {
@@ -173,7 +161,6 @@ class SparseTensorDescriptor : public SparseTensorDescriptorImpl<ValueRange> {
   Value getCrdMemRefOrView(OpBuilder &builder, Location loc, Level lvl) const;
 };
 
-/// Uses SmallVectorImpl<Value> & for mutable descriptors.
 /// Using SmallVector for mutable descriptor allows users to reuse it as a
 /// tmp buffers to append value for some special cases, though users should
 /// be responsible to restore the buffer to legal states after their use. It
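As a last bit of orientation on the descriptor API touched above, here is a hedged sketch of how a lowering step might consume a descriptor; it is not from the commit, uses only members that appear in this diff (getMemRefFields, getCrdMemRefOrView), and the function name and its parameters are invented for illustration.

// Sketch only: assumes the usual SparseTensorDescriptor.h context; `desc`,
// `builder`, and `loc` would be produced by the surrounding codegen pass.
void collectBuffers(mlir::OpBuilder &builder, mlir::Location loc,
                    const mlir::sparse_tensor::SparseTensorDescriptor &desc,
                    mlir::sparse_tensor::Level lvl,
                    llvm::SmallVectorImpl<mlir::Value> &out) {
  // Every data memref of the storage scheme; the trailing storage-specifier
  // metadata field has already been dropped by getMemRefFields().
  for (mlir::Value memref : desc.getMemRefFields())
    out.push_back(memref);
  // Coordinates of one level; for AOS COO storage this may come back as a view.
  out.push_back(desc.getCrdMemRefOrView(builder, loc, lvl));
}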
