
Commit 0bfaa30

[mlir][sparse] implement singleton dimension level type
This is a first step towards fully implementing the new dimension level types and properties, illustrating with a fully functional sorted COO of any dimension. Note that the sparsification part is pretty complete. The required parts in the runtime support library have been kept to a minimum, to avoid huge conflicts with Wren's ongoing refactoring. The missing parts will be filled in later.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D134096
1 parent 3716851 commit 0bfaa30
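
For orientation before reading the diffs: the commit expresses sorted COO directly as a sparse tensor encoding whose first level is "compressed-nu" (compressed, with non-unique coordinates allowed) and whose remaining level(s) are "singleton" (exactly one coordinate per position in the enclosing level). The following is a minimal sketch of declaring such a type and converting a dense tensor into it, using the same attribute and op syntax exercised by the new tests below; the 8x8 shape and the function name @to_sorted_coo are illustrative only, not part of this commit.

    // Sorted COO for a 2-D matrix: level 0 compressed but non-unique,
    // level 1 a singleton coordinate stored per entry.
    #SortedCOO = #sparse_tensor.encoding<{
      dimLevelType = [ "compressed-nu", "singleton" ]
    }>

    // Illustrative helper (hypothetical name and shape): materialize a
    // dense tensor as sorted COO storage via sparse_tensor.convert.
    func.func @to_sorted_coo(%d: tensor<8x8xf64>) -> tensor<8x8xf64, #SortedCOO> {
      %s = sparse_tensor.convert %d : tensor<8x8xf64> to tensor<8x8xf64, #SortedCOO>
      return %s : tensor<8x8xf64, #SortedCOO>
    }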

2 files changed: +326 −0 lines changed
Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
// RUN: mlir-opt %s -sparsification | FileCheck %s

#SortedCOO = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ]
}>

#trait_scale = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = X(i,j) * 2.0"
}

#trait_matvec = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A
    affine_map<(i,j) -> (j)>,   // b
    affine_map<(i,j) -> (i)>    // x (out)
  ],
  iterator_types = ["parallel","reduction"],
  doc = "x(i) += A(i,j) * b(j)"
}

//
// Two kernels that operate on SortedCOO format.
//

// CHECK-LABEL: func.func @sparse_scale(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f32
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf32>
// CHECK: %[[VAL_6:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_2]] {
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_8]]] : memref<?xf32>
// CHECK: %[[VAL_10:.*]] = arith.mulf %[[VAL_9]], %[[VAL_3]] : f32
// CHECK: memref.store %[[VAL_10]], %[[VAL_5]]{{\[}}%[[VAL_8]]] : memref<?xf32>
// CHECK: }
// CHECK: %[[VAL_11:.*]] = sparse_tensor.load %[[VAL_0]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>
// CHECK: return %[[VAL_11]] : tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>
// CHECK: }
func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #SortedCOO> {
  %c = arith.constant 2.0 : f32
  %0 = linalg.generic #trait_scale
    outs(%argx: tensor<?x?xf32, #SortedCOO>) {
      ^bb(%x: f32):
        %1 = arith.mulf %x, %c : f32
        linalg.yield %1 : f32
  } -> tensor<?x?xf32, #SortedCOO>
  return %0 : tensor<?x?xf32, #SortedCOO>
}

// CHECK-LABEL: func.func @matvec(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<64xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]] {dimension = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref<?xf64>
// CHECK: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_4]] {
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_14]]] : memref<32xf64>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xf64>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_16]]] : memref<64xf64>
// CHECK: %[[VAL_19:.*]] = arith.mulf %[[VAL_17]], %[[VAL_18]] : f64
// CHECK: %[[VAL_20:.*]] = arith.addf %[[VAL_15]], %[[VAL_19]] : f64
// CHECK: memref.store %[[VAL_20]], %[[VAL_10]]{{\[}}%[[VAL_14]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_10]] : memref<32xf64>
// CHECK: return %[[VAL_21]] : tensor<32xf64>
// CHECK: }
func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
                  %argb: tensor<64xf64>,
                  %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait_matvec
      ins(%arga, %argb : tensor<32x64xf64, #SortedCOO>, tensor<64xf64>)
      outs(%argx: tensor<32xf64>) {
    ^bb(%A: f64, %b: f64, %x: f64):
      %0 = arith.mulf %A, %b : f64
      %1 = arith.addf %x, %0 : f64
      linalg.yield %1 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}
Lines changed: 230 additions & 0 deletions
@@ -0,0 +1,230 @@
// RUN: mlir-opt %s --sparse-compiler | \
// RUN: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \
// RUN: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \
// RUN: mlir-cpu-runner \
// RUN: -e entry -entry-point-result=void \
// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

!Filename = !llvm.ptr<i8>

#SortedCOO = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ]
}>

#SortedCOOPermuted = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>

#SortedCOO3D = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ]
}>

#SortedCOO3DPermuted = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>

#trait_scale = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = X(i,j) * 2.0"
}

//
// Tests reading in matrix/tensor from file into Sorted COO formats
// as well as applying various operations to this format.
//
module {

  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // A kernel that scales a sparse matrix A by a factor of 2.0.
  //
  func.func @sparse_scale(%argx: tensor<?x?xf64, #SortedCOO>)
      -> tensor<?x?xf64, #SortedCOO> {
    %c = arith.constant 2.0 : f64
    %0 = linalg.generic #trait_scale
      outs(%argx: tensor<?x?xf64, #SortedCOO>) {
        ^bb(%x: f64):
          %1 = arith.mulf %x, %c : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #SortedCOO>
    return %0 : tensor<?x?xf64, #SortedCOO>
  }

  func.func @dumpi(%arg0: memref<?xindex>) {
    %c0 = arith.constant 0 : index
    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<20xindex>
    vector.print %v : vector<20xindex>
    return
  }

  func.func @dumpf(%arg0: memref<?xf64>) {
    %c0 = arith.constant 0 : index
    %nan = arith.constant 0x7FF0000001000000 : f64
    %v = vector.transfer_read %arg0[%c0], %nan: memref<?xf64>, vector<20xf64>
    vector.print %v : vector<20xf64>
    return
  }

  func.func @entry() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index

    %fileName0 = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %fileName1 = call @getTensorFilename(%c1) : (index) -> (!Filename)

    // Read the sparse tensors from file, construct sparse storage.
    %0 = sparse_tensor.new %fileName0 : !Filename to tensor<?x?xf64, #SortedCOO>
    %1 = sparse_tensor.new %fileName0 : !Filename to tensor<?x?xf64, #SortedCOOPermuted>
    %2 = sparse_tensor.new %fileName1 : !Filename to tensor<?x?x?xf64, #SortedCOO3D>
    %3 = sparse_tensor.new %fileName1 : !Filename to tensor<?x?x?xf64, #SortedCOO3DPermuted>

    // Conversion from literal.
    %m = arith.constant sparse<
       [ [0,0], [1,3], [2,0], [2,3], [3,1], [4,1] ],
         [6.0, 5.0, 4.0, 3.0, 2.0, 11.0 ]
    > : tensor<5x4xf64>
    %4 = sparse_tensor.convert %m : tensor<5x4xf64> to tensor<?x?xf64, #SortedCOO>

    //
    // CHECK: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 )
    // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, nan, nan, nan )
    //
    %p0 = sparse_tensor.pointers %0 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i00 = sparse_tensor.indices %0 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i01 = sparse_tensor.indices %0 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v0 = sparse_tensor.values %0
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p0) : (memref<?xindex>) -> ()
    call @dumpi(%i00) : (memref<?xindex>) -> ()
    call @dumpi(%i01) : (memref<?xindex>) -> ()
    call @dumpf(%v0) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 )
    // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, nan, nan, nan )
    //
    %p1 = sparse_tensor.pointers %1 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %i10 = sparse_tensor.indices %1 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %i11 = sparse_tensor.indices %1 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %v1 = sparse_tensor.values %1
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xf64>
    call @dumpi(%p1) : (memref<?xindex>) -> ()
    call @dumpi(%i10) : (memref<?xindex>) -> ()
    call @dumpi(%i11) : (memref<?xindex>) -> ()
    call @dumpf(%v1) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
    // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, nan, nan, nan )
    //
    %p2 = sparse_tensor.pointers %2 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i20 = sparse_tensor.indices %2 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i21 = sparse_tensor.indices %2 { dimension = 1 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i22 = sparse_tensor.indices %2 { dimension = 2 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %v2 = sparse_tensor.values %2
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xf64>
    call @dumpi(%p2) : (memref<?xindex>) -> ()
    call @dumpi(%i20) : (memref<?xindex>) -> ()
    call @dumpi(%i21) : (memref<?xindex>) -> ()
    call @dumpi(%i21) : (memref<?xindex>) -> ()
    call @dumpf(%v2) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, nan, nan, nan )
    //
    %p3 = sparse_tensor.pointers %3 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i30 = sparse_tensor.indices %3 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i31 = sparse_tensor.indices %3 { dimension = 1 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i32 = sparse_tensor.indices %3 { dimension = 2 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %v3 = sparse_tensor.values %3
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xf64>
    call @dumpi(%p3) : (memref<?xindex>) -> ()
    call @dumpi(%i30) : (memref<?xindex>) -> ()
    call @dumpi(%i31) : (memref<?xindex>) -> ()
    call @dumpi(%i31) : (memref<?xindex>) -> ()
    call @dumpf(%v3) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
    //
    %p4 = sparse_tensor.pointers %4 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i40 = sparse_tensor.indices %4 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i41 = sparse_tensor.indices %4 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v4 = sparse_tensor.values %4
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p4) : (memref<?xindex>) -> ()
    call @dumpi(%i40) : (memref<?xindex>) -> ()
    call @dumpi(%i41) : (memref<?xindex>) -> ()
    call @dumpf(%v4) : (memref<?xf64>) -> ()

    // And last but not least, an actual operation applied to COO.
    // Note that this performs the operation "in place".
    %5 = call @sparse_scale(%4) : (tensor<?x?xf64, #SortedCOO>) -> tensor<?x?xf64, #SortedCOO>

    //
    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
    //
    %p5 = sparse_tensor.pointers %5 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i50 = sparse_tensor.indices %5 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i51 = sparse_tensor.indices %5 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v5 = sparse_tensor.values %5
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p5) : (memref<?xindex>) -> ()
    call @dumpi(%i50) : (memref<?xindex>) -> ()
    call @dumpi(%i51) : (memref<?xindex>) -> ()
    call @dumpf(%v5) : (memref<?xf64>) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %0 : tensor<?x?xf64, #SortedCOO>
    bufferization.dealloc_tensor %1 : tensor<?x?xf64, #SortedCOOPermuted>
    bufferization.dealloc_tensor %2 : tensor<?x?x?xf64, #SortedCOO3D>
    bufferization.dealloc_tensor %3 : tensor<?x?x?xf64, #SortedCOO3DPermuted>
    bufferization.dealloc_tensor %4 : tensor<?x?xf64, #SortedCOO>

    return
  }
}
