Commit fdf44b3

[mlir][sparse] migrate integration tests to sparse_tensor.print (#83357)
This is the first step (of many) in cleaning up our tests to use the new and exciting sparse_tensor.print operation instead of the lengthy extraction + print ops.
1 parent f83f712 commit fdf44b3
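
For context, each migrated test replaces a manual dump of the storage arrays (sparse_tensor.positions / coordinates / values followed by vector.transfer_read and vector.print) with a single print op. A minimal before/after sketch, reusing the tensor<?x?xf64, #BSR> type from the block.mlir diff below (only the positions dump is shown; the coordinates and values dumps follow the same pattern):

  // Before: extract one storage array and print it as a fixed-size vector.
  %pos  = sparse_tensor.positions %A {level = 1 : index} : tensor<?x?xf64, #BSR> to memref<?xindex>
  %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
  vector.print %vecp : vector<3xindex>

  // After: one op prints nse, positions, coordinates, and values.
  sparse_tensor.print %A : tensor<?x?xf64, #BSR>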

6 files changed: +103, -139 lines changed

mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir

Lines changed: 23 additions & 22 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -82,38 +82,39 @@ module {
     return %0 : tensor<?x?xf64, #BSR>
   }

-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %f0 = arith.constant 0.0 : f64

     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %A = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #BSR>

-    // CHECK: ( 0, 2, 3 )
-    // CHECK-NEXT: ( 0, 2, 1 )
-    // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
-    %pos = sparse_tensor.positions %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
-    %vecp = vector.transfer_read %pos[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp : vector<3xindex>
-    %crd = sparse_tensor.coordinates %A {level = 1 : index } : tensor<?x?xf64, #BSR> to memref<?xindex>
-    %vecc = vector.transfer_read %crd[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecc : vector<3xindex>
-    %val = sparse_tensor.values %A : tensor<?x?xf64, #BSR> to memref<?xf64>
-    %vecv = vector.transfer_read %val[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecv : vector<12xf64>
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: ----
+    sparse_tensor.print %A : tensor<?x?xf64, #BSR>

-    // CHECK-NEXT: ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0 )
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1
+    // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
+    // CHECK-NEXT: ----
     %t1 = sparse_tensor.reinterpret_map %A : tensor<?x?xf64, #BSR>
                                           to tensor<?x?x2x2xf64, #DSDD>
-    %vdsdd = sparse_tensor.values %t1 : tensor<?x?x2x2xf64, #DSDD> to memref<?xf64>
-    %vecdsdd = vector.transfer_read %vdsdd[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecdsdd : vector<12xf64>
+    sparse_tensor.print %t1 : tensor<?x?x2x2xf64, #DSDD>

-    // CHECK-NEXT: ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0 )
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1,
+    // CHECK-NEXT: values : ( 3, 6, 0, 9, 12, 0, 0, 15, 18, 21, 24, 0,
+    // CHECK-NEXT: ----
     %As = call @scale(%A) : (tensor<?x?xf64, #BSR>) -> (tensor<?x?xf64, #BSR>)
-    %vals = sparse_tensor.values %As : tensor<?x?xf64, #BSR> to memref<?xf64>
-    %vecs = vector.transfer_read %vals[%c0], %f0 : memref<?xf64>, vector<12xf64>
-    vector.print %vecs : vector<12xf64>
+    sparse_tensor.print %As : tensor<?x?xf64, #BSR>

     // Release the resources.
     bufferization.dealloc_tensor %A: tensor<?x?xf64, #BSR>
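
As a reading aid, the runtime output that the new CHECK lines above match against looks roughly like the following (reassembled from the CHECK patterns of the first print; FileCheck matches substrings, so trailing characters such as closing parentheses may follow what is shown here):

  ---- Sparse Tensor ----
  nse = 12
  pos[1] : ( 0, 2, 3,
  crd[1] : ( 0, 2, 1,
  values : ( 1, 2, 0, 3, 4, 0, 0, 5, 6, 7, 8, 0,
  ----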

mlir/test/Integration/Dialect/SparseTensor/CPU/block_majors.mlir

Lines changed: 37 additions & 48 deletions
@@ -102,9 +102,15 @@
 //
 module {

-  // CHECK: ( 0, 1, 2 )
-  // CHECK-NEXT: ( 0, 2 )
-  // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+
+  //
+  // CHECK: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 2,
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo1() {
     // Build.
     %c0 = arith.constant 0 : index
@@ -115,23 +121,20 @@ module {
     > : tensor<6x16xf64>
     %s1 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_rowmajor>
     // Test.
-    %pos1 = sparse_tensor.positions %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
-    %vecp1 = vector.transfer_read %pos1[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp1 : vector<3xindex>
-    %crd1 = sparse_tensor.coordinates %s1 {level = 1 : index } : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xindex>
-    %vecc1 = vector.transfer_read %crd1[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc1 : vector<2xindex>
-    %val1 = sparse_tensor.values %s1 : tensor<?x?xf64, #BSR_row_rowmajor> to memref<?xf64>
-    %vecv1 = vector.transfer_read %val1[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv1 : vector<24xf64>
+    sparse_tensor.print %s1 : tensor<?x?xf64, #BSR_row_rowmajor>
     // Release.
     bufferization.dealloc_tensor %s1: tensor<?x?xf64, #BSR_row_rowmajor>
     return
   }

-  // CHECK-NEXT: ( 0, 1, 2 )
-  // CHECK-NEXT: ( 0, 2 )
-  // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 2,
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo2() {
     // Build.
     %c0 = arith.constant 0 : index
@@ -142,23 +145,20 @@ module {
     > : tensor<6x16xf64>
     %s2 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_row_colmajor>
     // Test.
-    %pos2 = sparse_tensor.positions %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
-    %vecp2 = vector.transfer_read %pos2[%c0], %c0 : memref<?xindex>, vector<3xindex>
-    vector.print %vecp2 : vector<3xindex>
-    %crd2 = sparse_tensor.coordinates %s2 {level = 1 : index } : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xindex>
-    %vecc2 = vector.transfer_read %crd2[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc2 : vector<2xindex>
-    %val2 = sparse_tensor.values %s2 : tensor<?x?xf64, #BSR_row_colmajor> to memref<?xf64>
-    %vecv2 = vector.transfer_read %val2[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv2 : vector<24xf64>
+    sparse_tensor.print %s2 : tensor<?x?xf64, #BSR_row_colmajor>
     // Release.
     bufferization.dealloc_tensor %s2: tensor<?x?xf64, #BSR_row_colmajor>
     return
   }

-  // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
-  // CHECK-NEXT: ( 0, 1 )
-  // CHECK-NEXT: ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 1,
+  // CHECK-NEXT: values : ( 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo3() {
     // Build.
     %c0 = arith.constant 0 : index
@@ -169,23 +169,20 @@ module {
     > : tensor<6x16xf64>
     %s3 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_rowmajor>
     // Test.
-    %pos3 = sparse_tensor.positions %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
-    %vecp3 = vector.transfer_read %pos3[%c0], %c0 : memref<?xindex>, vector<5xindex>
-    vector.print %vecp3 : vector<5xindex>
-    %crd3 = sparse_tensor.coordinates %s3 {level = 1 : index } : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xindex>
-    %vecc3 = vector.transfer_read %crd3[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc3 : vector<2xindex>
-    %val3 = sparse_tensor.values %s3 : tensor<?x?xf64, #BSR_col_rowmajor> to memref<?xf64>
-    %vecv3 = vector.transfer_read %val3[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv3 : vector<24xf64>
+    sparse_tensor.print %s3 : tensor<?x?xf64, #BSR_col_rowmajor>
     // Release.
     bufferization.dealloc_tensor %s3: tensor<?x?xf64, #BSR_col_rowmajor>
     return
   }

-  // CHECK-NEXT: ( 0, 1, 1, 2, 2 )
-  // CHECK-NEXT: ( 0, 1 )
-  // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7 )
+  //
+  // CHECK-NEXT: ---- Sparse Tensor ----
+  // CHECK-NEXT: nse = 24
+  // CHECK-NEXT: pos[1] : ( 0, 1, 1, 2, 2,
+  // CHECK-NEXT: crd[1] : ( 0, 1,
+  // CHECK-NEXT: values : ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 0, 0, 6, 0, 0, 7,
+  // CHECK-NEXT: ----
+  //
   func.func @foo4() {
     // Build.
     %c0 = arith.constant 0 : index
@@ -196,15 +193,7 @@ module {
     > : tensor<6x16xf64>
     %s4 = sparse_tensor.convert %m : tensor<6x16xf64> to tensor<?x?xf64, #BSR_col_colmajor>
     // Test.
-    %pos4 = sparse_tensor.positions %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
-    %vecp4 = vector.transfer_read %pos4[%c0], %c0 : memref<?xindex>, vector<5xindex>
-    vector.print %vecp4 : vector<5xindex>
-    %crd4 = sparse_tensor.coordinates %s4 {level = 1 : index } : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xindex>
-    %vecc4 = vector.transfer_read %crd4[%c0], %c0 : memref<?xindex>, vector<2xindex>
-    vector.print %vecc4 : vector<2xindex>
-    %val4 = sparse_tensor.values %s4 : tensor<?x?xf64, #BSR_col_colmajor> to memref<?xf64>
-    %vecv4 = vector.transfer_read %val4[%c0], %f0 : memref<?xf64>, vector<24xf64>
-    vector.print %vecv4 : vector<24xf64>
+    sparse_tensor.print %s4 : tensor<?x?xf64, #BSR_col_colmajor>
     // Release.
     bufferization.dealloc_tensor %s4: tensor<?x?xf64, #BSR_col_colmajor>
     return

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir

Lines changed: 8 additions & 10 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -83,12 +83,11 @@ module {
   }

   func.func private @getTensorFilename(index) -> (!Filename)
-  func.func private @printMemref1dF64(%ptr : memref<?xf64>) attributes { llvm.emit_c_interface }

   //
   // Main driver that reads matrix from file and calls the kernel.
   //
-  func.func @entry() {
+  func.func @main() {
     %d0 = arith.constant 0.0 : f64
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
@@ -104,14 +103,13 @@ module {

     //
     // Print the linearized 5x5 result for verification.
-    // CHECK: 25
-    // CHECK: [2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10
     //
-    %n = sparse_tensor.number_of_entries %0 : tensor<?x?xf64, #DenseMatrix>
-    vector.print %n : index
-    %m = sparse_tensor.values %0
-      : tensor<?x?xf64, #DenseMatrix> to memref<?xf64>
-    call @printMemref1dF64(%m) : (memref<?xf64>) -> ()
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 25
+    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>

     // Release the resources.
     bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_bf16.mlir

Lines changed: 8 additions & 16 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -67,20 +67,8 @@ module {
     return %0 : tensor<?xbf16, #DenseVector>
   }

-  // Dumps a dense vector of type bf16.
-  func.func @dump_vec(%arg0: tensor<?xbf16, #DenseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : bf16
-    %0 = sparse_tensor.values %arg0 : tensor<?xbf16, #DenseVector> to memref<?xbf16>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xbf16>, vector<32xbf16>
-    %f1 = arith.extf %1: vector<32xbf16> to vector<32xf32>
-    vector.print %f1 : vector<32xf32>
-    return
-  }
-
   // Driver method to call and verify the kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index

     // Setup sparse vectors.
@@ -103,8 +91,12 @@ module {
     //
     // Verify the result.
    //
-    // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    call @dump_vec(%0) : (tensor<?xbf16, #DenseVector>) -> ()
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xbf16, #DenseVector>

     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xbf16, #SparseVector>

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output_f16.mlir

Lines changed: 8 additions & 16 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -68,20 +68,8 @@ module {
     return %0 : tensor<?xf16, #DenseVector>
   }

-  // Dumps a dense vector of type f16.
-  func.func @dump_vec(%arg0: tensor<?xf16, #DenseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant -1.0 : f16
-    %0 = sparse_tensor.values %arg0 : tensor<?xf16, #DenseVector> to memref<?xf16>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf16>, vector<32xf16>
-    %f1 = arith.extf %1: vector<32xf16> to vector<32xf32>
-    vector.print %f1 : vector<32xf32>
-    return
-  }
-
   // Driver method to call and verify the kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index

     // Setup sparse vectors.
@@ -104,8 +92,12 @@ module {
     //
     // Verify the result.
     //
-    // CHECK: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    call @dump_vec(%0) : (tensor<?xf16, #DenseVector>) -> ()
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 32
+    // CHECK-NEXT: values : ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xf16, #DenseVector>

     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf16, #SparseVector>
