Commit 83c9244

[mlir][sparse] Migrate more tests to use sparse_tensor.print (#84833)
Continuous efforts following #84249.
1 parent a950c06 commit 83c9244

19 files changed: +777, -707 lines
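
The pattern applied across these tests, condensed from the diffs below: each test previously defined a per-shape dump helper that converted the sparse tensor to a dense one, printed it through printMemrefF64, and then printed the entry count and values buffer separately; the migration replaces all of that with a single sparse_tensor.print of the sparse result, with the FileCheck expectations updated to match its output. A minimal before/after sketch (the @dump_mat_4x9 helper and the %8 result are taken verbatim from concatenate_dim_1.mlir below):

  // Before: convert to dense, print the unranked memref, then print nse and the values buffer.
  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
    vector.print %n : index
    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
    return
  }

  // After: one op prints nse, dim, lvl, the pos/crd arrays, and the values in
  // storage (level) order, so no dense conversion or helper is needed.
  sparse_tensor.print %8 : tensor<4x9xf64, #MAT_C_C>

The driver entry point also changes from @entry to @main, so the lit run lines switch from "-e entry" to "-e main".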

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_0_permute.mlir

Lines changed: 28 additions & 50 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -99,39 +99,15 @@ module {
     return
   }

-  func.func @dump_mat_perm_9x4(%A: tensor<9x4xf64, #MAT_C_C_P>) {
-    %c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C_P> to tensor<9x4xf64>
-    %cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C_P>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C_P> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
     %u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
     call @printMemrefF64(%u) : (tensor<*xf64>) -> ()

     return
   }

-  func.func @dump_mat_annotated_dense_9x4(%A: tensor<9x4xf64, #MAT_D_D>) {
-    %n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_D_D>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_D_D> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -163,20 +139,21 @@ module {
     %sm34cdp = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D_P>
     %sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>

-    // CHECK: {{\[}}[1, 0, 3, 0],
-    // CHECK-NEXT: [0, 2, 0, 0],
-    // CHECK-NEXT: [1, 0, 1, 1],
-    // CHECK-NEXT: [0, 0.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 1.5, 1],
-    // CHECK-NEXT: [0, 3.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
+    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
+    // CHECK-NEXT: ----
+    //
     %4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
              : (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C_P>
-    call @dump_mat_perm_9x4(%4) : (tensor<9x4xf64, #MAT_C_C_P>) -> ()
+    sparse_tensor.print %4 : tensor<9x4xf64, #MAT_C_C_P>

     // CHECK: {{\[}}[1, 0, 3, 0],
     // CHECK-NEXT: [0, 2, 0, 0],
@@ -191,20 +168,21 @@ module {
              : (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
     call @dump_mat_dense_9x4(%5) : (tensor<9x4xf64>) -> ()

-    // CHECK: {{\[}}[1, 0, 3, 0],
-    // CHECK-NEXT: [0, 2, 0, 0],
-    // CHECK-NEXT: [1, 0, 1, 1],
-    // CHECK-NEXT: [0, 0.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 1.5, 1],
-    // CHECK-NEXT: [0, 3.5, 0, 0],
-    // CHECK-NEXT: [1, 5, 2, 0],
-    // CHECK-NEXT: [1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 9, 4 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
+    // CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)
              : (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
-    call @dump_mat_9x4(%6) : (tensor<9x4xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %6 : tensor<9x4xf64, #MAT_C_C>

     // CHECK: {{\[}}[1, 0, 3, 0],
     // CHECK-NEXT: [0, 2, 0, 0],

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1.mlir

Lines changed: 28 additions & 30 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -82,20 +82,6 @@ module {
     return %0 : tensor<4x9xf64>
   }

-  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
     %1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
     call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -104,7 +90,7 @@ module {
   }

   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -125,15 +111,21 @@ module {
     %sm43cd = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D>
     %sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>

-    // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
-    // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
-    // CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
              : (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%8) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %8 : tensor<4x9xf64, #MAT_C_C>

     // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
     // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
@@ -143,15 +135,21 @@ module {
              : (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
     call @dump_mat_dense_4x9(%9) : (tensor<4x9xf64>) -> ()

-    // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
-    // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
-    // CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)
              : (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%10) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %10 : tensor<4x9xf64, #MAT_C_C>

     // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
     // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],

mlir/test/Integration/Dialect/SparseTensor/CPU/concatenate_dim_1_permute.mlir

Lines changed: 28 additions & 44 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -85,34 +85,6 @@ module {
     return %0 : tensor<4x9xf64>
   }

-  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
-  func.func @dump_mat_perm_4x9(%A: tensor<4x9xf64, #MAT_C_C_P>) {
-    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C_P> to tensor<4x9xf64>
-    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
-    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
-
-    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C_P>
-    vector.print %n : index
-
-    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C_P> to memref<?xf64>
-    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
-
-    return
-  }
-
   func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
     %1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
     call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -121,7 +93,7 @@ module {
   }

   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %m42 = arith.constant dense<
       [ [ 1.0, 0.0 ],
         [ 3.1, 0.0 ],
@@ -153,15 +125,21 @@ module {
     %sm43cdp = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D_P>
     %sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>

-    // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
-    // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
-    // CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 9, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 9
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
+    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
+    // CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
+    // CHECK-NEXT: ----
+    //
     %12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
              : (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C_P>
-    call @dump_mat_perm_4x9(%12) : (tensor<4x9xf64, #MAT_C_C_P>) -> ()
+    sparse_tensor.print %12 : tensor<4x9xf64, #MAT_C_C_P>

     // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
     // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
@@ -171,15 +149,21 @@ module {
              : (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
     call @dump_mat_dense_4x9(%13) : (tensor<4x9xf64>) -> ()

-    // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
-    // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
-    // CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
-    // CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
-    // CHECK-NEXT: 18
-    // CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    //
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 18
+    // CHECK-NEXT: dim = ( 4, 9 )
+    // CHECK-NEXT: lvl = ( 4, 9 )
+    // CHECK-NEXT: pos[0] : ( 0, 4
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
+    // CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
+    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
+    // CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
+    // CHECK-NEXT: ----
+    //
     %14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)
              : (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
-    call @dump_mat_4x9(%14) : (tensor<4x9xf64, #MAT_C_C>) -> ()
+    sparse_tensor.print %14 : tensor<4x9xf64, #MAT_C_C>

     // CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
     // CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
