Commit 05390df

[mlir][sparse] migration to sparse_tensor.print (llvm#83926)
Continuing the efforts started in llvm#83357
1 parent 488ac3d commit 05390df
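
In short, both tests drop their hand-written dump code (sparse_tensor.values / sparse_tensor.convert followed by vector.transfer_read and vector.print) in favor of the dialect's own printer, and rename the test entry point from @entry to @main. A minimal before/after sketch of the pattern, distilled from the diff below (the %st, %c0, and %d0 names and the vector width are illustrative, not taken from the patch):

    // Before: pull the storage out by hand and print it piecewise.
    %n = sparse_tensor.number_of_entries %st : tensor<?xf64, #SparseVector>
    %v = sparse_tensor.values %st : tensor<?xf64, #SparseVector> to memref<?xf64>
    %a = vector.transfer_read %v[%c0], %d0 : memref<?xf64>, vector<12xf64>
    vector.print %n : index
    vector.print %a : vector<12xf64>

    // After: a single op dumps the whole storage scheme
    // (nse, dim, lvl, pos/crd per level, values).
    sparse_tensor.print %st : tensor<?xf64, #SparseVector>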

2 files changed: +165, -132 lines

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir

Lines changed: 21 additions & 16 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -73,7 +73,7 @@ module {
   }
 
   // Driver method to call and verify sign kernel.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
     %df = arith.constant 99.99 : f64
     %di = arith.constant 9999 : i32
@@ -116,21 +116,26 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: 12
-    // CHECK-NEXT: ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0 )
-    // CHECK-NEXT: 9
-    // CHECK-NEXT: ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647 )
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 12
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 12,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0,
+    // CHECK-NEXT: ----
     //
-    %x = sparse_tensor.values %0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %y = sparse_tensor.values %1 : tensor<?xi32, #SparseVector> to memref<?xi32>
-    %a = vector.transfer_read %x[%c0], %df: memref<?xf64>, vector<12xf64>
-    %b = vector.transfer_read %y[%c0], %di: memref<?xi32>, vector<9xi32>
-    %na = sparse_tensor.number_of_entries %0 : tensor<?xf64, #SparseVector>
-    %nb = sparse_tensor.number_of_entries %1 : tensor<?xi32, #SparseVector>
-    vector.print %na : index
-    vector.print %a : vector<12xf64>
-    vector.print %nb : index
-    vector.print %b : vector<9xi32>
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 21, 31,
+    // CHECK-NEXT: values : ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647,
+    // CHECK-NEXT: ----
+    //
+    sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xi32, #SparseVector>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
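
The new CHECK lines simply spell out the compressed storage of the printed vectors. A hedged reading of the first block above, not part of the patch itself:

    // pos[0] : ( 0, 12,  -> one segment holding stored entries 0..11 (nse = 12)
    // crd[0][k]           -> coordinate of the k-th stored entry
    // values[k]           -> its value; e.g. k = 0 gives a[0] = 1.5, k = 3 gives a[11] = 11.3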

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_binary.mlir

Lines changed: 144 additions & 116 deletions
@@ -10,7 +10,7 @@
 // DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
 // DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
 // DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
 // DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
 // DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
 //
@@ -365,84 +365,8 @@ module {
     return %0 : tensor<4x4xf64, #DCSR>
   }
 
-  //
-  // Utility functions to dump the value of a tensor.
-  //
-
-  func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0.0 : f64
-    %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
-    vector.print %1 : vector<16xf64>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<32xf64>
-    vector.print %3 : vector<32xf64>
-    bufferization.dealloc_tensor %dv : tensor<?xf64>
-    return
-  }
-
-  func.func @dump_vec_i32(%arg0: tensor<?xi32, #SparseVector>) {
-    // Dump the values array to verify only sparse contents are stored.
-    %c0 = arith.constant 0 : index
-    %d0 = arith.constant 0 : i32
-    %0 = sparse_tensor.values %arg0 : tensor<?xi32, #SparseVector> to memref<?xi32>
-    %1 = vector.transfer_read %0[%c0], %d0: memref<?xi32>, vector<24xi32>
-    vector.print %1 : vector<24xi32>
-    // Dump the dense vector to verify structure is correct.
-    %dv = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
-    %3 = vector.transfer_read %dv[%c0], %d0: tensor<?xi32>, vector<32xi32>
-    vector.print %3 : vector<32xi32>
-    bufferization.dealloc_tensor %dv : tensor<?xi32>
-    return
-  }
-
-  func.func @dump_mat(%arg0: tensor<?x?xf64, #DCSR>) {
-    %d0 = arith.constant 0.0 : f64
-    %c0 = arith.constant 0 : index
-    %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
-    %1 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<4x8xf64>
-    vector.print %1 : vector<4x8xf64>
-    bufferization.dealloc_tensor %dm : tensor<?x?xf64>
-    return
-  }
-
-  func.func @dump_mat_4x4(%A: tensor<4x4xf64, #DCSR>) {
-    %c0 = arith.constant 0 : index
-    %du = arith.constant 0.0 : f64
-
-    %c = sparse_tensor.convert %A : tensor<4x4xf64, #DCSR> to tensor<4x4xf64>
-    %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xf64>, vector<4x4xf64>
-    vector.print %v : vector<4x4xf64>
-
-    %1 = sparse_tensor.values %A : tensor<4x4xf64, #DCSR> to memref<?xf64>
-    %2 = vector.transfer_read %1[%c0], %du: memref<?xf64>, vector<16xf64>
-    vector.print %2 : vector<16xf64>
-
-    bufferization.dealloc_tensor %c : tensor<4x4xf64>
-    return
-  }
-
-  func.func @dump_mat_4x4_i8(%A: tensor<4x4xi8, #DCSR>) {
-    %c0 = arith.constant 0 : index
-    %du = arith.constant 0 : i8
-
-    %c = sparse_tensor.convert %A : tensor<4x4xi8, #DCSR> to tensor<4x4xi8>
-    %v = vector.transfer_read %c[%c0, %c0], %du: tensor<4x4xi8>, vector<4x4xi8>
-    vector.print %v : vector<4x4xi8>
-
-    %1 = sparse_tensor.values %A : tensor<4x4xi8, #DCSR> to memref<?xi8>
-    %2 = vector.transfer_read %1[%c0], %du: memref<?xi8>, vector<16xi8>
-    vector.print %2 : vector<16xi8>
-
-    bufferization.dealloc_tensor %c : tensor<4x4xi8>
-    return
-  }
-
   // Driver method to call and verify kernels.
-  func.func @entry() {
+  func.func @main() {
     %c0 = arith.constant 0 : index
 
     // Setup sparse vectors.
@@ -525,45 +449,149 @@ module {
     //
     // Verify the results.
     //
-    // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 11, 0, 12, 13, 0, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 15, 0, 16, 0, 0, 17, 0, 0, 0, 0, 0, 0, 18, 19, 0, 20 )
-    // CHECK-NEXT: ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 11, 0, 2, 13, 0, 0, 0, 0, 0, 14, 3, 0, 0, 0, 0, 15, 4, 16, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 8, 0, 9 )
-    // CHECK-NEXT: ( 0, 6, 3, 28, 0, 6, 56, 72, 9, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 28, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 56, 72, 0, 9 )
-    // CHECK-NEXT: ( 1, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 3, 11, 17, 20, 21, 28, 29, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 17, 0, 0, 20, 21, 0, 0, 0, 0, 0, 0, 28, 29, 0, 31 )
-    // CHECK-NEXT: ( ( 7, 0, 0, 0, 0, 0, 0, -5 ), ( -4, 0, 0, 0, 0, 0, -3, 0 ), ( 0, -2, 0, 0, 0, 0, 0, 7 ), ( 0, 0, 0, 0, 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) )
-    // CHECK-NEXT: ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( 1, 5, 2, 4 ), ( 5, 4, 0, 0 ) )
-    // CHECK-NEXT: ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 2, 0, 4, 1 ), ( 0, 2.5, 0, 0 ), ( -1, -5, 2, 4 ), ( 1, 4, 0, 0 ) )
-    // CHECK-NEXT: ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 1, -1 ), ( 0, 1, 0, 0 ), ( -1, -2, -2, 2 ), ( 1, 2, 0, 0 ) )
-    // CHECK-NEXT: ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 1, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ), ( 0, 0, 0, 0 ) )
-    // CHECK-NEXT: ( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
-    // CHECK-NEXT: ( ( 0, 0, 0, -1 ), ( 0, 0, 0, 0 ), ( -1, -5, -2, 4 ), ( 0, 4, 0, 0 ) )
-    // CHECK-NEXT: ( -1, -1, -5, -2, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
+    // CHECK: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 10,
+    // CHECK-NEXT: crd[0] : ( 1, 3, 4, 10, 16, 18, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 14
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 14,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 3, 4, 10, 11, 16, 17, 18, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 0, 6, 3, 28, 0, 6, 56, 72, 9,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 11, 17, 20,
+    // CHECK-NEXT: values : ( 1, 3, 4, 5,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 9
+    // CHECK-NEXT: dim = ( 32 )
+    // CHECK-NEXT: lvl = ( 32 )
+    // CHECK-NEXT: pos[0] : ( 0, 9,
+    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: values : ( 0, 3, 11, 17, 20, 21, 28, 29, 31,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 8 )
+    // CHECK-NEXT: lvl = ( 4, 8 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6,
+    // CHECK-NEXT: crd[1] : ( 0, 7, 0, 6, 1, 7,
+    // CHECK-NEXT: values : ( 7, -5, -4, -3, -2, 7,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 10
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 4,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1,
+    // CHECK-NEXT: values : ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 4
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 1, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4,
+    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0,
+    // CHECK-NEXT: values : ( 1, 0, 0, 0,
+    // CHECK-NEXT: ----
+    //
+    // CHECK-NEXT: ---- Sparse Tensor ----
+    // CHECK-NEXT: nse = 6
+    // CHECK-NEXT: dim = ( 4, 4 )
+    // CHECK-NEXT: lvl = ( 4, 4 )
+    // CHECK-NEXT: pos[0] : ( 0, 3,
+    // CHECK-NEXT: crd[0] : ( 0, 2, 3,
+    // CHECK-NEXT: pos[1] : ( 0, 1, 5, 6,
+    // CHECK-NEXT: crd[1] : ( 3, 0, 1, 2, 3, 1,
+    // CHECK-NEXT: values : ( -1, -1, -5, -2, 4, 4,
     //
-    call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec(%sv2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_i32(%0) : (tensor<?xi32, #SparseVector>) -> ()
-    call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec(%2) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_vec_i32(%3) : (tensor<?xi32, #SparseVector>) -> ()
-    call @dump_mat(%5) : (tensor<?x?xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%6) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%7) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%8) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4(%9) : (tensor<4x4xf64, #DCSR>) -> ()
-    call @dump_mat_4x4_i8(%10) : (tensor<4x4xi8, #DCSR>) -> ()
-    call @dump_mat_4x4(%11) : (tensor<4x4xf64, #DCSR>) -> ()
+    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %sv2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %0 : tensor<?xi32, #SparseVector>
+    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.print %3 : tensor<?xi32, #SparseVector>
+    sparse_tensor.print %5 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.print %6 : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %7 : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %8 : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %9 : tensor<4x4xf64, #DCSR>
+    sparse_tensor.print %10 : tensor<4x4xi8, #DCSR>
+    sparse_tensor.print %11 : tensor<4x4xf64, #DCSR>
 
     // Release the resources.
     bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
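
For the DCSR matrices the same printout nests two levels: pos[0]/crd[0] enumerate the stored rows, while pos[1]/crd[1] enumerate the stored columns within each of those rows. A hedged reading of the 4x8 block above (nse = 6), not part of the patch itself:

    // crd[0] : ( 0, 1, 2,    -> rows 0, 1, 2 carry entries; row 3 is empty
    // pos[1] : ( 0, 2, 4, 6, -> row 0 owns entries [0, 2), row 1 [2, 4), row 2 [4, 6)
    // crd[1]/values          -> row 0 stores (col 0 -> 7) and (col 7 -> -5), i.e. the dense
    //                           row ( 7, 0, 0, 0, 0, 0, 0, -5 ) from the old CHECK lines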
