[mlir][sparse] Migrate more tests to sparse_tensor.print #84249
Merged
@llvm/pr-subscribers-mlir @llvm/pr-subscribers-mlir-sparse

Author: Yinying Li (yinying-lisa-li)

Changes: Continuous efforts following #83946.

Patch is 77.88 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/84249.diff

20 Files Affected:
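Across these files the recurring change is the same: hand-rolled dump helpers that extracted buffers with sparse_tensor.values or sparse_tensor.convert, read them via vector.transfer_read, and printed them with vector.print are replaced by a single sparse_tensor.print. A minimal before/after sketch of the pattern, distilled from the first diff below (the value %t, the constants %c0/%f0, and the vector width 16 are illustrative):

// Before: manually dump only the values buffer of the sparse tensor.
%m = sparse_tensor.values %t : tensor<8x8xf32, #CSR> to memref<?xf32>
%v = vector.transfer_read %m[%c0], %f0 : memref<?xf32>, vector<16xf32>
vector.print %v : vector<16xf32>

// After: a single op prints nse, dim, lvl, the pos/crd arrays, and the values.
sparse_tensor.print %t : tensor<8x8xf32, #CSR>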
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
index 6ec13fd623b5cd..4e9090ae201d02 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -65,7 +65,7 @@ module {
// and then calls the sparse scaling kernel with the sparse tensor
// as input argument.
//
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
@@ -88,11 +88,16 @@ module {
// Print the resulting compacted values for verification.
//
- // CHECK: ( 2, 2, 2, 4, 6, 8, 2, 10, 2, 2, 12, 2, 14, 2, 2, 16 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 16
+ // CHECK-NEXT: dim = ( 8, 8 )
+ // CHECK-NEXT: lvl = ( 8, 8 )
+ // CHECK-NEXT: pos[1] : ( 0, 3, 4, 5, 6, 8, 11, 14, 16
+ // CHECK-NEXT: crd[1] : ( 0, 2, 7, 1, 2, 3, 1, 4, 1, 2, 5, 2, 6, 7, 2, 7
+ // CHECK-NEXT: values : ( 2, 2, 2, 4, 6, 8, 2, 10, 2, 2, 12, 2, 14, 2, 2, 16
+ // CHECK-NEXT: ----
//
- %m = sparse_tensor.values %2 : tensor<8x8xf32, #CSR> to memref<?xf32>
- %v = vector.transfer_read %m[%c0], %f0: memref<?xf32>, vector<16xf32>
- vector.print %v : vector<16xf32>
+ sparse_tensor.print %2 : tensor<8x8xf32, #CSR>
// Release the resources.
bufferization.dealloc_tensor %1 : tensor<8x8xf32, #CSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
index 439144fedeeb89..dd8396dc23b036 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scf_nested.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -68,17 +68,7 @@ module @func_sparse.2 {
return %1 : tensor<2x3x4xf64, #SparseMatrix>
}
- func.func @dump(%arg0: tensor<2x3x4xf64, #SparseMatrix>) {
- %d0 = arith.constant 0.0 : f64
- %c0 = arith.constant 0 : index
- %dm = sparse_tensor.convert %arg0 : tensor<2x3x4xf64, #SparseMatrix> to tensor<2x3x4xf64>
- %0 = vector.transfer_read %dm[%c0, %c0, %c0], %d0: tensor<2x3x4xf64>, vector<2x3x4xf64>
- vector.print %0 : vector<2x3x4xf64>
- bufferization.dealloc_tensor %dm : tensor<2x3x4xf64>
- return
- }
-
- func.func public @entry() {
+ func.func public @main() {
%src = arith.constant dense<[
[ [ 1.0, 2.0, 3.0, 4.0 ],
[ 5.0, 6.0, 7.0, 8.0 ],
@@ -96,10 +86,34 @@ module @func_sparse.2 {
%sm_t = call @condition(%t, %sm) : (i1, tensor<2x3x4xf64, #SparseMatrix>) -> tensor<2x3x4xf64, #SparseMatrix>
%sm_f = call @condition(%f, %sm) : (i1, tensor<2x3x4xf64, #SparseMatrix>) -> tensor<2x3x4xf64, #SparseMatrix>
- // CHECK: ( ( ( 0, 1, 2, 3 ), ( 4, 5, 6, 7 ), ( 8, 9, 10, 11 ) ), ( ( 12, 13, 14, 15 ), ( 16, 17, 18, 19 ), ( 20, 21, 22, 23 ) ) )
- // CHECK-NEXT: ( ( ( 2, 3, 4, 5 ), ( 6, 7, 8, 9 ), ( 10, 11, 12, 13 ) ), ( ( 14, 15, 16, 17 ), ( 18, 19, 20, 21 ), ( 22, 23, 24, 25 ) ) )
- call @dump(%sm_t) : (tensor<2x3x4xf64, #SparseMatrix>) -> ()
- call @dump(%sm_f) : (tensor<2x3x4xf64, #SparseMatrix>) -> ()
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: dim = ( 2, 3, 4 )
+ // CHECK-NEXT: lvl = ( 2, 3, 4 )
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 0, 1
+ // CHECK-NEXT: pos[1] : ( 0, 3, 6
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+ // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+ // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 24
+ // CHECK-NEXT: dim = ( 2, 3, 4 )
+ // CHECK-NEXT: lvl = ( 2, 3, 4 )
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 0, 1
+ // CHECK-NEXT: pos[1] : ( 0, 3, 6
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2
+ // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24
+ // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+ // CHECK-NEXT: values : ( 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %sm_t : tensor<2x3x4xf64, #SparseMatrix>
+ sparse_tensor.print %sm_f : tensor<2x3x4xf64, #SparseMatrix>
bufferization.dealloc_tensor %sm : tensor<2x3x4xf64, #SparseMatrix>
bufferization.dealloc_tensor %sm_t : tensor<2x3x4xf64, #SparseMatrix>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
index 533afb6644aeda..68bc17175e3b4b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_select.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -97,39 +97,8 @@ module {
return %0 : tensor<?x?xf64, #CSR>
}
- // Dumps a sparse vector of type f64.
- func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant 0.0 : f64
- %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
- vector.print %1 : vector<8xf64>
- // Dump the dense vector to verify structure is correct.
- %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
- %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
- vector.print %2 : vector<16xf64>
- bufferization.dealloc_tensor %dv : tensor<?xf64>
- return
- }
-
- // Dump a sparse matrix.
- func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant 0.0 : f64
- %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
- vector.print %1 : vector<16xf64>
- %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
- %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
- vector.print %2 : vector<5x5xf64>
- bufferization.dealloc_tensor %dm : tensor<?x?xf64>
- return
- }
-
// Driver method to call and verify vector kernels.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse matrices.
@@ -151,19 +120,43 @@ module {
//
// Verify the results.
//
- // CHECK: ( 1, 2, -4, 0, 5, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 1, 0, 2, 0, -4, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 3, 0, 4, 0 ), ( 0, 0, 0, 5, 6 ), ( 0, 0, 7, 0, 0 ) )
- // CHECK-NEXT: ( 1, 2, 5, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 1, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( 1, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( ( 0, 0, 0, 1, 0 ), ( 0, 0, 0, 0, 2 ), ( 0, 0, 0, 4, 0 ), ( 0, 0, 0, 0, 6 ), ( 0, 0, 0, 0, 0 ) )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 5
+ // CHECK-NEXT: dim = ( 10 )
+ // CHECK-NEXT: lvl = ( 10 )
+ // CHECK-NEXT: pos[0] : ( 0, 5
+ // CHECK-NEXT: crd[0] : ( 1, 3, 5, 7, 9
+ // CHECK-NEXT: values : ( 1, 2, -4, 0, 5
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 7
+ // CHECK-NEXT: dim = ( 5, 5 )
+ // CHECK-NEXT: lvl = ( 5, 5 )
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 4, 6, 7
+ // CHECK-NEXT: crd[1] : ( 3, 4, 1, 3, 3, 4, 2
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 3
+ // CHECK-NEXT: dim = ( 10 )
+ // CHECK-NEXT: lvl = ( 10 )
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 1, 3, 9
+ // CHECK-NEXT: values : ( 1, 2, 5
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 4
+ // CHECK-NEXT: dim = ( 5, 5 )
+ // CHECK-NEXT: lvl = ( 5, 5 )
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2, 3, 4, 4
+ // CHECK-NEXT: crd[1] : ( 3, 4, 3, 4
+ // CHECK-NEXT: values : ( 1, 2, 4, 6
+ // CHECK-NEXT: ----
//
- call @dump_vec(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
- call @dump_mat(%sm1) : (tensor<?x?xf64, #CSR>) -> ()
- call @dump_vec(%1) : (tensor<?xf64, #SparseVector>) -> ()
- call @dump_mat(%2) : (tensor<?x?xf64, #CSR>) -> ()
+ sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
+ sparse_tensor.print %sm1 : tensor<?x?xf64, #CSR>
+ sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
+ sparse_tensor.print %2 : tensor<?x?xf64, #CSR>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
index 6244be0ba7ab64..f4435c81117b2d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_semiring_select.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -60,7 +60,7 @@ module {
}
// Driver method to call and verify vector kernels.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f64
@@ -86,20 +86,24 @@ module {
tensor<5x5xf64, #DCSR>) -> tensor<5x5xf64, #DCSR>
- // CHECK: ( ( 0.1, 1.1, 0, 0, 0 ),
- // CHECK-SAME: ( 0, 1.1, 2.2, 0, 0 ),
- // CHECK-SAME: ( 0, 0, 2.1, 3.3, 0 ),
- // CHECK-SAME: ( 0, 0, 0, 3.1, 4.4 ),
- // CHECK-SAME: ( 0, 0, 0, 0, 4.1 ) )
- %r = sparse_tensor.convert %1 : tensor<5x5xf64, #DCSR> to tensor<5x5xf64>
- %v2 = vector.transfer_read %r[%c0, %c0], %f0 : tensor<5x5xf64>, vector<5x5xf64>
- vector.print %v2 : vector<5x5xf64>
+ //
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: dim = ( 5, 5 )
+ // CHECK-NEXT: lvl = ( 5, 5 )
+ // CHECK-NEXT: pos[0] : ( 0, 5
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8, 9
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1, 2, 2, 3, 3, 4, 4
+ // CHECK-NEXT: values : ( 0.1, 1.1, 1.1, 2.2, 2.1, 3.3, 3.1, 4.4, 4.1
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %1 : tensor<5x5xf64, #DCSR>
// Release the resources.
bufferization.dealloc_tensor %sl: tensor<5x5xf64, #DCSR>
bufferization.dealloc_tensor %sr: tensor<5x5xf64, #DCSR>
bufferization.dealloc_tensor %1: tensor<5x5xf64, #DCSR>
- bufferization.dealloc_tensor %r : tensor<5x5xf64>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
index 08e75dfa2c02ca..c09374918b7d6a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sign.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -79,7 +79,7 @@ module {
}
// Driver method to call and verify sign kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%du = arith.constant 0.0 : f64
@@ -110,11 +110,16 @@ module {
//
// Verify the results.
//
- // CHECK: ( -1, 1, -1, 1, 1, -1, nan, -nan, 1, -1, -0, 0, 0 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 12
+ // CHECK-NEXT: dim = ( 32 )
+ // CHECK-NEXT: lvl = ( 32 )
+ // CHECK-NEXT: pos[0] : ( 0, 12
+ // CHECK-NEXT: crd[0] : ( 0, 3, 5, 11, 13, 17, 18, 20, 21, 28, 29, 31
+ // CHECK-NEXT: values : ( -1, 1, -1, 1, 1, -1, nan, -nan, 1, -1, -0, 0
+ // CHECK-NEXT: ----
//
- %1 = sparse_tensor.values %0 : tensor<?xf64, #SparseVector> to memref<?xf64>
- %2 = vector.transfer_read %1[%c0], %du: memref<?xf64>, vector<13xf64>
- vector.print %2 : vector<13xf64>
+ sparse_tensor.print %0 : tensor<?xf64, #SparseVector>
// Release the resources.
bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index e0111f692601f0..7b3f9a2ce0e012 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -35,19 +35,19 @@
!Filename = !llvm.ptr
#SortedCOO = #sparse_tensor.encoding<{
- map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
+ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton(soa))
}>
#SortedCOOPermuted = #sparse_tensor.encoding<{
- map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton),
+ map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton(soa)),
}>
#SortedCOO3D = #sparse_tensor.encoding<{
- map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique), d2 : singleton)
+ map = (d0, d1, d2) -> (d0 : compressed(nonunique), d1 : singleton(nonunique, soa), d2 : singleton(soa))
}>
#SortedCOO3DPermuted = #sparse_tensor.encoding<{
- map = (d0, d1, d2) -> (d2 : compressed(nonunique), d0 : singleton(nonunique), d1 : singleton)
+ map = (d0, d1, d2) -> (d2 : compressed(nonunique), d0 : singleton(nonunique, soa), d1 : singleton(soa))
}>
@@ -82,29 +82,7 @@ module {
return %0 : tensor<?x?xf64, #SortedCOO>
}
- func.func @dumpi(%arg0: memref<?xindex>) {
- %c0 = arith.constant 0 : index
- %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<20xindex>
- vector.print %v : vector<20xindex>
- return
- }
-
- func.func @dumpsi(%arg0: memref<?xindex, strided<[?], offset: ?>>) {
- %c0 = arith.constant 0 : index
- %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex, strided<[?], offset: ?>>, vector<20xindex>
- vector.print %v : vector<20xindex>
- return
- }
-
- func.func @dumpf(%arg0: memref<?xf64>) {
- %c0 = arith.constant 0 : index
- %nan = arith.constant 0x0 : f64
- %v = vector.transfer_read %arg0[%c0], %nan: memref<?xf64>, vector<20xf64>
- vector.print %v : vector<20xf64>
- return
- }
-
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@@ -125,130 +103,88 @@ module {
%4 = sparse_tensor.convert %m : tensor<5x4xf64> to tensor<?x?xf64, #SortedCOO>
//
- // CHECK: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 )
- // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, 0, 0, 0 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 17
+ // CHECK-NEXT: dim = ( 4, 256 )
+ // CHECK-NEXT: lvl = ( 4, 256 )
+ // CHECK-NEXT: pos[0] : ( 0, 17
+ // CHECK-NEXT: crd[0] : ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ // CHECK-NEXT: crd[1] : ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255
+ // CHECK-NEXT: values : ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17
+ // CHECK-NEXT: ----
//
- %p0 = sparse_tensor.positions %0 { level = 0 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
- %i00 = sparse_tensor.coordinates %0 { level = 0 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
- %i01 = sparse_tensor.coordinates %0 { level = 1 : index }
- : tensor<?x?xf64, #SortedCOO> to memref<?xindex, strided<[?], offset: ?>>
- %v0 = sparse_tensor.values %0
- : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
- call @dumpi(%p0) : (memref<?xindex>) -> ()
- call @dumpsi(%i00) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
- call @dumpsi(%i01) : (memref<?xindex, strided<[?], offset: ?>>) -> ()
- call @dumpf(%v0) : (memref<?xf64>) -> ()
+ sparse_tensor.print %0 : tensor<?x?xf64, #SortedCOO>
//
- // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255, 0, 0, 0 )
- // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 )
- // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, 0, 0, 0 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 17
+ // CHECK-NEXT: dim = ( 4, 256 )
+ // CHECK-NEXT: lvl = ( 256, 4 )
+ // CHECK-NEXT: pos[0] : ( 0, 17
+ // CHECK-NEXT: crd[0] : ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255
+ // CHECK-NEXT: crd[1] : ( 0, 3...
[truncated]
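As a reading aid for the new CHECK lines (an explanatory note, not part of the patch): nse is the number of stored entries, dim and lvl are the dimension- and level-space shapes, and each stored level d prints its pos[d] and/or crd[d] arrays, where segment r of a compressed level owns the entries in the half-open range [pos[d][r], pos[d][r+1]). For the CSR output in sparse_scale.mlir above, for example:

// pos[1] : ( 0, 3, 4, ... )  row r owns entries [pos[1][r], pos[1][r+1]), so row 0 owns entries 0..2
// crd[1] : ( 0, 2, 7, ... )  row 0's entries sit in columns 0, 2, and 7
// values : ( 2, 2, 2, ... )  with stored values 2, 2, and 2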
aartbik approved these changes on Mar 7, 2024.
yinying-lisa-li added a commit that referenced this pull request on Mar 11, 2024: "Continuous efforts following #84249."