[mlir][sparse] Migrate to sparse_tensor.print #83946
Merged
Conversation
@llvm/pr-subscribers-mlir @llvm/pr-subscribers-mlir-sparse

Author: Yinying Li (yinying-lisa-li)

Changes

Continuous efforts following #83506.

Patch is 40.96 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/83946.diff

20 Files Affected:
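All of the affected tests apply the same two changes: the driver function is renamed from @entry to @main (with the runner flag switching from -e entry to -e main), and hand-rolled dumping of the values buffer through vector ops is replaced by a single sparse_tensor.print. A condensed before/after sketch, lifted from the sparse_out_mult_elt hunk below; it assumes the surrounding driver already defines %0, %c0, %f0, and the #DCSR encoding:

```mlir
// Old pattern (removed): extract the values buffer and print it as a vector.
%val = sparse_tensor.values %0 : tensor<32x16xf32, #DCSR> to memref<?xf32>
%vv = vector.transfer_read %val[%c0], %f0 : memref<?xf32>, vector<4xf32>
vector.print %vv : vector<4xf32>

// New pattern (added): print the full sparse storage in one operation,
// covering nse, dim, lvl, the per-level pos/crd arrays, and the values.
sparse_tensor.print %0 : tensor<32x16xf32, #DCSR>
```

The CHECK lines change accordingly: instead of matching a flat values vector, they now match the textual layout emitted by sparse_tensor.print.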
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index bdad713709afa2..b9d1148301dd16 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -88,7 +88,7 @@ module {
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
- func.func @entry() {
+ func.func @main() {
%i0 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 30e620b9d610ba..d615cb66c3d08d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -84,7 +84,7 @@ module {
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
- func.func @entry() {
+ func.func @main() {
%f0 = arith.constant 0.0 : f64
%cst0 = arith.constant 0 : index
%cst1 = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
index f7bcd1122d46c1..c30c6b9b5cc2f5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -60,7 +60,7 @@ module {
}
// Driver method to call and verify kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
@@ -84,11 +84,18 @@ module {
//
// Verify results. Only two entries stored in result!
//
- // CHECK: ( 14, 20, 0, 0 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 2
+ // CHECK-NEXT: dim = ( 32, 16 )
+ // CHECK-NEXT: lvl = ( 32, 16 )
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 2, 31
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2
+ // CHECK-NEXT: crd[1] : ( 2, 0
+ // CHECK-NEXT: values : ( 14, 20
+ // CHECK-NEXT: ----
//
- %val = sparse_tensor.values %0 : tensor<32x16xf32, #DCSR> to memref<?xf32>
- %vv = vector.transfer_read %val[%c0], %f0: memref<?xf32>, vector<4xf32>
- vector.print %vv : vector<4xf32>
+ sparse_tensor.print %0 : tensor<32x16xf32, #DCSR>
// Release the resources.
bufferization.dealloc_tensor %sta : tensor<32x16xf32, #DCSR>
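A reading note for the new CHECK format above (an annotation added here, not part of the PR): for this 32x16 #DCSR result, pos[0] : ( 0, 2 ) says the compressed row level stores two coordinates, crd[0] : ( 2, 31 ) names rows 2 and 31, pos[1] : ( 0, 1, 2 ) assigns one stored column to each of those rows, crd[1] : ( 2, 0 ) gives those columns, and values : ( 14, 20 ) therefore places 14 at (2, 2) and 20 at (31, 0), consistent with the old flat check ( 14, 20, 0, 0 ).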
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
index 17def3f52c003e..d572f4e14bfe2c 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -70,7 +70,7 @@ module {
}
// Driver method to call and verify tensor kernel.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%i0 = arith.constant 0 : i32
@@ -94,23 +94,23 @@ module {
//
// Verify results. Only two entries stored in result. Correct structure.
//
- // CHECK: ( 7, 69, 0, 0 )
- // CHECK-NEXT: ( ( 0, 0, 0 ), ( 0, 7, 0 ), ( 0, 0, 69 ) )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 2
+ // CHECK-NEXT: dim = ( 3, 3 )
+ // CHECK-NEXT: lvl = ( 3, 3 )
+ // CHECK-NEXT: pos[0] : ( 0, 2
+ // CHECK-NEXT: crd[0] : ( 1, 2
+ // CHECK-NEXT: pos[1] : ( 0, 1, 2
+ // CHECK-NEXT: crd[1] : ( 1, 2
+ // CHECK-NEXT: values : ( 7, 69
+ // CHECK-NEXT: ----
//
- %val = sparse_tensor.values %0
- : tensor<?x?xi32, #SparseMatrix> to memref<?xi32>
- %vv = vector.transfer_read %val[%c0], %i0: memref<?xi32>, vector<4xi32>
- vector.print %vv : vector<4xi32>
- %dm = sparse_tensor.convert %0
- : tensor<?x?xi32, #SparseMatrix> to tensor<?x?xi32>
- %vm = vector.transfer_read %dm[%c0, %c0], %i0: tensor<?x?xi32>, vector<3x3xi32>
- vector.print %vm : vector<3x3xi32>
+ sparse_tensor.print %0 : tensor<?x?xi32, #SparseMatrix>
// Release the resources.
bufferization.dealloc_tensor %st1 : tensor<?x?x?xi32, #SparseTensor>
bufferization.dealloc_tensor %st2 : tensor<?x?x?xi32, #SparseTensor>
bufferization.dealloc_tensor %0 : tensor<?x?xi32, #SparseMatrix>
- bufferization.dealloc_tensor %dm : tensor<?x?xi32>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
index e2d8c4fd4628d9..88513c80219a85 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -70,7 +70,7 @@ module {
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
- func.func @entry() {
+ func.func @main() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
@@ -83,11 +83,18 @@ module {
// Print the result for verification.
//
- // CHECK: ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: dim = ( 5, 5 )
+ // CHECK-NEXT: lvl = ( 5, 5 )
+ // CHECK-NEXT: pos[0] : ( 0, 5
+ // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4, 5, 7, 9
+ // CHECK-NEXT: crd[1] : ( 0, 3, 1, 4, 2, 0, 3, 1, 4
+ // CHECK-NEXT: values : ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25
+ // CHECK-NEXT: ----
//
- %m = sparse_tensor.values %0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
- %v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
- vector.print %v : vector<9xf64>
+ sparse_tensor.print %0 : tensor<?x?xf64, #DCSR>
// Release the resources.
bufferization.dealloc_tensor %x : tensor<?x?xf64, #DCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
index 2b9b73a1990e65..a8796d32a1e990 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -54,7 +54,7 @@ module {
//
// Main driver.
//
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f64
%i0 = arith.constant 0 : i32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
index da816c7fbb1172..06e5ee070778c2 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pack_d.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -48,7 +48,7 @@ module {
//
// Main driver.
//
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32
@@ -108,26 +108,39 @@ module {
//
// Verify.
//
- // CHECK: ( ( ( 1, 2 ), ( 3, 4 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 0, 0 ), ( 0, 0 ) ), ( ( 0, 0 ), ( 5, 0 ), ( 6, 7 ) ), ( ( 0, 0 ), ( 8, 0 ), ( 0, 0 ) ) )
- // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
- // CHECK: ( ( ( 1, 2 ), ( 0, 3 ), ( 4, 0 ) ), ( ( 5, 6 ), ( 0, 0 ), ( 0, 7 ) ), ( ( 8, 9 ), ( 10, 11 ), ( 12, 13 ) ), ( ( 14, 0 ), ( 0, 15 ), ( 0, 16 ) ) )
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 8
+ // CHECK-NEXT: dim = ( 4, 3, 2 )
+ // CHECK-NEXT: lvl = ( 4, 3, 2 )
+ // CHECK-NEXT: pos[0] : ( 0, 3
+ // CHECK-NEXT: crd[0] : ( 0, 2, 3
+ // CHECK-NEXT: pos[1] : ( 0, 2, 4, 5
+ // CHECK-NEXT: crd[1] : ( 0, 1, 1, 2, 1
+ // CHECK-NEXT: pos[2] : ( 0, 2, 4, 5, 7, 8
+ // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 0, 1, 0
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 16
+ // CHECK-NEXT: dim = ( 4, 3, 2 )
+ // CHECK-NEXT: lvl = ( 4, 3, 2 )
+ // CHECK-NEXT: pos[2] : ( 0, 2, 3, 4, 6, 6, 7, 9, 11, 13, 14, 15, 16
+ // CHECK-NEXT: crd[2] : ( 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1
+ // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
+ // CHECK-NEXT: ----
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 22
+ // CHECK-NEXT: dim = ( 4, 3, 2 )
+ // CHECK-NEXT: lvl = ( 4, 3, 2 )
+ // CHECK-NEXT: pos[1] : ( 0, 3, 5, 8, 11
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 2, 0, 1, 2, 0, 1, 2
+ // CHECK-NEXT: values : ( 1, 2, 0, 3, 4, 0, 5, 6, 0, 7, 8, 9, 10, 11, 12, 13, 14, 0, 0, 15, 0, 16
+ // CHECK-NEXT: ----
//
+ sparse_tensor.print %s0 : tensor<4x3x2xf32, #CCC>
+ sparse_tensor.print %s1 : tensor<4x3x2xf32, #BatchedCSR>
+ sparse_tensor.print %s2 : tensor<4x3x2xf32, #CSRDense>
- %d0 = sparse_tensor.convert %s0 : tensor<4x3x2xf32, #CCC> to tensor<4x3x2xf32>
- %v0 = vector.transfer_read %d0[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
- vector.print %v0 : vector<4x3x2xf32>
-
- %d1 = sparse_tensor.convert %s1 : tensor<4x3x2xf32, #BatchedCSR> to tensor<4x3x2xf32>
- %v1 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
- vector.print %v1 : vector<4x3x2xf32>
-
- %d2 = sparse_tensor.convert %s2 : tensor<4x3x2xf32, #CSRDense> to tensor<4x3x2xf32>
- %v2 = vector.transfer_read %d1[%c0, %c0, %c0], %f0 : tensor<4x3x2xf32>, vector<4x3x2xf32>
- vector.print %v2 : vector<4x3x2xf32>
-
- bufferization.dealloc_tensor %d0 : tensor<4x3x2xf32>
- bufferization.dealloc_tensor %d1 : tensor<4x3x2xf32>
- bufferization.dealloc_tensor %d2 : tensor<4x3x2xf32>
// FIXME: doing this explicitly crashes runtime
// bufferization.dealloc_tensor %s0 : tensor<4x3x2xf32, #CCC>
// bufferization.dealloc_tensor %s1 : tensor<4x3x2xf32, #BatchedCSR>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
index 3ce089d7a7cf6b..39699fbdb14e59 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_pooling_nhwc.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -47,7 +47,7 @@ func.func @pooling_nhwc_sum(%input: tensor<1x4x4x1xf32>, %filter: tensor<2x2xf32
}
-func.func @entry() {
+func.func @main() {
%c0 = arith.constant 0 : index
%zero = arith.constant 0.00000e+00 : f32
@@ -76,17 +76,26 @@ func.func @entry() {
//
// Sparse pooling should have the same output.
//
-
- // CHECK-NEXT: ( ( ( ( 6 ), ( 6 ), ( 6 ) ), ( ( 6 ), ( 6 ), ( 6 ) ), ( ( 6 ), ( 6 ), ( 6 ) ) ) )
- %s1 = sparse_tensor.convert %CCCC_ret : tensor<1x3x3x1xf32, #CCCC> to tensor<1x3x3x1xf32>
- %v1 = vector.transfer_read %s1[%c0, %c0, %c0, %c0], %zero
- : tensor<1x3x3x1xf32>, vector<1x3x3x1xf32>
- vector.print %v1 : vector<1x3x3x1xf32>
+ // CHECK: ---- Sparse Tensor ----
+ // CHECK-NEXT: nse = 9
+ // CHECK-NEXT: dim = ( 1, 3, 3, 1 )
+ // CHECK-NEXT: lvl = ( 1, 3, 3, 1 )
+ // CHECK-NEXT: pos[0] : ( 0, 1
+ // CHECK-NEXT: crd[0] : ( 0
+ // CHECK-NEXT: pos[1] : ( 0, 3
+ // CHECK-NEXT: crd[1] : ( 0, 1, 2
+ // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9
+ // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2
+ // CHECK-NEXT: pos[3] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
+ // CHECK-NEXT: crd[3] : ( 0, 0, 0, 0, 0, 0, 0, 0, 0
+ // CHECK-NEXT: values : ( 6, 6, 6, 6, 6, 6, 6, 6, 6
+ // CHECK-NEXT: ----
+ //
+ sparse_tensor.print %CCCC_ret : tensor<1x3x3x1xf32, #CCCC>
// Releases resources.
bufferization.dealloc_tensor %in_CCCC : tensor<1x4x4x1xf32, #CCCC>
bufferization.dealloc_tensor %CCCC_ret : tensor<1x3x3x1xf32, #CCCC>
bufferization.dealloc_tensor %dense_ret : tensor<1x3x3x1xf32>
- bufferization.dealloc_tensor %s1 : tensor<1x3x3x1xf32>
return
}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
index b322d965f2dc7e..873322929232a7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -49,7 +49,7 @@ module {
return %0: tensor<5x6xi32>
}
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
%i0 = arith.constant 0 : i32
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
index 17219cde035d34..a927a5dfb94bc2 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reduce_custom.mlir
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
-// DEFINE: %{run_opts} = -e entry -entry-point-result=void
+// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
@@ -114,39 +114,8 @@ module {
return %0 : tensor<?x?xf64, #CSR>
}
- // Dumps a sparse vector of type f64.
- func.func @dump_vec(%arg0: tensor<?xf64, #SparseVector>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant 0.0 : f64
- %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf64>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<8xf64>
- vector.print %1 : vector<8xf64>
- // Dump the dense vector to verify structure is correct.
- %dv = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>
- %2 = vector.transfer_read %dv[%c0], %d0: tensor<?xf64>, vector<16xf64>
- vector.print %2 : vector<16xf64>
- bufferization.dealloc_tensor %dv : tensor<?xf64>
- return
- }
-
- // Dump a sparse matrix.
- func.func @dump_mat(%arg0: tensor<?x?xf64, #CSR>) {
- // Dump the values array to verify only sparse contents are stored.
- %c0 = arith.constant 0 : index
- %d0 = arith.constant 0.0 : f64
- %0 = sparse_tensor.values %arg0 : tensor<?x?xf64, #CSR> to memref<?xf64>
- %1 = vector.transfer_read %0[%c0], %d0: memref<?xf64>, vector<16xf64>
- vector.print %1 : vector<16xf64>
- %dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #CSR> to tensor<?x?xf64>
- %2 = vector.transfer_read %dm[%c0, %c0], %d0: tensor<?x?xf64>, vector<5x5xf64>
- vector.print %2 : vector<5x5xf64>
- bufferization.dealloc_tensor %dm : tensor<?x?xf64>
- return
- }
-
// Driver method to call and verify vector kernels.
- func.func @entry() {
+ func.func @main() {
%c0 = arith.constant 0 : index
// Setup sparse matrices.
@@ -171,19 +140,43 @@ module {
//
// Verify the results.
//
- // CHECK: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( ( 1, 2, 0, 0, 0 ), ( 3, 0, 0, 0, 0 ), ( 0, 0, 4, 5, 6 ), ( 7, 0, 8, 9, 0 ), ( 0, 0, 0, 0, 0 ) )
- // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
- // CHECK-NEXT: ( ( 6, 0, 0, 0, 0 ),...
[truncated]
aartbik approved these changes on Mar 7, 2024.
Review thread on mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir (outdated, resolved).
yinying-lisa-li added a commit that referenced this pull request on Mar 7, 2024: Continuous efforts following #83946.