[mlir][sparse] Migrate more tests to use sparse_tensor.print #84833

Merged (2 commits, Mar 11, 2024)
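Every test in this patch is migrated the same way: the runner entry point is renamed from @entry to @main (with the -e flag in the RUN lines updated to match), the per-shape dump helpers are deleted, and each verification site calls sparse_tensor.print on the sparse result directly, with the FileCheck expectations rewritten against its output. A condensed before/after sketch of that pattern, lifted from the 4x9 case in the diff below (all names and types come from the test itself):

  // Before: a dedicated helper converts to dense, prints the dense tensor,
  // the number of stored entries, and the raw values buffer.
  func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
    %c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
    %cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
    call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()
    %n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
    vector.print %n : index
    %1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
    call @printMemref1dF64(%1) : (memref<?xf64>) -> ()
    return
  }
  // ... in @main ...
  call @dump_mat_4x9(%8) : (tensor<4x9xf64, #MAT_C_C>) -> ()

  // After: one op dumps the complete sparse layout (nse, dim, lvl, pos, crd, values).
  sparse_tensor.print %8 : tensor<4x9xf64, #MAT_C_C>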
(first test file)
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_opts} = -e entry -entry-point-result=void
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -99,39 +99,15 @@ module {
return
}

func.func @dump_mat_perm_9x4(%A: tensor<9x4xf64, #MAT_C_C_P>) {
%c = sparse_tensor.convert %A : tensor<9x4xf64, #MAT_C_C_P> to tensor<9x4xf64>
%cu = tensor.cast %c : tensor<9x4xf64> to tensor<*xf64>
call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()

%n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_C_C_P>
vector.print %n : index

%1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_C_C_P> to memref<?xf64>
call @printMemref1dF64(%1) : (memref<?xf64>) -> ()

return
}

func.func @dump_mat_dense_9x4(%A: tensor<9x4xf64>) {
%u = tensor.cast %A : tensor<9x4xf64> to tensor<*xf64>
call @printMemrefF64(%u) : (tensor<*xf64>) -> ()

return
}

func.func @dump_mat_annotated_dense_9x4(%A: tensor<9x4xf64, #MAT_D_D>) {
%n = sparse_tensor.number_of_entries %A : tensor<9x4xf64, #MAT_D_D>
vector.print %n : index

%1 = sparse_tensor.values %A : tensor<9x4xf64, #MAT_D_D> to memref<?xf64>
call @printMemref1dF64(%1) : (memref<?xf64>) -> ()

return
}

// Driver method to call and verify kernels.
func.func @entry() {
func.func @main() {
%m42 = arith.constant dense<
[ [ 1.0, 0.0 ],
[ 3.1, 0.0 ],
@@ -163,20 +139,21 @@ module {
%sm34cdp = sparse_tensor.convert %m34 : tensor<3x4xf64> to tensor<3x4xf64, #MAT_C_D_P>
%sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>

// CHECK: {{\[}}[1, 0, 3, 0],
// CHECK-NEXT: [0, 2, 0, 0],
// CHECK-NEXT: [1, 0, 1, 1],
// CHECK-NEXT: [0, 0.5, 0, 0],
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 1.5, 1],
// CHECK-NEXT: [0, 3.5, 0, 0],
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 9, 4 )
// CHECK-NEXT: lvl = ( 4, 9 )
// CHECK-NEXT: pos[0] : ( 0, 4
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
// CHECK-NEXT: pos[1] : ( 0, 5, 11, 16, 18
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 1, 3, 4, 6, 7, 8, 0, 2, 4, 5, 7, 2, 5
// CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 2, 0.5, 5, 3.5, 5, 0.5, 3, 1, 2, 1.5, 2, 1, 1
// CHECK-NEXT: ----
//
%4 = call @concat_sparse_sparse_perm(%sm24ccp, %sm34cd, %sm44dc)
: (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C_P>
call @dump_mat_perm_9x4(%4) : (tensor<9x4xf64, #MAT_C_C_P>) -> ()
sparse_tensor.print %4 : tensor<9x4xf64, #MAT_C_C_P>
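As the CHECK lines above show, sparse_tensor.print reports the number of stored entries (nse), the dimension and level sizes (dim and lvl), the per-level positions and coordinates arrays (pos[l], crd[l]), and the stored values. For the permuted #MAT_C_C_P encoding the level sizes are transposed with respect to the dimensions, which is why the 9x4 result above prints dim = ( 9, 4 ) but lvl = ( 4, 9 ).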

// CHECK: {{\[}}[1, 0, 3, 0],
// CHECK-NEXT: [0, 2, 0, 0],
@@ -191,20 +168,21 @@ module {
: (tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64>
call @dump_mat_dense_9x4(%5) : (tensor<9x4xf64>) -> ()

// CHECK: {{\[}}[1, 0, 3, 0],
// CHECK-NEXT: [0, 2, 0, 0],
// CHECK-NEXT: [1, 0, 1, 1],
// CHECK-NEXT: [0, 0.5, 0, 0],
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 1.5, 1],
// CHECK-NEXT: [0, 3.5, 0, 0],
// CHECK-NEXT: [1, 5, 2, 0],
// CHECK-NEXT: [1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 9, 4 )
// CHECK-NEXT: lvl = ( 9, 4 )
// CHECK-NEXT: pos[0] : ( 0, 9
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 13, 16, 18
// CHECK-NEXT: crd[1] : ( 0, 2, 1, 0, 2, 3, 1, 0, 1, 2, 2, 3, 1, 0, 1, 2, 0, 1
// CHECK-NEXT: values : ( 1, 3, 2, 1, 1, 1, 0.5, 1, 5, 2, 1.5, 1, 3.5, 1, 5, 2, 1, 0.5
// CHECK-NEXT: ----
//
%6 = call @concat_mix_sparse_perm(%m24, %sm34cdp, %sm44dc)
: (tensor<2x4xf64>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64, #MAT_C_C>
call @dump_mat_9x4(%6) : (tensor<9x4xf64, #MAT_C_C>) -> ()
sparse_tensor.print %6 : tensor<9x4xf64, #MAT_C_C>

// CHECK: {{\[}}[1, 0, 3, 0],
// CHECK-NEXT: [0, 2, 0, 0],
(second test file)
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_opts} = -e entry -entry-point-result=void
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -82,20 +82,6 @@ module {
return %0 : tensor<4x9xf64>
}

func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
%c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
%cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()

%n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
vector.print %n : index

%1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
call @printMemref1dF64(%1) : (memref<?xf64>) -> ()

return
}

func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
%1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -104,7 +90,7 @@ module {
}

// Driver method to call and verify kernels.
func.func @entry() {
func.func @main() {
%m42 = arith.constant dense<
[ [ 1.0, 0.0 ],
[ 3.1, 0.0 ],
@@ -125,15 +111,21 @@ module {
%sm43cd = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D>
%sm44dc = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
// CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 4, 9 )
// CHECK-NEXT: lvl = ( 4, 9 )
// CHECK-NEXT: pos[0] : ( 0, 4
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
// CHECK-NEXT: ----
//
%8 = call @concat_sparse_sparse_dim1(%sm42cc, %sm43cd, %sm44dc)
: (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
call @dump_mat_4x9(%8) : (tensor<4x9xf64, #MAT_C_C>) -> ()
sparse_tensor.print %8 : tensor<4x9xf64, #MAT_C_C>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
@@ -143,15 +135,21 @@ module {
: (tensor<4x2xf64, #MAT_C_C>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
call @dump_mat_dense_4x9(%9) : (tensor<4x9xf64>) -> ()

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
// CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 4, 9 )
// CHECK-NEXT: lvl = ( 4, 9 )
// CHECK-NEXT: pos[0] : ( 0, 4
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
// CHECK-NEXT: ----
//
%10 = call @concat_mix_sparse_dim1(%m42, %sm43cd, %sm44dc)
: (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
call @dump_mat_4x9(%10) : (tensor<4x9xf64, #MAT_C_C>) -> ()
sparse_tensor.print %10 : tensor<4x9xf64, #MAT_C_C>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
(third test file)
@@ -10,7 +10,7 @@
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_opts} = -e entry -entry-point-result=void
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}

@@ -85,34 +85,6 @@ module {
return %0 : tensor<4x9xf64>
}

func.func @dump_mat_4x9(%A: tensor<4x9xf64, #MAT_C_C>) {
%c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C> to tensor<4x9xf64>
%cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()

%n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C>
vector.print %n : index

%1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C> to memref<?xf64>
call @printMemref1dF64(%1) : (memref<?xf64>) -> ()

return
}

func.func @dump_mat_perm_4x9(%A: tensor<4x9xf64, #MAT_C_C_P>) {
%c = sparse_tensor.convert %A : tensor<4x9xf64, #MAT_C_C_P> to tensor<4x9xf64>
%cu = tensor.cast %c : tensor<4x9xf64> to tensor<*xf64>
call @printMemrefF64(%cu) : (tensor<*xf64>) -> ()

%n = sparse_tensor.number_of_entries %A : tensor<4x9xf64, #MAT_C_C_P>
vector.print %n : index

%1 = sparse_tensor.values %A : tensor<4x9xf64, #MAT_C_C_P> to memref<?xf64>
call @printMemref1dF64(%1) : (memref<?xf64>) -> ()

return
}

func.func @dump_mat_dense_4x9(%A: tensor<4x9xf64>) {
%1 = tensor.cast %A : tensor<4x9xf64> to tensor<*xf64>
call @printMemrefF64(%1) : (tensor<*xf64>) -> ()
@@ -121,7 +93,7 @@ module {
}

// Driver method to call and verify kernels.
func.func @entry() {
func.func @main() {
%m42 = arith.constant dense<
[ [ 1.0, 0.0 ],
[ 3.1, 0.0 ],
@@ -153,15 +125,21 @@ module {
%sm43cdp = sparse_tensor.convert %m43 : tensor<4x3xf64> to tensor<4x3xf64, #MAT_C_D_P>
%sm44dcp = sparse_tensor.convert %m44 : tensor<4x4xf64> to tensor<4x4xf64, #MAT_D_C_P>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
// CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 4, 9 )
// CHECK-NEXT: lvl = ( 9, 4 )
// CHECK-NEXT: pos[0] : ( 0, 9
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8
// CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 7, 10, 12, 15, 17, 18
// CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 3, 3, 0, 1, 2, 2, 3, 1, 2, 3, 0, 2, 0
// CHECK-NEXT: values : ( 1, 3.1, 2, 1, 1, 5, 2, 1, 0.5, 1, 1, 1, 3.5, 5, 0.5, 1.5, 2, 1
// CHECK-NEXT: ----
//
%12 = call @concat_sparse_sparse_perm_dim1(%sm42ccp, %sm43cd, %sm44dc)
: (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C_P>
call @dump_mat_perm_4x9(%12) : (tensor<4x9xf64, #MAT_C_C_P>) -> ()
sparse_tensor.print %12 : tensor<4x9xf64, #MAT_C_C_P>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
@@ -171,15 +149,21 @@ module {
: (tensor<4x2xf64, #MAT_C_C_P>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64>
call @dump_mat_dense_4x9(%13) : (tensor<4x9xf64>) -> ()

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],
// CHECK-NEXT: [0, 2, 0, 0, 1, 1, 5, 2, 0],
// CHECK-NEXT: [0, 0, 5, 2, 0, 1, 0.5, 0, 0]]
// CHECK-NEXT: 18
// CHECK: [1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
//
// CHECK: ---- Sparse Tensor ----
// CHECK-NEXT: nse = 18
// CHECK-NEXT: dim = ( 4, 9 )
// CHECK-NEXT: lvl = ( 4, 9 )
// CHECK-NEXT: pos[0] : ( 0, 4
// CHECK-NEXT: crd[0] : ( 0, 1, 2, 3
// CHECK-NEXT: pos[1] : ( 0, 5, 9, 14, 18
// CHECK-NEXT: crd[1] : ( 0, 2, 4, 7, 8, 0, 2, 4, 6, 1, 4, 5, 6, 7, 2, 3, 5, 6
// CHECK-NEXT: values : ( 1, 1, 1, 1.5, 1, 3.1, 1, 0.5, 3.5, 2, 1, 1, 5, 2, 5, 2, 1, 0.5
// CHECK-NEXT: ----
//
%14 = call @concat_mix_sparse_perm_dim1(%m42, %sm43cdp, %sm44dc)
: (tensor<4x2xf64>, tensor<4x3xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C>) -> tensor<4x9xf64, #MAT_C_C>
call @dump_mat_4x9(%14) : (tensor<4x9xf64, #MAT_C_C>) -> ()
sparse_tensor.print %14 : tensor<4x9xf64, #MAT_C_C>

// CHECK: {{\[}}[1, 0, 1, 0, 1, 0, 0, 1.5, 1],
// CHECK-NEXT: [3.1, 0, 1, 0, 0.5, 0, 3.5, 0, 0],