//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-cpu-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

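// An all-compressed 3-D encoding (CSF-like): every level stores nonzero
// coordinates explicitly, so only the 8 nonzero values end up in storage.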
#Sparse1 = #sparse_tensor.encoding<{
  map = (i, j, k) -> (
    i : compressed,
    j : compressed,
    k : compressed
  )
}>

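// A 2x2x2 block encoding: the outer levels (i floordiv 2, j floordiv 2,
// k floordiv 2) select a block and are stored compressed, while the inner
// levels (i mod 2, j mod 2, k mod 2) address within a block and are stored
// dense. For example, entry (3,3,2) lands in block (1,1,1) at in-block
// offset (1,1,0).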
#Sparse2 = #sparse_tensor.encoding<{
  map = (i, j, k) -> (
    i floordiv 2 : compressed,
    j floordiv 2 : compressed,
    k floordiv 2 : compressed,
    i mod 2 : dense,
    j mod 2 : dense,
    k mod 2 : dense
  )
}>

module {

  //
  // Main driver that tests sparse tensor storage.
  //
  func.func @main() {
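    // Constants used below: %c0 as the base index and %i0 as the padding
    // value of the vector.transfer_read ops.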
    %c0 = arith.constant 0 : index
    %i0 = arith.constant 0 : i32

    // Set up the input dense tensor and convert it to two sparse tensors.
    %d = arith.constant dense<[
      [ // i=0
        [ 1, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 5, 0 ] ],
      [ // i=1
        [ 2, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 6, 0 ] ],
      [ // i=2
        [ 3, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 7, 0 ] ],
      [ // i=3
        [ 4, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 0, 0 ],
        [ 0, 0, 8, 0 ] ]
    ]> : tensor<4x4x4xi32>

    %a = sparse_tensor.convert %d : tensor<4x4x4xi32> to tensor<4x4x4xi32, #Sparse1>
    %b = sparse_tensor.convert %d : tensor<4x4x4xi32> to tensor<4x4x4xi32, #Sparse2>

    //
    // If we store the two "fibers" [1,2,3,4] (running from index (0,0,0)
    // to (3,0,0)) and [5,6,7,8] (running from index (0,3,2) to (3,3,2))
    // with a "DCSR-flavored" scheme along (j,k) and dense "fibers" in the
    // i-dim, we end up with 8 stored entries.
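    // Concretely: for each i = 0..3, the two values at (i,0,0) and (i,3,2)
    // are stored consecutively, yielding (1, 5, 2, 6, 3, 7, 4, 8).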
    //
    // CHECK: 8
    // CHECK-NEXT: ( 1, 5, 2, 6, 3, 7, 4, 8 )
    //
    %na = sparse_tensor.number_of_entries %a : tensor<4x4x4xi32, #Sparse1>
    vector.print %na : index
    %ma = sparse_tensor.values %a : tensor<4x4x4xi32, #Sparse1> to memref<?xi32>
    %va = vector.transfer_read %ma[%c0], %i0 : memref<?xi32>, vector<8xi32>
    vector.print %va : vector<8xi32>

    //
    // If we store full 2x2x2 3-D blocks in the original index order
    // in a compressed fashion, we end up with 4 blocks to incorporate
    // all the nonzeros, and thus 32 stored entries.
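    // Concretely: the nonzeros fall into blocks (0,0,0), (0,1,1), (1,0,0),
    // and (1,1,1), and each block stores 2x2x2 = 8 values (zeros included),
    // so 4 x 8 = 32.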
    //
    // CHECK: 32
    // CHECK-NEXT: ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 6, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8, 0 )
    //
    %nb = sparse_tensor.number_of_entries %b : tensor<4x4x4xi32, #Sparse2>
    vector.print %nb : index
    %mb = sparse_tensor.values %b : tensor<4x4x4xi32, #Sparse2> to memref<?xi32>
    %vb = vector.transfer_read %mb[%c0], %i0 : memref<?xi32>, vector<32xi32>
    vector.print %vb : vector<32xi32>

    // Release the resources.
    bufferization.dealloc_tensor %a : tensor<4x4x4xi32, #Sparse1>
    bufferization.dealloc_tensor %b : tensor<4x4x4xi32, #Sparse2>

    return
  }
}