// DEFINE: %{compile} = mlir-opt %s \
// DEFINE:   -transform-interpreter -test-transform-dialect-erase-schedule \
// DEFINE:   -one-shot-bufferize="bufferize-function-boundaries" -buffer-deallocation-pipeline -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
// DEFINE:   -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
// DEFINE: %{entry_point} = generic_reduce_2d_f32
// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve" \
// DEFINE:   -shared-libs=%mlir_native_utils_lib_dir/libmlir_runner_utils%shlibext,%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext

// RUN: %{compile}

// RUN: %{run} | FileCheck %s --check-prefix=F32

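// This test tiles and vectorizes a 2-D f32 sum-reduction using scalable (SVE)
// vectors via the Transform dialect schedule at the bottom of this file, runs
// the result through %mcr_aarch64_cmd on an AArch64 target with +sve enabled,
// and verifies the printed output with FileCheck.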
func.func @generic_reduce_2d_f32() {
  // 2-D Tensor
  %M = arith.constant 16 : index
  %N = arith.constant 1000 : index
  %c0_f32 = arith.constant 0.0 : f32

  // Allocate the input and output tensors
  %A_alloc = bufferization.alloc_tensor(%M, %N) : tensor<?x?xf32>
  %C_alloc = bufferization.alloc_tensor(%M) : tensor<?xf32>

  // Initialise the tensors
  %pi = arith.constant 3.1416 : f32
  %A_in = linalg.fill ins(%pi : f32) outs(%A_alloc : tensor<?x?xf32>) -> tensor<?x?xf32>
  %C_in = linalg.fill ins(%c0_f32 : f32) outs(%C_alloc : tensor<?xf32>) -> tensor<?xf32>

  // Reduce
  %C_out = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                             affine_map<(d0, d1) -> (d0)>],
                            iterator_types = ["parallel", "reduction"] }
    ins(%A_in : tensor<?x?xf32>)
    outs(%C_in : tensor<?xf32>) {
    ^bb(%in: f32, %out: f32) :
      %0 = arith.addf %in, %out : f32
      linalg.yield %0 : f32
  } -> tensor<?xf32>
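  // With %N = 1000 and every input element equal to 3.1416, each of the 16
  // output elements is expected to be 1000 * 3.1416 = 3141.6, which is the
  // value checked below.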

  // Print and verify the output
  // F32-LABEL: SVE: START OF TEST OUTPUT
  vector.print str "SVE: START OF TEST OUTPUT\n"

  // F32-NEXT: Unranked Memref {{.*}} rank = 1 offset = 0 sizes = [16] strides = [1] data =
  // F32-NEXT: [3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6, 3141.6]

  %xf = tensor.cast %C_out : tensor<?xf32> to tensor<*xf32>
  call @printMemrefF32(%xf) : (tensor<*xf32>) -> ()

  // F32-NEXT: SVE: END OF TEST OUTPUT
  vector.print str "SVE: END OF TEST OUTPUT\n"

  return
}

module attributes {transform.with_named_sequence} {
  // A sequence that will tile and vectorise a Reduce Op
  transform.named_sequence @tile_and_vectorize_reduce(%func
    : !transform.op<"func.func"> {transform.readonly}) {

    // Step 0: Get a handle to the reduce Op
    %reduce = transform.structured.match ops{["linalg.generic"]} in %func
      : (!transform.op<"func.func">) -> !transform.any_op

    // Step 1: Tile
    %tiled_reduce, %loops:2 = transform.structured.tile_using_for %reduce tile_sizes [1, [4]]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
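    // Note: tile size 1 is used for the parallel (row) dimension and a
    // scalable size [4] (i.e. 4 x vscale elements) for the reduction
    // dimension, so the inner loop step adapts to the runtime SVE vector
    // length.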

    // Step 2: Vectorize
    transform.structured.vectorize %tiled_reduce vector_sizes [1, [4]] : !transform.any_op
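    // The vector sizes mirror the tile sizes: masked vectorization with a
    // scalable trailing dimension, i.e. operations on vector<1x[4]xf32>.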

    // Step 3: Lower vector.multi_reduction
    transform.apply_patterns to %func {
      transform.apply_patterns.vector.lower_masked_transfers
      transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
    } : !transform.op<"func.func">
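    // The "innerreduction" strategy lowers vector.multi_reduction to
    // vector.reduction over the innermost (scalable) dimension, while
    // lower_masked_transfers handles the masked transfers produced by the
    // masked vectorization above.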

    transform.yield
  }

  // A sequence that goes over all functions in this module and applies
  // "tile_and_vectorize_reduce"
  transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
    %funcs = transform.structured.match ops{["func.func"]} in %module
      : (!transform.any_op) -> !transform.op<"func.func">

    transform.foreach %funcs : !transform.op<"func.func"> {
      ^bb2(%func : !transform.op<"func.func">):
        transform.include @tile_and_vectorize_reduce failures(propagate)
          (%func) : (!transform.op<"func.func">) -> ()
    }
    transform.yield
  }
}

func.func private @printMemrefF32(%ptr : tensor<*xf32>)