// RUN: mlir-opt %s -one-shot-bufferize="use-encoding-for-memory-space" -split-input-file | FileCheck %s
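// Tests for one-shot bufferization with the `use-encoding-for-memory-space`
// option, under which an integer tensor encoding is used as the memory space
// of the buffers produced for that tensor.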

// TODO: move to tensor dialect tests
func.func @from_elements(%fill: f32, %f: f32, %idx: index) -> tensor<3xf32, 1> {
  %t = tensor.from_elements %fill, %fill, %fill : tensor<3xf32, 1>
  %i = tensor.insert %f into %t[%idx] : tensor<3xf32, 1>
  return %i : tensor<3xf32, 1>
}

// CHECK-LABEL: @from_elements
// CHECK-SAME: (%[[arg0:.+]]: f32, %[[arg1:.+]]: f32, %[[arg2:.+]]: index) -> tensor<3xf32, 1 : i64>
// CHECK: %[[alloc:.+]] = memref.alloc() {{.*}} : memref<3xf32, 1>
// CHECK-DAG: %[[c0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[c1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[c2:.+]] = arith.constant 2 : index
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c0]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c1]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c2]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg1]], %[[alloc]][%[[arg2]]] : memref<3xf32, 1>
// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<3xf32, 1> -> tensor<3xf32, 1 : i64>
// CHECK: return %[[v0]] : tensor<3xf32, 1 : i64>

// -----

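// The `memory_space` attribute on alloc_tensor picks the allocation's memory
// space even though the result tensor type carries no encoding.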
func.func @alloc_tensor_with_space_no_encoding() -> tensor<128xf32> {
  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
  return %0 : tensor<128xf32>
}

// CHECK-LABEL: @alloc_tensor_with_space_no_encoding
// CHECK-SAME: () -> tensor<128xf32> {
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32>
// CHECK: return %[[v0]] : tensor<128xf32>

// -----

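// As above, but the allocation is cast to a tensor type whose encoding matches
// the requested memory space; a single allocation in space 1 is expected.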
func.func @alloc_tensor_with_space_and_cast() -> tensor<128xf32, 1> {
  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
  %1 = tensor.cast %0 : tensor<128xf32> to tensor<128xf32, 1>
  return %1 : tensor<128xf32, 1>
}

// CHECK-LABEL: @alloc_tensor_with_space_and_cast
// CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32, 1 : i64>
// CHECK: return %[[v0]] : tensor<128xf32, 1 : i64>

// -----

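// Here the `memory_space` attribute and the result encoding agree, so the
// buffer is allocated directly in space 1.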
func.func @alloc_tensor_with_space_with_encoding() -> tensor<128xf32, 1 : i64> {
  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32, 1 : i64>
  return %0 : tensor<128xf32, 1 : i64>
}

// CHECK-LABEL: @alloc_tensor_with_space_with_encoding
// CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32, 1 : i64>
// CHECK: return %[[v0]] : tensor<128xf32, 1 : i64>

// -----

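// Copy from a tensor in the default space: the source is materialized via
// to_memref and copied into a fresh allocation in space 1.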
func.func @alloc_tensor_copy_from_default_space(%arg0: tensor<128xf32>) -> tensor<128xf32> {
  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 1 : i64} : tensor<128xf32>
  return %0 : tensor<128xf32>
}

// CHECK-LABEL: @alloc_tensor_copy_from_default_space
// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32>) -> tensor<128xf32> {
// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32> -> memref<128xf32, strided<[?], offset: ?>>
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>> to memref<128xf32, 1>
// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> -> tensor<128xf32>
// CHECK: return %[[v1]] : tensor<128xf32>

// -----

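// Copy between non-default spaces: the space-1 source buffer is copied into a
// new allocation in space 2.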
func.func @alloc_tensor_copy_from_non_default_space(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 2> {
  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 2 : i64} : tensor<128xf32, 1>
  %1 = tensor.cast %0 : tensor<128xf32, 1> to tensor<128xf32, 2>
  return %1 : tensor<128xf32, 2>
}

// CHECK-LABEL: @alloc_tensor_copy_from_non_default_space
// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> {
// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> -> memref<128xf32, strided<[?], offset: ?>, 1>
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> -> tensor<128xf32, 2 : i64>
// CHECK: return %[[v1]] : tensor<128xf32, 2 : i64>

// -----

// TODO: this should be illegal since ultimately we cannot eliminate the `bufferization.to_tensor` when
// bufferizing function boundaries.
func.func @alloc_tensor_copy_from_non_default_space_no_cast(%arg0: tensor<128xf32, 1>,
                                                            %arg1: tensor<4xf32, 1>) -> tensor<128xf32, 1> {
  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 2 : i64} : tensor<128xf32, 1>
  %1 = tensor.insert_slice %arg1 into %arg0 [0][4][1] : tensor<4xf32, 1> into tensor<128xf32, 1>
  return %0 : tensor<128xf32, 1>
}

// CHECK-LABEL: @alloc_tensor_copy_from_non_default_space_no_cast
// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: tensor<4xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> {
// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg1]] : tensor<4xf32, 1 : i64> -> memref<4xf32, strided<[?], offset: ?>, 1>
// CHECK: %[[v1:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> -> memref<128xf32, strided<[?], offset: ?>, 1>
// CHECK: %[[v2:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> -> memref<128xf32, strided<[?], offset: ?>, 1>
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
// CHECK: memref.copy %[[v2]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
// CHECK: %[[v3:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> -> tensor<128xf32, 1 : i64>
// CHECK: %[[alloc_0:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
// CHECK: memref.copy %[[v1]], %[[alloc_0]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 1>
// CHECK: %[[subview:.+]] = memref.subview %[[alloc_0]][0] [4] [1] : memref<128xf32, 1> to memref<4xf32, strided<[1]>, 1>
// CHECK: memref.copy %[[v0]], %[[subview]] : memref<4xf32, strided<[?], offset: ?>, 1> to memref<4xf32, strided<[1]>, 1>
// CHECK: return %[[v3]] : tensor<128xf32, 1 : i64>

// -----

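// materialize_in_destination bufferizes to a copy from the space-1 source
// buffer into the space-2 destination allocation.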
func.func @materialize_in_destination(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 2> {
  %0 = bufferization.alloc_tensor() {memory_space = 2 : i64} : tensor<128xf32, 2>
  %1 = bufferization.materialize_in_destination %arg0 in %0 : (tensor<128xf32, 1>, tensor<128xf32, 2>) -> tensor<128xf32, 2>
  return %1 : tensor<128xf32, 2>
}

// CHECK-LABEL: @materialize_in_destination
// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> {
// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> -> memref<128xf32, strided<[?], offset: ?>, 1>
// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> -> tensor<128xf32, 2 : i64>
// CHECK: return %[[v1]] : tensor<128xf32, 2 : i64>