// RUN: mlir-opt %s -convert-vector-to-xegpu -split-input-file | FileCheck %s

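// Lowering of plain-layout vector.contract (GEMM-style) ops to xegpu.dpas.
// Negative cases are expected to be left as vector.contract.
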
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @dpas_gemm_f16(%lhs: vector<8x16xf16>, %rhs: vector<16x16xf16>,
                         %acc: vector<8x16xf32>) -> vector<8x16xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x16xf16>, vector<16x16xf16> into vector<8x16xf32>
  return %3 : vector<8x16xf32>
}

// CHECK-LABEL: @dpas_gemm_f16(
// CHECK-SAME: %[[LHS:.+]]: vector<8x16xf16>,
// CHECK-SAME: %[[RHS:.+]]: vector<16x16xf16>,
// CHECK-SAME: %[[ACC:.+]]: vector<8x16xf32>
// CHECK: %[[DPAS:.+]] = xegpu.dpas
// CHECK-SAME: %[[LHS]], %[[RHS]], %[[ACC]]
// CHECK-SAME: {{.*}}-> vector<8x16xf32>
// CHECK: return %[[DPAS]]

// -----

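// Same pattern with i8 operands; the reduction (K) dimension is 32 to match
// the DPAS shape for 8-bit inputs.
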
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @dpas_gemm_i8(%lhs: vector<8x32xi8>, %rhs: vector<32x16xi8>,
                        %acc: vector<8x16xi32>) -> vector<8x16xi32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x32xi8>, vector<32x16xi8> into vector<8x16xi32>
  return %3 : vector<8x16xi32>
}

// CHECK-LABEL: @dpas_gemm_i8(
// CHECK-SAME: %[[LHS:.+]]: vector<8x32xi8>,
// CHECK-SAME: %[[RHS:.+]]: vector<32x16xi8>,
// CHECK-SAME: %[[ACC:.+]]: vector<8x16xi32>
// CHECK: %[[DPAS:.+]] = xegpu.dpas
// CHECK-SAME: %[[LHS]], %[[RHS]], %[[ACC]]
// CHECK-SAME: {{.*}}-> vector<8x16xi32>
// CHECK: return %[[DPAS]]

// -----

// For simplicity, only plain data layouts are currently supported.
// VNNI packing is applied later as a separate lowering step.

#map = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (d0, d1)>
func.func @negative_vnni_packed(%lhs: vector<8x8x2xf16>, %rhs: vector<8x16x2xf16>,
                                %acc: vector<8x16xf32>) -> vector<8x16xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x8x2xf16>, vector<8x16x2xf16> into vector<8x16xf32>
  return %3 : vector<8x16xf32>
}

// CHECK-LABEL: @negative_vnni_packed(
// CHECK: vector.contract

// -----

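// DPAS accumulates with addition only, so contractions using any other
// combining kind are expected to stay as vector.contract.
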
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @negative_combining_kind(%lhs: vector<8x16xf16>, %rhs: vector<16x16xf16>,
                                   %acc: vector<8x16xf32>) -> vector<8x16xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<mul>} %lhs, %rhs, %acc
    : vector<8x16xf16>, vector<16x16xf16> into vector<8x16xf32>
  return %3 : vector<8x16xf32>
}

// CHECK-LABEL: @negative_combining_kind(
// CHECK: vector.contract

// -----

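// A full reduction into a 0-D accumulator provides no (M, N) result to map
// onto a DPAS tile, so the contraction is expected to remain unconverted.
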
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> ()>
func.func @negative_accumulator_shape(%lhs: vector<8x16xf16>, %rhs: vector<16x16xf16>,
                                      %acc: vector<f32>) -> vector<f32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["reduction", "reduction", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x16xf16>, vector<16x16xf16> into vector<f32>
  return %3 : vector<f32>
}

// CHECK-LABEL: @negative_accumulator_shape(
// CHECK: vector.contract

// -----

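// Only the canonical (m, k) x (k, n) operand layout is matched; a transposed
// A operand is expected to keep the original vector.contract.
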
#map = affine_map<(d0, d1, d2) -> (d2, d0)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @negative_gemm_transpose_a(%lhs: vector<16x8xf16>, %rhs: vector<16x16xf16>,
                                     %acc: vector<8x16xf32>) -> vector<8x16xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<16x8xf16>, vector<16x16xf16> into vector<8x16xf32>
  return %3 : vector<8x16xf32>
}

// CHECK-LABEL: @negative_gemm_transpose_a(
// CHECK: vector.contract

// -----

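// Likewise, a transposed B operand does not match the plain GEMM layout and
// is expected to be left unconverted.
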
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d1, d2)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @negative_gemm_transpose_b(%lhs: vector<8x16xf16>, %rhs: vector<16x16xf16>,
                                     %acc: vector<8x16xf32>) -> vector<8x16xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x16xf16>, vector<16x16xf16> into vector<8x16xf32>
  return %3 : vector<8x16xf32>
}

// CHECK-LABEL: @negative_gemm_transpose_b(
// CHECK: vector.contract

// -----

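// The N dimension differs from the 16-wide tile used in the positive cases,
// so the contraction is expected to be left unconverted.
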
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
func.func @negative_n_dim_size(%lhs: vector<8x16xf16>, %rhs: vector<16x32xf16>,
                               %acc: vector<8x32xf32>) -> vector<8x32xf32> {
  %3 = vector.contract
    {indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction"],
     kind = #vector.kind<add>} %lhs, %rhs, %acc
    : vector<8x16xf16>, vector<16x32xf16> into vector<8x32xf32>
  return %3 : vector<8x32xf32>
}

// CHECK-LABEL: @negative_n_dim_size(
// CHECK: vector.contract