
Commit 778fd1c

fixup! [mlir][linalg] Add a new helper hook - isVectorizable
Add tests to demonstrate the new functionality added to `transform.structured.vectorize_children_and_apply_patterns`.
1 parent bb95fed commit 778fd1c
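Note: the RUN lines sit outside the changed hunks, so they are not visible in this diff. Assuming both test files follow the usual pattern for the Linalg vectorization tests, each is driven roughly like this (the exact flag set may differ per file):

// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s

The -split-input-file flag is what makes each `// -----` separator below start a fresh module, which is why every added test case carries its own transform.named_sequence.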


2 files changed: +73 / -0 lines


mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir

Lines changed: 65 additions & 0 deletions
@@ -2010,3 +2010,68 @@ module attributes {transform.with_named_sequence} {
 // CHECK: %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 2, 3, 0] : vector<1x1x12x197xf32> to vector<1x12x197x1xf32>
 // CHECK: %[[VAL_9:.*]] = vector.transfer_write %[[VAL_8]], %[[VAL_3]]{{\[}}%[[VAL_2]], %[[VAL_2]], %[[VAL_2]], %[[VAL_2]]] {in_bounds = [true, true, true, true]} : vector<1x12x197x1xf32>, tensor<1x12x197x1xf32>
 // CHECK: return %[[VAL_9]] : tensor<1x12x197x1xf32>
+
+// -----
+
+// Input identical to the test in vectorization.mlir. Output is different -
+// vector sizes are inferred (rather than user-specified) and hence _no_
+// masking is used.
+
+func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+  %pack = tensor.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
+  return %pack : tensor<4x1x32x16x2xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
+
+// CHECK-LABEL: func.func @test_vectorize_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x8x16xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<4x1x32x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<4x1x32x16x2xf32>, tensor<4x1x32x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<4x1x32x16x2xf32>
+
+// -----
+
+// Input identical to the test in vectorization.mlir. Output is different -
+// vector sizes are inferred (rather than user-specified) and hence _no_
+// masking is used.
+
+func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+  %pad = arith.constant 0.000000e+00 : f32
+  %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
+  return %pack : tensor<32x4x1x16x2xf32>
+}
+
+// CHECK-LABEL: func.func @test_vectorize_padded_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x7x15xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [0, 1, 3, 4, 2] : vector<32x4x2x1x16xf32> to vector<32x4x1x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<32x4x1x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<32x4x1x16x2xf32>, tensor<32x4x1x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<32x4x1x16x2xf32>
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
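Because the vector sizes above are inferred from the static shapes, the generated transfers are plain in-bounds reads and writes. For comparison only (this is not output from this commit), a masked read on the user-specified-size path would look roughly like the sketch below; the operands and attributes are illustrative:

func.func @masked_read_sketch(%src: tensor<32x7x15xf32>) -> vector<32x8x16xf32> {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.000000e+00 : f32
  %c32 = arith.constant 32 : index
  %c7 = arith.constant 7 : index
  %c15 = arith.constant 15 : index
  // The mask restricts the 32x8x16 read to the valid 32x7x15 region of %src.
  %mask = vector.create_mask %c32, %c7, %c15 : vector<32x8x16xi1>
  %read = vector.mask %mask {
    vector.transfer_read %src[%c0, %c0, %c0], %cst {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
  } : vector<32x8x16xi1> -> vector<32x8x16xf32>
  return %read : vector<32x8x16xf32>
}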

mlir/test/Dialect/Linalg/vectorization.mlir

Lines changed: 8 additions & 0 deletions
@@ -666,6 +666,10 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// Input identical to the test in vectorization-with-patterns.mlir. Output is
+// different - vector sizes are user-specified (rather than inferred) and hence
+// masking is used.
+
 func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
   %pack = tensor.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
   return %pack : tensor<4x1x32x16x2xf32>
@@ -692,6 +696,10 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// Input identical to the test in vectorization-with-patterns.mlir. Output is
+// different - vector sizes are user-specified (rather than inferred) and hence
+// masking is used.
+
 func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
   %pad = arith.constant 0.000000e+00 : f32
   %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
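For contrast with the patterns-based path above, vectorization.mlir pins the vector sizes on the vectorize op itself, which is what enables the masked lowering. A minimal sketch of such a transform sequence follows; the concrete vector_sizes values are illustrative rather than copied from this file:

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
    // User-specified vector sizes: masked vectorization is used instead of
    // relying on the static shapes of the pack op.
    transform.structured.vectorize %0 vector_sizes [32, 4, 1] : !transform.any_op
    transform.yield
  }
}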
