Commit 73412a2

Author: xiaohui1.xu
[Transform][Vector] Support vector methods in the new llvm version (#286)
1 parent: e0f6945

2 files changed: +3 additions, -5 deletions

lib/gc/Transforms/LowerToTileVector.cpp

Lines changed: 1 addition & 2 deletions
@@ -656,8 +656,7 @@ struct LowerToTileVectorPass
   // transpose or broadcast semantic etc.
   vector::populateVectorTransferPermutationMapLoweringPatterns(secondPattern);
   // Remove unnecessary broadcast operation
-  // TODO: disable this pattern until the following support is ready
-  // vector::populateSinkVectorBroadcastPatterns(secondPattern);
+  vector::populateSinkVectorOpsPatterns(secondPattern);
   // Second fold (with the help of the `applyPatternsAndFoldGreedily`
   // function) can help us to eliminate redundant operation like consecutive
   // read and write.
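
For context, here is a minimal sketch (not the project's actual pass; the helper name `applySecondStagePatterns` is hypothetical) of how these population calls are typically combined with `applyPatternsAndFoldGreedily`. `populateSinkVectorOpsPatterns` is the newer-LLVM entry point that supersedes `populateSinkVectorBroadcastPatterns` and, among other things, sinks `vector.broadcast`/`vector.transpose` past elementwise ops, which is why the test below now expects a single transpose after the `arith.addf`.

#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

// Hypothetical helper for illustration: collect the second-stage vector
// patterns into one set and apply them greedily to `root`.
static LogicalResult applySecondStagePatterns(Operation *root) {
  RewritePatternSet patterns(root->getContext());
  // Lower transfer ops whose permutation maps encode transpose/broadcast.
  vector::populateVectorTransferPermutationMapLoweringPatterns(patterns);
  // Newer LLVM API: sinks broadcast/transpose past elementwise operations
  // (replaces the old populateSinkVectorBroadcastPatterns entry point).
  vector::populateSinkVectorOpsPatterns(patterns);
  // The greedy driver also folds away redundant ops such as consecutive
  // reads and writes.
  return applyPatternsAndFoldGreedily(root, std::move(patterns));
}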

test/mlir/test/gc/Transforms/vectorization.mlir

Lines changed: 2 additions & 3 deletions
@@ -37,12 +37,11 @@ func.func @add_tensor_test1(%arg0: tensor<4x8x16xf32>, %arg1: tensor<4x8x16xf32>
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: %[[READ0:.*]] = vector.transfer_read %{{.*}} {in_bounds = [true, true, true]} : tensor<4x16x16xf32>, vector<4x16x16xf32>
  // CHECK: %[[SHAPECAST0:.*]] = vector.shape_cast %[[READ0]] : vector<4x16x16xf32> to vector<4x4x4x4x4xf32>
- // CHECK: %[[TRANSPOSE0:.*]] = vector.transpose %[[SHAPECAST0]], [1, 0, 3, 2, 4] : vector<4x4x4x4x4xf32> to vector<4x4x4x4x4xf32>
  // CHECK: %[[READ1:.*]] = vector.transfer_read %{{.*}} {in_bounds = [true, true, true]} : tensor<4x16x16xf32>, vector<4x16x16xf32>
  // CHECK: %[[SHAPECAST1:.*]] = vector.shape_cast %[[READ1]] : vector<4x16x16xf32> to vector<4x4x4x4x4xf32>
- // CHECK: %[[TRANSPOSE1:.*]] = vector.transpose %[[SHAPECAST1]], [1, 0, 3, 2, 4] : vector<4x4x4x4x4xf32> to vector<4x4x4x4x4xf32>
  // CHECK: %[[EMPTY:.*]] = tensor.empty() : tensor<4x4x4x4x4xf32>
- // CHECK: %[[ADD0:.*]] = arith.addf %[[TRANSPOSE0]], %[[TRANSPOSE1]] : vector<4x4x4x4x4xf32>
+ // CHECK: %[[ADD0:.*]] = arith.addf %[[SHAPECAST0]], %[[SHAPECAST1]] : vector<4x4x4x4x4xf32>
+ // CHECK: %[[TRANSPOSE1:.*]] = vector.transpose %[[ADD0]], [1, 0, 3, 2, 4] : vector<4x4x4x4x4xf32> to vector<4x4x4x4x4xf32>
  // CHECK: %[[WRITE0:.*]] = vector.transfer_write %{{.*}} {in_bounds = [true, true, true, true, true]} : vector<4x4x4x4x4xf32>, tensor<4x4x4x4x4xf32>
  func.func @add_tensor_pack_test2(%arg0: tensor<4x16x16xf32>, %arg1: tensor<4x16x16xf32>) -> tensor<4x4x4x4x4xf32> {
    %0 = tensor.empty() : tensor<4x4x4x4x4xf32>
