
Commit 5994201

Author: MaheshRavishankar

[mlir][Linalg] NFC: Rename FusionOfTensors pass to FusionOfElementwiseOps pass.

This makes it more explicit what the scope of this pass is. The name of this
pass predates fusion on tensors using tile + fuse, and hence the confusion.

Differential Revision: https://reviews.llvm.org/D106132

1 parent 42e6cfc · commit 5994201

File tree

6 files changed (+13, -13 lines)


mlir/include/mlir/Dialect/Linalg/Passes.h

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ std::unique_ptr<OperationPass<FuncOp>> createConvertElementwiseToLinalgPass();
 
 std::unique_ptr<OperationPass<FuncOp>> createLinalgFoldUnitExtentDimsPass();
 
-std::unique_ptr<Pass> createLinalgFusionOfTensorOpsPass();
+std::unique_ptr<Pass> createLinalgElementwiseOpFusionPass();
 
 std::unique_ptr<Pass> createFoldReshapeOpsByLinearizationPass();
 
 std::unique_ptr<OperationPass<FuncOp>>
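Since only the factory function is renamed, downstream C++ pipelines need a one-line update. A minimal sketch, assuming a hand-built pass manager (the helper function and surrounding setup are illustrative, not part of this commit):

#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Pass/PassManager.h"

// Illustrative helper; only the create* call reflects this commit.
static void addLinalgFusionPasses(mlir::PassManager &pm) {
  // Previously: pm.addPass(mlir::createLinalgFusionOfTensorOpsPass());
  pm.addPass(mlir::createLinalgElementwiseOpFusionPass());
}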

mlir/include/mlir/Dialect/Linalg/Passes.td

Lines changed: 3 additions & 3 deletions

@@ -56,9 +56,9 @@ def LinalgFoldUnitExtentDims : FunctionPass<"linalg-fold-unit-extent-dims"> {
   ];
 }
 
-def LinalgFusionOfTensorOps : Pass<"linalg-fusion-for-tensor-ops"> {
-  let summary = "Fuse operations on RankedTensorType in linalg dialect";
-  let constructor = "mlir::createLinalgFusionOfTensorOpsPass()";
+def LinalgElementwiseOpFusion : Pass<"linalg-fuse-elementwise-ops"> {
+  let summary = "Fuse elementwise operations on tensors";
+  let constructor = "mlir::createLinalgElementwiseOpFusionPass()";
   let options = [
     Option<"allowFoldingUnitDimReshapes", "allow-folding-unit-dim-reshapes",
            "bool", /*default=*/"false",

mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt

Lines changed: 1 addition & 1 deletion

@@ -5,9 +5,9 @@ add_mlir_dialect_library(MLIRLinalgTransforms
   Detensorize.cpp
   Distribution.cpp
   DropUnitDims.cpp
+  ElementwiseOpFusion.cpp
   ElementwiseToLinalg.cpp
   Fusion.cpp
-  FusionOnTensors.cpp
   Generalization.cpp
   Hoisting.cpp
   InlineScalarOperands.cpp

mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp renamed to mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp

Lines changed: 5 additions & 5 deletions

@@ -1,4 +1,4 @@
-//===- Fusion.cpp - Implementation of linalg Fusion -----------------------===//
+//===- ElementwiseOpFusion.cpp - Implementation of linalg Fusion ---------===///
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -1281,8 +1281,8 @@ class FuseElementwiseOps : public OpRewritePattern<GenericOp> {
 };
 
 /// Pass that fuses generic ops on tensors. Used only for testing.
-struct FusionOfTensorOpsPass
-    : public LinalgFusionOfTensorOpsBase<FusionOfTensorOpsPass> {
+struct LinalgElementwiseOpFusionPass
+    : public LinalgElementwiseOpFusionBase<LinalgElementwiseOpFusionPass> {
   void runOnOperation() override {
     Operation *op = getOperation();
     RewritePatternSet patterns(op->getContext());
@@ -1410,8 +1410,8 @@ void mlir::linalg::populatePushReshapeOpsPatterns(RewritePatternSet &patterns) {
   patterns.add<PushExpandingReshape>(context);
 }
 
-std::unique_ptr<Pass> mlir::createLinalgFusionOfTensorOpsPass() {
-  return std::make_unique<FusionOfTensorOpsPass>();
+std::unique_ptr<Pass> mlir::createLinalgElementwiseOpFusionPass() {
+  return std::make_unique<LinalgElementwiseOpFusionPass>();
 }
 
 std::unique_ptr<Pass> mlir::createFoldReshapeOpsByLinearizationPass() {
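The hunks above show only the head of runOnOperation and the renamed factory; the pattern population and application between them are unchanged by this commit. For context, a pattern-based pass with this shape typically ends by handing the populated set to the greedy rewrite driver, along these lines (a sketch under that assumption, not quoted from the file):

#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

// Sketch: apply the populated patterns to the pass's root op until a fixed
// point is reached; a failed result signals non-convergence to the caller.
static mlir::LogicalResult runFusionPatterns(mlir::Operation *op,
                                             mlir::RewritePatternSet &&patterns) {
  return mlir::applyPatternsAndFoldGreedily(op, std::move(patterns));
}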

mlir/test/Dialect/Linalg/fusion-tensor.mlir renamed to mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops -split-input-file | FileCheck %s
 
 // CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
 #map0 = affine_map<(d0, d1) -> (d0, d1)>

mlir/test/Dialect/Linalg/reshape_fusion.mlir

Lines changed: 2 additions & 2 deletions

@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops="allow-folding-unit-dim-reshapes=false" -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -linalg-fusion-for-tensor-ops="allow-folding-unit-dim-reshapes=true" -split-input-file | FileCheck %s --check-prefix=FOLDUNITDIM
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops="allow-folding-unit-dim-reshapes=false" -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -linalg-fuse-elementwise-ops="allow-folding-unit-dim-reshapes=true" -split-input-file | FileCheck %s --check-prefix=FOLDUNITDIM
 #map0 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
 #map1 = affine_map<(d0, d1, d2) -> (d1, d2, d0)>
 #map2 = affine_map<(d0, d1, d2) -> ()>
