Commit 9a844c2

Author: MaheshRavishankar
Revert "[mlir] Make sure linearizeCollapsedDims doesn't drop input map dims"
This reverts commit bc38673.
1 parent: bc38673

2 files changed: +1, -37 lines

mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp

Lines changed: 1 addition & 9 deletions
@@ -385,15 +385,7 @@ static AffineMap linearizeCollapsedDims(AffineMap sourceMap,
         makeCanonicalStridedLayoutExpr(sizes, dimExprs, context);
     resultExprs.push_back(linearizedExpr);
   }
-  // The new affine map cannot drop unused dimension but some new symbols may
-  // have been added. Create a map with at least as many dimensions/symbols as
-  // the original affine map.
-  int64_t maxDim = -1;
-  int64_t maxSym = -1;
-  getMaxDimAndSymbol<SmallVector<AffineExpr>>({resultExprs}, maxDim, maxSym);
-  unsigned numDims = std::max(unsigned(maxDim + 1), sourceMap.getNumDims());
-  unsigned numSyms = std::max(unsigned(maxSym + 1), sourceMap.getNumSymbols());
-  return AffineMap::get(numDims, numSyms, resultExprs, context);
+  return AffineMap::inferFromExprList({resultExprs}).front();
 }
 
 // TensorExpandShapeOp is fusable with its consumer (i.e. reshape as a
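
For context, a minimal sketch (illustrative only, not part of this commit, using the AffineMap API as it appears in this diff) of how the two constructions differ: AffineMap::inferFromExprList sizes the map from the largest dimension/symbol actually referenced in the expressions, so when linearization leaves a dimension unused, the inferred map has fewer dims than sourceMap, while the removed code padded the counts back up to the source map's rank.

#include <cassert>
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"

using namespace mlir;

// Suppose the source map had two dims but the linearized result uses only d0.
void dimDropSketch(MLIRContext *ctx) {
  AffineExpr d0 = getAffineDimExpr(0, ctx);
  SmallVector<AffineExpr, 4> resultExprs = {d0};

  // Inferred from the expressions alone: (d0) -> (d0), one dim.
  AffineMap inferred = AffineMap::inferFromExprList({resultExprs}).front();

  // Padded to the source rank, as the reverted code did: (d0, d1) -> (d0).
  AffineMap padded =
      AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0, resultExprs, ctx);

  assert(inferred.getNumDims() == 1 && padded.getNumDims() == 2);
}
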

mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir

Lines changed: 0 additions & 28 deletions
@@ -199,31 +199,3 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
 // CHECK-SAME:     ins(%[[ARG0]], %[[ARG1]]
 //      CHECK:   %[[RESULT:.+]] = linalg.tensor_collapse_shape %[[NOFUSE]]
 //      CHECK:   return %[[RESULT]]
-
-
-// -----
-
-func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<6x1xf32>) -> tensor<6xi32> {
-  %0 = linalg.init_tensor [6, 1] : tensor<6x1xi32>
-  %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                        affine_map<(d0, d1) -> (d0, d1)>],
-                       iterator_types = ["parallel", "parallel"]}
-    ins(%arg0 : tensor<6x1xf32>) outs(%0 : tensor<6x1xi32>) {
-  ^bb0(%arg3: f32, %arg4: i32):  // no predecessors
-    %5 = arith.fptosi %arg3 : f32 to i32
-    linalg.yield %5 : i32
-  } -> tensor<6x1xi32>
-  %6 = linalg.tensor_collapse_shape %1 [[0, 1]] : tensor<6x1xi32> into tensor<6xi32>
-  return %6 : tensor<6xi32>
-}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d0)>
-//      CHECK: func @generic_op_permultation_reshape_consumer_fusion_unused_dim
-// CHECK-SAME:   %[[ARG0:.+]]: tensor<6x1xf32>
-//      CHECK:   %[[T0:.+]] = linalg.init_tensor [6, 1]
-//      CHECK:   %[[T1:.+]] = linalg.tensor_collapse_shape %[[T0]]
-// CHECK-SAME:     [0, 1]
-//      CHECK:   linalg.generic
-// CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]
-// CHECK-SAME:     ins(%[[ARG0]] : tensor<6x1xf32>)
-// CHECK-SAME:     outs(%[[T1]] : tensor<6xi32>)
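
The deleted test exercised the unused-dimension case directly: collapsing the unit dimension of tensor<6x1xi32> leaves the consumer's indexing map as (d0, d1) -> (d0), which must still declare both loop dims even though d1 no longer appears. A hedged sketch of the invariant the test guarded (the helper below is hypothetical, not linalg API):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/AffineMap.h"

using namespace mlir;

// Hypothetical check: every indexing map of a structured op must have one
// dim per loop, even when a dim is unused in the map's results.
static bool indexingMapsMatchLoopCount(ArrayRef<AffineMap> maps,
                                       unsigned numLoops) {
  return llvm::all_of(
      maps, [&](AffineMap m) { return m.getNumDims() == numLoops; });
}
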
