Skip to content

Commit f58fb8c

Browse files
authored
[mlir][tosa] Fix lowering of tosa.conv2d (#73240)
The lowering of tosa.conv2d produces an illegal tensor.empty operation where the number of inputs does not match the number of dynamic dimensions in the output type. The fix is to base the generation of tensor.dim operations on the result type of the conv2d operation, rather than on the input type. The problem and fix are very similar to those in #72724, but apply to convolution instead.
1 parent 0d87e25 commit f58fb8c

File tree

2 files changed

+25
-2
lines changed

2 files changed

+25
-2
lines changed

mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ static SmallVector<Value> inferDynamicDimsForConv(
179179
for (uint32_t i = 0, s = inputSizeDims.size(); i < s; ++i) {
180180
int64_t inputDim = inputSizeDims[i];
181181
int64_t kernelDim = kernelSizeDims[i];
182-
if (inputTy.isDynamicDim(inputDim)) {
182+
if (resultTy.isDynamicDim(inputDim)) {
183183
auto padTop = padAttr[i * 2];
184184
auto padBottom = padAttr[i * 2 + 1];
185185
auto stride = strideAttr[i];
@@ -196,7 +196,7 @@ static SmallVector<Value> inferDynamicDimsForConv(
196196

197197
// Get the batch/channels dimensions.
198198
for (int i = 0; i < inputRank; i++) {
199-
if (inputTy.isDynamicDim(i) && !dynDims[i])
199+
if (resultTy.isDynamicDim(i) && !dynDims[i])
200200
dynDims[i] = rewriter.create<tensor::DimOp>(loc, input, i);
201201
}
202202

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -495,6 +495,29 @@ func.func @conv2d_dyn_w_h(%input: tensor<1x?x?x27xf32>, %weights: tensor<28x3x3x
495495

496496
// -----
497497

498+
// CHECK: [[$MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3)>
499+
// CHECK: [[$MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
500+
501+
func.func @conv2d_dyn_output(%input: tensor<2x6x5x4xf32>, %weights: tensor<4x3x3x4xf32>, %bias: tensor<4xf32>) {
502+
// %[[C0:.+]] = arith.constant 0 : index
503+
// %[[DIM0:.+]] = tensor.dim %input, %[[C0]] : tensor<2x6x5x4xf32>
504+
// %[[INIT_CONV:.+]] = tensor.empty(%[[DIM0]]) : tensor<?x4x3x4xf32>
505+
// %[[ZERO:.+]] = arith.constant 0.000000e+00 : f32
506+
// %[[FILL:.+]] = linalg.fill
507+
// %[[INIT_GENERIC:.+]] = tensor.empty([[DIM0]]) : tensor<?x4x3x4xf32>
508+
509+
// %[[CONV:.+]] = linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<2x6x5x4xf32>, tensor<4x3x3x4xf32>) outs(%[[INIT_CONV]] : tensor<?x4x3x4xf32>) -> tensor<?x4x3x4xf32>
510+
// linalg.generic {indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[CONV]] : tensor<4xf32>, tensor<?x4x3x4xf32>) outs(%[[INIT_GENERIC]] : tensor<?x4x3x4xf32>) {
511+
// %[[ADD:.+]] = arith.addf
512+
// linalg.yield %[[ADD]] : f32
513+
// } -> tensor<?x4x3x4xf32>
514+
515+
%0 = tosa.conv2d %input, %weights, %bias {dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<2x6x5x4xf32>, tensor<4x3x3x4xf32>, tensor<4xf32>) -> tensor<?x4x3x4xf32>
516+
return
517+
}
518+
519+
// -----
520+
498521
// CHECK-LABEL: @conv2d_padded_f32
499522
func.func @conv2d_padded_f32(%input: tensor<1x47x40x28xf32>, %weights: tensor<28x3x3x28xf32>, %bias: tensor<28xf32>) -> () {
500523
// CHECK: %[[C0:.+]] = arith.constant 0

0 commit comments

Comments
 (0)