Skip to content

Commit c235589

Browse files
authored
[mlir][tosa] Add ERROR_IF checks to TRANSPOSE_CONV2D verifier (llvm#133234)
This patch extends the verifier with the following checks: ERROR_IF(out_pad_top <= -KH || out_pad_bottom <= -KH); ERROR_IF(out_pad_left <= -KW || out_pad_right <= -KW); ERROR_IF(stride_y < 1 || stride_x < 1); ERROR_IF(OH != (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH); ERROR_IF(OW != (IW - 1) * stride_x + out_pad_left + out_pad_right + KW); ERROR_IF(BC != OC && BC != 1). Signed-off-by: Elen Kalda <[email protected]>
1 parent ebacd46 commit c235589

File tree

5 files changed

+213
-29
lines changed

5 files changed

+213
-29
lines changed

mlir/lib/Dialect/Tosa/IR/TosaOps.cpp

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2886,6 +2886,118 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
28862886
// Verifies the TRANSPOSE_CONV2D-specific ERROR_IF conditions of the TOSA
// specification:
//   ERROR_IF(out_pad_top <= -KH || out_pad_bottom <= -KH)
//   ERROR_IF(out_pad_left <= -KW || out_pad_right <= -KW)
//   ERROR_IF(stride_y < 1 || stride_x < 1)
//   ERROR_IF(OH != (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH)
//   ERROR_IF(OW != (IW - 1) * stride_x + out_pad_left + out_pad_right + KW)
//   ERROR_IF(BC != OC && BC != 1)
// Checks that involve a dimension which is dynamic (or whose tensor is
// unranked) are skipped; they cannot be decided at verification time.
LogicalResult TransposeConv2DOp::verify() {
  if (verifyConvOp(*this).failed() || verifyConvOpModes(*this).failed())
    return failure();

  const llvm::ArrayRef<int64_t> strides = getStride();
  const int64_t strideY = strides[0];
  const int64_t strideX = strides[1];

  if (strideY < 1 || strideX < 1)
    return emitOpError("expect all stride values to be >= 1, got [")
           << strides << "]";

  // Any of these may be null when the corresponding tensor is unranked.
  const auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());
  const auto outputType =
      llvm::dyn_cast<RankedTensorType>(getOutput().getType());
  const auto weightType =
      llvm::dyn_cast<RankedTensorType>(getWeight().getType());

  // Checks a single out_pad entry against the matching kernel dimension;
  // the spec requires pad > -K.
  const auto checkPadAgainstKernelDim =
      [this](int64_t pad_value, int64_t kernel_dim_size,
             llvm::StringRef pad_name,
             llvm::StringRef kernel_dim_name) -> LogicalResult {
    if (pad_value <= -kernel_dim_size)
      return emitOpError("expected ")
             << pad_name << " > -" << kernel_dim_name
             << ", but got: " << pad_name << "=" << pad_value << " and "
             << kernel_dim_name << "=" << kernel_dim_size;
    return success();
  };

  const llvm::ArrayRef<int64_t> padding = getOutPad();
  const int64_t outPadTop = padding[0];
  const int64_t outPadBottom = padding[1];
  const int64_t outPadLeft = padding[2];
  const int64_t outPadRight = padding[3];

  // Kernel extents are only known for a ranked weight tensor. Fall back to
  // kDynamic when weightType is null so all dependent checks are skipped
  // (the original code dereferenced weightType unconditionally).
  const int64_t kernelHeight =
      weightType ? weightType.getDimSize(1) : ShapedType::kDynamic;
  const int64_t kernelWidth =
      weightType ? weightType.getDimSize(2) : ShapedType::kDynamic;

  if (!ShapedType::isDynamic(kernelHeight)) {
    if (failed(checkPadAgainstKernelDim(outPadTop, kernelHeight, "out_pad_top",
                                        "KH")))
      return failure();

    if (failed(checkPadAgainstKernelDim(outPadBottom, kernelHeight,
                                        "out_pad_bottom", "KH")))
      return failure();
  }

  if (!ShapedType::isDynamic(kernelWidth)) {
    if (failed(checkPadAgainstKernelDim(outPadLeft, kernelWidth, "out_pad_left",
                                        "KW")))
      return failure();

    if (failed(checkPadAgainstKernelDim(outPadRight, kernelWidth,
                                        "out_pad_right", "KW")))
      return failure();
  }

  // Rest of the checks depend on the output type being a RankedTensorType.
  if (!outputType)
    return success();

  // Output-extent checks also need a ranked input; guard the null dyn_cast
  // result and require the kernel dim to be static as well, so a dynamic KH
  // or KW cannot be folded into the arithmetic and raise a spurious error.
  if (inputType) {
    const int64_t inputHeight = inputType.getDimSize(1);
    const int64_t outputHeight = outputType.getDimSize(1);

    if (!ShapedType::isDynamic(inputHeight) &&
        !ShapedType::isDynamic(outputHeight) &&
        !ShapedType::isDynamic(kernelHeight)) {
      if (outputHeight !=
          (inputHeight - 1) * strideY + outPadTop + outPadBottom + kernelHeight)
        return emitOpError(
                   "dimension mismatch: expected OH == (IH - 1) * stride_y "
                   "+ out_pad_top + out_pad_bottom + KH, but got ")
               << outputHeight << " != (" << inputHeight << " - 1) * " << strideY
               << " + " << outPadTop << " + " << outPadBottom << " + "
               << kernelHeight;
    }

    const int64_t inputWidth = inputType.getDimSize(2);
    const int64_t outputWidth = outputType.getDimSize(2);

    if (!ShapedType::isDynamic(inputWidth) &&
        !ShapedType::isDynamic(outputWidth) &&
        !ShapedType::isDynamic(kernelWidth)) {
      if (outputWidth !=
          (inputWidth - 1) * strideX + outPadLeft + outPadRight + kernelWidth)
        return emitOpError(
                   "dimension mismatch: expected OW == (IW - 1) * stride_x "
                   "+ out_pad_left + out_pad_right + KW, but got ")
               << outputWidth << " != (" << inputWidth << " - 1) * " << strideX
               << " + " << outPadLeft << " + " << outPadRight << " + "
               << kernelWidth;
    }
  }

  const auto biasType = llvm::dyn_cast<RankedTensorType>(getBias().getType());

  if (!biasType)
    return success();

  const int64_t biasChannels = biasType.getDimSize(0);

  // Skip further checks if bias or output channel count is dynamic.
  if (biasChannels == ShapedType::kDynamic)
    return success();

  const int64_t outputChannels = outputType.getDimSize(3);
  if (!ShapedType::isDynamic(outputChannels) &&
      biasChannels != outputChannels && biasChannels != 1)
    return emitOpError(
               "bias channels expected to be equal to output channels (")
           << outputChannels << ") or 1, got " << biasChannels;

  return success();
}
28913003

mlir/test/Dialect/Tosa/invalid.mlir

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,78 @@ func.func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xi8>, %arg1: tensor<16x1
172172
return %0 : tensor<1x32x32x16xi8>
173173
}
174174

175+
// -----

// out_pad_top must satisfy out_pad_top > -KH (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_padding_top(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expected out_pad_top > -KH, but got: out_pad_top=-3 and KH=1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: -3, 0, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
182+
183+
// -----

// out_pad_bottom must satisfy out_pad_bottom > -KH (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_padding_bottom(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expected out_pad_bottom > -KH, but got: out_pad_bottom=-1 and KH=1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, -1, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
190+
191+
// -----

// out_pad_left must satisfy out_pad_left > -KW (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_padding_left(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expected out_pad_left > -KW, but got: out_pad_left=-8 and KW=1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, -8, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
198+
199+
// -----

// out_pad_right must satisfy out_pad_right > -KW (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_padding_right(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expected out_pad_right > -KW, but got: out_pad_right=-9 and KW=1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, -9>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
206+
207+
// -----

// stride_y must be >= 1 (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_stride_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expect all stride values to be >= 1, got [0, 1]}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 0, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
214+
215+
// -----

// stride_x must be >= 1 (TOSA ERROR_IF check).
func.func @test_transpose_conv2d_invalid_stride_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op expect all stride values to be >= 1, got [1, 0]}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 0>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
222+
223+
// -----

// OH must equal (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH;
// here 33 != (32 - 1) * 1 + 0 + 0 + 1.
func.func @test_transpose_conv2d_invalid_output_height(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x33x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op dimension mismatch: expected OH == (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH, but got 33 != (32 - 1) * 1 + 0 + 0 + 1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 33, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x33x32x16xf32>
  return %0 : tensor<1x33x32x16xf32>
}
230+
231+
// -----

// OW must equal (IW - 1) * stride_x + out_pad_left + out_pad_right + KW;
// here 40 != (32 - 1) * 1 + 0 + 0 + 1.
func.func @test_transpose_conv2d_invalid_output_width(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x40x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op dimension mismatch: expected OW == (IW - 1) * stride_x + out_pad_left + out_pad_right + KW, but got 40 != (32 - 1) * 1 + 0 + 0 + 1}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 32, 40, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x40x16xf32>
  return %0 : tensor<1x32x40x16xf32>
}
238+
239+
// -----

// Bias channel count (BC=5) must equal output channels (OC=16) or be 1.
func.func @test_transpose_conv2d_invalid_bias(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<5xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x32x32x16xf32> {
  // expected-error@+1 {{'tosa.transpose_conv2d' op bias channels expected to be equal to output channels (16) or 1, got 5}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<5xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
  return %0 : tensor<1x32x32x16xf32>
}
246+
175247
// -----
176248
// CHECK-LABEL: conv2d_quant_any_acc
177249
func.func @test_conv2d_quant_any_acc(%arg0: tensor<1x4x4x4x!quant.any<i8<-8:7>>>, %arg1: tensor<8x1x1x4x!quant.any<i8<-8:7>>>, %arg2: tensor<8x!quant.any<i8<-8:7>>>) -> tensor<1x4x4x8x!quant.any<i8<-8:7>>> {

mlir/test/Dialect/Tosa/invalid_extension.mlir

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -165,11 +165,11 @@ func.func @test_depthwise_conv2d_non_const_input_zp(%arg0: tensor<1x4x4x4xi8>, %
165165

166166
// -----
167167

168-
// Post-image of the diff: output width updated 4 -> 7 so the shape satisfies
// the new OW == (IW - 1) * stride_x + out_pad_left + out_pad_right + KW check
// (IW=4, KW=4, stride_x=1, zero padding -> OW=7), keeping the test focused on
// the non-constant weight zero-point error below.
func.func @test_transpose_conv2d_non_const_weight_zp(%arg0: tensor<1x4x4x4xi8>, %arg1: tensor<1x1x4x2xi8>, %arg2: tensor<8xi32>, %arg3: tensor<1xi8>) -> tensor<1x4x7x8xi32> {
  %input_zp = "tosa.const"() {values = dense<0> : tensor<1xi8> } : () -> tensor<1xi8>
  // expected-error@+1 {{'tosa.transpose_conv2d' op expected compile time resolvable constant, but got variable value for operand #4}}
  %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %arg3 {acc_type = i32, out_pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<1x4x4x4xi8>, tensor<1x1x4x2xi8>, tensor<8xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<1x4x7x8xi32>
  return %0 : tensor<1x4x7x8xi32>
}
174174

175175
// -----

0 commit comments

Comments
 (0)