[mlir][tosa] Add several level checks #128074
Conversation
@llvm/pr-subscribers-mlir-tosa @llvm/pr-subscribers-mlir

Author: TatWai Chong (tatwaichong)

Changes

Add the following types of level check to consolidate the level validity:

- Complete rank level checks for operations.
- Add MAX_LOG2_SIZE level check: the maximum value is 63 when the level is set to "none" and 31 when the level is set to "8K".
- Add MAX_TENSOR_LIST_SIZE level check: the maximum value is 256 when the level is set to "none" and 64 when the level is set to "8K".
- TOSA 1.0 spec does not allow operations with dynamic shapes, so an error should be raised instead.
Change-Id: I797fafe504219e43950824c04839c7187065fe8e

Patch is 45.84 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/128074.diff

2 Files Affected:

- mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
- mlir/test/Dialect/Tosa/level_check.mlir
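For illustration before the patch itself, here is a hypothetical test in the style of the level_check.mlir additions below (the function name and shapes are mine, not from the patch), showing the new MAX_LOG2_SIZE dimension check at the 8K level, where every dimension must be at most (1 << 31) - 1:

```mlir
// Assumes the 8K level (MAX_LOG2_SIZE = 31): the second dimension is
// 4294967296 > (1 << 31) - 1 = 2147483647, so the dimension check fires.
func.func @test_dim_too_large(%arg0: tensor<1x4294967296xi8>) -> tensor<1x4294967296xi8> {
  // expected-error@+1 {{'tosa.identity' op failed level check: operand shape dimension <= (1<<MAX_LOG2_SIZE) - 1}}
  %0 = tosa.identity %arg0 : (tensor<1x4294967296xi8>) -> tensor<1x4294967296xi8>
  return %0 : tensor<1x4294967296xi8>
}
```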
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index f74a4b4c58b80..f8e788c0497fd 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -70,17 +70,22 @@ struct TosaLevel {
int32_t MAX_KERNEL = 0;
int32_t MAX_STRIDE = 0;
int32_t MAX_SCALE = 0;
-
- // @todo: MAX_LOG2_SIZE value and checks
+ int32_t MAX_LOG2_SIZE = 0;
+ int32_t MAX_NESTING = 0;
+ int32_t MAX_TENSOR_LIST_SIZE = 0;
bool operator==(const TosaLevel &rhs) {
return MAX_RANK == rhs.MAX_RANK && MAX_KERNEL == rhs.MAX_KERNEL &&
- MAX_STRIDE == rhs.MAX_STRIDE && MAX_SCALE == rhs.MAX_SCALE;
+ MAX_STRIDE == rhs.MAX_STRIDE && MAX_SCALE == rhs.MAX_SCALE &&
+ MAX_LOG2_SIZE == rhs.MAX_LOG2_SIZE &&
+ MAX_NESTING == rhs.MAX_NESTING &&
+ MAX_TENSOR_LIST_SIZE == rhs.MAX_TENSOR_LIST_SIZE;
}
};
-static constexpr TosaLevel TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256};
-static constexpr TosaLevel TOSA_LEVEL_NONE = {0, 0, 0, 0};
+static constexpr TosaLevel TOSA_LEVEL_EIGHTK = {6, 8192, 8192, 256, 31, 6, 64};
+static constexpr TosaLevel TOSA_LEVEL_NONE = {32, 2147483647, 2147483647, 2048,
+ 63, 256, 256};
//===----------------------------------------------------------------------===//
// TOSA Validation Pass.
@@ -147,107 +152,149 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
return true;
}
- bool levelCheckRank(Operation *op, const Value &v,
- const std::string &checkDesc) {
+ bool levelCheckListSize(Operation *op, int32_t v,
+ const std::string &checkDesc) {
+ if (v > tosaLevel.MAX_TENSOR_LIST_SIZE) {
+ op->emitOpError() << "failed level check for MAX_TENSOR_LIST_SIZE: "
+ << checkDesc;
+ return false;
+ }
+ return true;
+ }
+
+ bool levelCheckRankAndSizes(Operation *op, const Value &v,
+ const std::string &operandOrResult) {
if (ShapedType type = dyn_cast<ShapedType>(v.getType())) {
if (!type.hasRank()) {
op->emitOpError() << "failed level check: unranked tensor";
return false;
}
if (type.getRank() > tosaLevel.MAX_RANK) {
- op->emitOpError() << "failed level check: " << checkDesc;
+ op->emitOpError() << "failed level check: " << operandOrResult
+ << " rank(shape) <= MAX_RANK";
return false;
}
+
+ const int64_t max_dim = (1L << tosaLevel.MAX_LOG2_SIZE) - 1;
+ const int64_t max_size = (1L << (tosaLevel.MAX_LOG2_SIZE + 1)) - 1;
+
+ auto shape = type.getShape();
+ bool has_dynamic = false;
+ for (auto dim : shape) {
+ if (mlir::ShapedType::isDynamic(dim)) {
+ has_dynamic = true;
+ continue;
+ }
+ if (dim > max_dim) {
+ op->emitOpError() << "failed level check: " << operandOrResult
+ << " shape dimension <= (1<<MAX_LOG2_SIZE) - 1";
+ return false;
+ }
+ }
+ if (!has_dynamic) {
+ int64_t element_bits = type.getElementTypeBitWidth();
+ int64_t element_bytes = std::max(1L, element_bits / 8);
+ int64_t size = element_bytes * type.getNumElements();
+ if (size > max_size) {
+ op->emitOpError()
+ << "failed level check: " << operandOrResult
+ << " tensor size (in bytes) <= (1<<MAX_LOG2_SIZE+1) - 1";
+ return false;
+ }
+ }
}
return true;
}
template <typename T>
- bool levelCheckRanksFor(Operation *op) {
+ bool levelCheckRanksAndSizesFor(Operation *op) {
if (dyn_cast<T>(op)) {
// level check ranks of all operands and results
for (auto v : op->getOperands()) {
- if (!levelCheckRank(op, v, "operand rank(shape) <= MAX_RANK"))
+ if (!levelCheckRankAndSizes(op, v, "operand"))
return false;
}
for (auto v : op->getResults()) {
- if (!levelCheckRank(op, v, "result rank(shape) <= MAX_RANK"))
+ if (!levelCheckRankAndSizes(op, v, "result"))
return false;
}
}
return true;
}
- bool levelCheckRanks(Operation *op) {
-#define CHECK_RANKS_FOR(tosaOp) \
- if (!levelCheckRanksFor<tosaOp##Op>(op)) \
+ bool levelCheckRanksAndSizes(Operation *op) {
+#define CHECK_RANKS_AND_SIZES_FOR(tosaOp) \
+ if (!levelCheckRanksAndSizesFor<tosaOp##Op>(op)) \
return false;
// tensor operators:
- CHECK_RANKS_FOR(ArgMax);
+ CHECK_RANKS_AND_SIZES_FOR(ArgMax);
// all activation functions:
- CHECK_RANKS_FOR(Clamp);
- CHECK_RANKS_FOR(Sigmoid);
- CHECK_RANKS_FOR(Tanh);
+ CHECK_RANKS_AND_SIZES_FOR(Clamp);
+ CHECK_RANKS_AND_SIZES_FOR(Erf);
+ CHECK_RANKS_AND_SIZES_FOR(Sigmoid);
+ CHECK_RANKS_AND_SIZES_FOR(Tanh);
// all elementwise binary operators:
- CHECK_RANKS_FOR(Add);
- CHECK_RANKS_FOR(ArithmeticRightShift);
- CHECK_RANKS_FOR(BitwiseAnd);
- CHECK_RANKS_FOR(BitwiseOr);
- CHECK_RANKS_FOR(BitwiseXor);
- CHECK_RANKS_FOR(IntDiv);
- CHECK_RANKS_FOR(LogicalAnd);
- CHECK_RANKS_FOR(LogicalLeftShift);
- CHECK_RANKS_FOR(LogicalRightShift);
- CHECK_RANKS_FOR(LogicalOr);
- CHECK_RANKS_FOR(LogicalXor);
- CHECK_RANKS_FOR(Maximum);
- CHECK_RANKS_FOR(Minimum);
- CHECK_RANKS_FOR(Mul);
- CHECK_RANKS_FOR(Pow);
- CHECK_RANKS_FOR(Sub);
- CHECK_RANKS_FOR(Table);
+ CHECK_RANKS_AND_SIZES_FOR(Add);
+ CHECK_RANKS_AND_SIZES_FOR(ArithmeticRightShift);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseAnd);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseOr);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseXor);
+ CHECK_RANKS_AND_SIZES_FOR(IntDiv);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalAnd);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalLeftShift);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalRightShift);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalOr);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalXor);
+ CHECK_RANKS_AND_SIZES_FOR(Maximum);
+ CHECK_RANKS_AND_SIZES_FOR(Minimum);
+ CHECK_RANKS_AND_SIZES_FOR(Mul);
+ CHECK_RANKS_AND_SIZES_FOR(Pow);
+ CHECK_RANKS_AND_SIZES_FOR(Sub);
+ CHECK_RANKS_AND_SIZES_FOR(Table);
// all elementwise unary operators:
- CHECK_RANKS_FOR(Abs);
- CHECK_RANKS_FOR(BitwiseNot);
- CHECK_RANKS_FOR(Ceil);
- CHECK_RANKS_FOR(Clz);
- CHECK_RANKS_FOR(Exp);
- CHECK_RANKS_FOR(Floor);
- CHECK_RANKS_FOR(Log);
- CHECK_RANKS_FOR(LogicalNot);
- CHECK_RANKS_FOR(Negate);
- CHECK_RANKS_FOR(Reciprocal);
- CHECK_RANKS_FOR(Rsqrt);
+ CHECK_RANKS_AND_SIZES_FOR(Abs);
+ CHECK_RANKS_AND_SIZES_FOR(BitwiseNot);
+ CHECK_RANKS_AND_SIZES_FOR(Ceil);
+ CHECK_RANKS_AND_SIZES_FOR(Clz);
+ CHECK_RANKS_AND_SIZES_FOR(Cos);
+ CHECK_RANKS_AND_SIZES_FOR(Exp);
+ CHECK_RANKS_AND_SIZES_FOR(Floor);
+ CHECK_RANKS_AND_SIZES_FOR(Log);
+ CHECK_RANKS_AND_SIZES_FOR(LogicalNot);
+ CHECK_RANKS_AND_SIZES_FOR(Negate);
+ CHECK_RANKS_AND_SIZES_FOR(Reciprocal);
+ CHECK_RANKS_AND_SIZES_FOR(Rsqrt);
+ CHECK_RANKS_AND_SIZES_FOR(Sin);
// all elementwise ternary operators:
- CHECK_RANKS_FOR(Select);
+ CHECK_RANKS_AND_SIZES_FOR(Select);
// all comparison operators:
- CHECK_RANKS_FOR(Equal);
- CHECK_RANKS_FOR(Greater);
- CHECK_RANKS_FOR(GreaterEqual);
+ CHECK_RANKS_AND_SIZES_FOR(Equal);
+ CHECK_RANKS_AND_SIZES_FOR(Greater);
+ CHECK_RANKS_AND_SIZES_FOR(GreaterEqual);
// all reduction operators:
- CHECK_RANKS_FOR(ReduceAll);
- CHECK_RANKS_FOR(ReduceAny);
- CHECK_RANKS_FOR(ReduceMax);
- CHECK_RANKS_FOR(ReduceMin);
- CHECK_RANKS_FOR(ReduceProd);
- CHECK_RANKS_FOR(ReduceSum);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceAll);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceAny);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceMax);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceMin);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceProd);
+ CHECK_RANKS_AND_SIZES_FOR(ReduceSum);
// all data layout operators:
- CHECK_RANKS_FOR(Concat);
- CHECK_RANKS_FOR(Pad);
- CHECK_RANKS_FOR(Reshape);
- CHECK_RANKS_FOR(Reverse);
- CHECK_RANKS_FOR(Slice);
- CHECK_RANKS_FOR(Tile);
- CHECK_RANKS_FOR(Transpose);
+ CHECK_RANKS_AND_SIZES_FOR(Concat);
+ CHECK_RANKS_AND_SIZES_FOR(Pad);
+ CHECK_RANKS_AND_SIZES_FOR(Reshape);
+ CHECK_RANKS_AND_SIZES_FOR(Reverse);
+ CHECK_RANKS_AND_SIZES_FOR(Slice);
+ CHECK_RANKS_AND_SIZES_FOR(Tile);
+ CHECK_RANKS_AND_SIZES_FOR(Transpose);
// all type conversion operators:
- CHECK_RANKS_FOR(Cast);
- CHECK_RANKS_FOR(Rescale);
+ CHECK_RANKS_AND_SIZES_FOR(Cast);
+ CHECK_RANKS_AND_SIZES_FOR(Rescale);
// all data nodes operators:
- CHECK_RANKS_FOR(Const);
- CHECK_RANKS_FOR(Identity);
+ CHECK_RANKS_AND_SIZES_FOR(Const);
+ CHECK_RANKS_AND_SIZES_FOR(Identity);
-#undef CHECK_RANKS_FOR
+#undef CHECK_RANKS_AND_SIZES_FOR
return true;
}
@@ -396,6 +443,32 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
return true;
}
+ bool levelCheckListSize(Operation *op) {
+ if (auto concat = dyn_cast<tosa::ConcatOp>(op)) {
+ return levelCheckListSize(op, concat.getInput1().size(), "input1");
+ }
+ if (auto custom = dyn_cast<tosa::CustomOp>(op)) {
+ if (!levelCheckListSize(op, custom.getInputList().size(), "input_list") ||
+ !levelCheckListSize(op, custom.getOutputList().size(),
+ "output_list")) {
+ return false;
+ }
+ }
+ if (auto condIf = dyn_cast<tosa::IfOp>(op)) {
+ if (!levelCheckListSize(op, condIf.getInputs().size(), "inputs") ||
+ !levelCheckListSize(op, condIf.getOutput().size(), "outputs")) {
+ return false;
+ }
+ }
+ if (auto w = dyn_cast<tosa::WhileOp>(op)) {
+ if (!levelCheckListSize(op, w.getInputs().size(), "inputs") ||
+ !levelCheckListSize(op, w.getOutput().size(), "outputs")) {
+ return false;
+ }
+ }
+ return true;
+ }
+
// configure profile and level values from pass options profileName and
// levelName
void configLevelAndProfile() {
@@ -449,7 +522,7 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
return success();
}
- if (!levelCheckRanks(op)) {
+ if (!levelCheckRanksAndSizes(op)) {
return failure();
}
@@ -465,6 +538,11 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) {
return failure();
}
+ // level check MAX_TENSOR_LIST_SIZE
+ if (!levelCheckListSize(op)) {
+ return failure();
+ }
+
return success();
}
@@ -695,6 +773,10 @@ LogicalResult TosaValidation::applyErrorIfCheck(Operation *op) {
}
bool TosaValidation::isValidElementType(Type type) {
+ if (auto quantType =
+ llvm::dyn_cast<mlir::quant::QuantizedType>(type))
+ type = quantType.getStorageType();
+
if (isa<FloatType>(type)) {
return type.isF32() || type.isF16() || type.isBF16();
} else if (auto intTy = dyn_cast<IntegerType>(type)) {
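The isValidElementType change above means quantized element types are now judged by their storage type rather than rejected outright. A hypothetical illustration (this exact test is not part of the patch): a !quant.uniform type with i8 storage should pass the element-type validation the same way a plain i8 tensor does.

```mlir
// Hypothetical: the quantized type's storage type is i8, so after this
// change the validation treats the tensor like an i8 tensor and accepts it.
func.func @test_quant_storage(%arg0: tensor<1x2x!quant.uniform<i8:f32, 0.5>>) -> tensor<1x2x!quant.uniform<i8:f32, 0.5>> {
  %0 = tosa.identity %arg0 : (tensor<1x2x!quant.uniform<i8:f32, 0.5>>) -> tensor<1x2x!quant.uniform<i8:f32, 0.5>>
  return %0 : tensor<1x2x!quant.uniform<i8:f32, 0.5>>
}
```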
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index 90c4551564d1e..74e71dd8d1c19 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -2,7 +2,7 @@
// Enable all supported profiles and extensions to focus the verification of expected level errors.
//--------------------------------------------------------------------------------------------------
-// RUN: mlir-opt %s -split-input-file -verify-diagnostics --tosa-validate="profile=pro_int,pro_fp,mt extension=int16,int4,bf16,fp8e4m3,fp8e5m2,fft,variable"
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics --tosa-validate="profile=pro_int,pro_fp extension=int16,int4,bf16,fp8e4m3,fp8e5m2,fft,variable"
func.func @test_argmax(%arg0: tensor<1x1x1x1x29x29x4xf32>) -> tensor<1x1x1x1x29x4xi32> {
// expected-error@+1 {{'tosa.argmax' op failed level check: operand rank(shape) <= MAX_RANK}}
@@ -12,6 +12,311 @@ func.func @test_argmax(%arg0: tensor<1x1x1x1x29x29x4xf32>) -> tensor<1x1x1x1x29x
// -----
+func.func @test_clamp(%arg0: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.clamp' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.clamp %arg0 {min_val = -3.40282347E+38 : f32, max_val = 3.40282347E+38 : f32} : (tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_erf(%arg0: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.erf' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.erf %arg0 : (tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_sigmoid(%arg0: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.sigmoid' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.sigmoid %arg0 : (tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_tanh(%arg0: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.tanh' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.tanh %arg0 : (tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_add(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.add' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.add %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_arithmetic_right_shift(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.arithmetic_right_shift' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.arithmetic_right_shift %arg0, %arg1 {round = false} : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_bitwise_and(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.bitwise_and' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.bitwise_and %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_bitwise_or(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.bitwise_or' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.bitwise_or %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_bitwise_xor(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.bitwise_xor' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.bitwise_xor %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_int_div(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.int_div' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.int_div %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_logical_and(%arg0: tensor<1x1x1x1x13x21x3xi1>, %arg1: tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1> {
+ // expected-error@+1 {{'tosa.logical_and' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.logical_and %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi1>, tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1>
+ return %0 : tensor<1x1x1x1x13x21x3xi1>
+}
+
+// -----
+
+func.func @test_logical_left_shift(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.logical_left_shift' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.logical_left_shift %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_logical_right_shift(%arg0: tensor<1x1x1x1x13x21x3xi32>, %arg1: tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32> {
+ // expected-error@+1 {{'tosa.logical_right_shift' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.logical_right_shift %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xi32>, tensor<1x1x1x1x13x21x3xi32>) -> tensor<1x1x1x1x13x21x3xi32>
+ return %0 : tensor<1x1x1x1x13x21x3xi32>
+}
+
+// -----
+
+func.func @test_logical_or(%arg0: tensor<1x1x1x1x13x1x3xi1>, %arg1: tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1> {
+ // expected-error@+1 {{'tosa.logical_or' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.logical_or %arg0, %arg1 : (tensor<1x1x1x1x13x1x3xi1>, tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1>
+ return %0 : tensor<1x1x1x1x13x21x3xi1>
+}
+
+// -----
+
+func.func @test_logical_xor(%arg0: tensor<1x1x1x1x13x1x3xi1>, %arg1: tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1> {
+ // expected-error@+1 {{'tosa.logical_xor' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.logical_xor %arg0, %arg1 : (tensor<1x1x1x1x13x1x3xi1>, tensor<1x1x1x1x13x21x3xi1>) -> tensor<1x1x1x1x13x21x3xi1>
+ return %0 : tensor<1x1x1x1x13x21x3xi1>
+}
+
+// -----
+
+func.func @test_max(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x13x21x1xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.maximum' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.maximum %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x13x21x1xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_min(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x1x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.minimum' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.minimum %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x1x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_mul(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x13x1x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
+ // expected-error@+1 {{'tosa.mul' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.mul %arg0, %arg1, %shift : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x13x1x3xf32>, tensor<1xi8>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_pow(%arg0: tensor<1x1x1x1x13x21x3xf32>, %arg1: tensor<1x1x1x1x13x21x1xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.pow' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.pow %arg0, %arg1 : (tensor<1x1x1x1x13x21x3xf32>, tensor<1x1x1x1x13x21x1xf32>) -> tensor<1x1x1x1x13x21x3xf32>
+ return %0 : tensor<1x1x1x1x13x21x3xf32>
+}
+
+// -----
+
+func.func @test_sub(%arg0: tensor<1x1x1x1x1x21x3xf32>, %arg1: tensor<1x1x1x1x13x21x3xf32>) -> tensor<1x1x1x1x13x21x3xf32> {
+ // expected-error@+1 {{'tosa.sub' op failed level check: operand rank(shape) <= MAX_RANK}}
+ %0 = tosa.sub %arg0, %arg1 : (tensor<1x1x1x1x1x21x3xf32>, tensor<1x1x1x1x13x21x3xf32>) -> te...
[truncated]
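A complementary hypothetical case (again mine, not from the truncated patch): every dimension fits within (1 << 31) - 1, but the total tensor size in bytes exceeds (1 << 32) - 1 at the 8K level, so the size check fires instead of the dimension check.

```mlir
// 1073741824 * 8 = 2^33 f32 elements, i.e. 2^35 bytes, which exceeds
// (1 << (MAX_LOG2_SIZE + 1)) - 1 = 4294967295 at the 8K level, even
// though each dimension is below (1 << 31) - 1.
func.func @test_tensor_too_large(%arg0: tensor<1073741824x8xf32>) -> tensor<1073741824x8xf32> {
  // expected-error@+1 {{'tosa.identity' op failed level check: operand tensor size (in bytes) <= (1<<MAX_LOG2_SIZE+1) - 1}}
  %0 = tosa.identity %arg0 : (tensor<1073741824x8xf32>) -> tensor<1073741824x8xf32>
  return %0 : tensor<1073741824x8xf32>
}
```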
✅ With the latest revision this PR passed the C/C++ code formatter.
Thanks @tatwaichong, mostly had questions about the conformance of these checks to the specification. Will other checks, such as MAX_NESTING, come in future changes?
@lhutton1 Agreed, I think it is worthwhile to implement the MAX_NESTING check in future changes.
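For reference, a minimal sketch of what such a MAX_NESTING check might look like in this pass. This is not part of the PR; the helper name and the region-walking traversal are assumptions, and the spec's notion of nesting may count only control-flow constructs rather than all enclosing regions.

```cpp
// Hypothetical follow-up sketch: walk up the region tree from the op and
// compare the nesting depth against the level's MAX_NESTING bound.
bool levelCheckMaxNesting(Operation *op) {
  int32_t depth = 0;
  for (Region *region = op->getParentRegion(); region;
       region = region->getParentRegion())
    ++depth;
  if (depth > tosaLevel.MAX_NESTING) {
    op->emitOpError() << "failed level check: nesting depth <= MAX_NESTING";
    return false;
  }
  return true;
}
```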
Thanks for the updates @tatwaichong, had a couple more questions, otherwise LGTM.
Thanks for the updates, apologies for the back and forth on this!
Improve the PR as suggested, and add tests.
Add the following types of level check to consolidate the level validity:

- Complete rank level checks for operations.
- Add MAX_LOG2_SIZE level check: the maximum value is 63 when the level is set to "none" and 31 when the level is set to "8K".
- Add MAX_TENSOR_LIST_SIZE level check: the maximum value is 256 when the level is set to "none" and 64 when the level is set to "8K".
- TOSA 1.0 spec does not allow operations with dynamic shapes, so an error should be raised instead.

Co-authored-by: TatWai Chong <[email protected]>
Change-Id: I797fafe504219e43950824c04839c7187065fe8e
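For concreteness, substituting the 8K-level value MAX_LOG2_SIZE = 31 into the formulas used in levelCheckRankAndSizes gives the concrete bounds:

$$
\text{max\_dim} = 2^{31} - 1 = 2147483647, \qquad
\text{max\_size} = 2^{32} - 1 = 4294967295 \text{ bytes}.
$$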
Thanks for the changes @tatwaichong, LGTM!
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/140/builds/18107
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/203/builds/3371
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/204/builds/2181
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/205/builds/2162
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/130/builds/10704
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/116/builds/11001
Will revert this PR.
This reverts commit ccf1bfc.
Can this please be reverted, @Jerry-Ge?
Add the following types of level check to consolidate the level validity:

- Complete rank level checks for operations.
- Add MAX_LOG2_SIZE level check: the maximum value is 63 when the level is set to "none" and 31 when the level is set to "8K".
- Add MAX_TENSOR_LIST_SIZE level check: the maximum value is 256 when the level is set to "none" and 64 when the level is set to "8K".
- TOSA 1.0 spec does not allow operations with dynamic shapes, so an error should be raised instead.

Co-authored-by: TatWai Chong <[email protected]>
Co-authored-by: Tai Ly <[email protected]>