@@ -55,10 +55,10 @@ namespace utils {
 // .operation(NumOfLoops(_OR(EqualsTo(5), EqualsTo(4))))
 // .input(MatchAll(), HasStaticShape())
 // .output(MatchAll(), HasStaticShape())
-// .input(MatchOne(0), HasMap(BroadcastableProjectedPermutation(), &mapOperandA))
-// .input(MatchOne(1), HasMap(Any(), &mapOperandB))
-// .output(MatchOne(0), HasMap(BroadcastableProjectedPermutation(), &mapOperandC))
-// .region(MatchOne(0),
+// .input(MatchOne(0), HasMap(BroadcastableProjectedPermutation(),
+// &mapOperandA)) .input(MatchOne(1), HasMap(Any(), &mapOperandB))
+// .output(MatchOne(0), HasMap(BroadcastableProjectedPermutation(),
+// &mapOperandC)) .region(MatchOne(0),
 // WithOpChain<arith::MulFOp, arith::AddFOp>(operands));
 // // clang-format on
 // if (!matmulMatcher.match(linalgOp))
@@ -72,7 +72,8 @@ namespace utils {
 // int64_t iParIter = operandCPosIterPar[0];
 // int64_t jParIter = operandCPosIterPar[1];

-// // Operand A: One parallel iterator (i) and two reduction ones (batch and k).
+// // Operand A: One parallel iterator (i) and two reduction ones (batch and
+// k).
 // // The batch dimension is optional.
 // llvm::SmallVector<int64_t> operandAPosIterPar = getIteratorPos(
 // linalgOp, mapOperandA, mlir::utils::IteratorType::parallel);
@@ -296,8 +297,7 @@ static bool hasReluBody(Operation *op, SmallVectorImpl<Value> *captured) {

   if (cmpPredicate == arith::CmpFPredicate::UGT ||
       cmpPredicate == arith::CmpFPredicate::UGE) {
-    if (cmpLhs == trueVal &&
-        mlir::utils::isZeroTensor(cmpRhs) &&
+    if (cmpLhs == trueVal && mlir::utils::isZeroTensor(cmpRhs) &&
         mlir::utils::isZeroTensor(falseVal)) {
       // case: %in > 0 ? %in : 0
       return (getOperand(cmpLhs, cmpRhs) || getOperand(cmpRhs, cmpLhs));
@@ -309,8 +309,7 @@ static bool hasReluBody(Operation *op, SmallVectorImpl<Value> *captured) {
     }
   } else if (cmpPredicate == arith::CmpFPredicate::ULT ||
              cmpPredicate == arith::CmpFPredicate::ULE) {
-    if (cmpLhs == falseVal &&
-        mlir::utils::isZeroTensor(cmpRhs) &&
+    if (cmpLhs == falseVal && mlir::utils::isZeroTensor(cmpRhs) &&
         mlir::utils::isZeroTensor(trueVal)) {
       // case: %in < 0 ? 0 : %in
       return (getOperand(cmpLhs, cmpRhs) || getOperand(cmpRhs, cmpLhs));
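
The two branches above accept the two spellings of the relu idiom: select the input against zero for UGT/UGE, or keep the input in the false slot for ULT/ULE. As a hedged, standalone sketch of the same predicate check (not part of this diff), assuming the yielded value comes from an arith::SelectOp fed by an arith::CmpFOp, and taking the zero test as a caller-supplied callback; `looksLikeReluSelect` and `isZero` are illustrative names only:

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "llvm/ADT/STLExtras.h"

// Sketch only: mirrors the UGT/UGE and ULT/ULE cases above for a select whose
// condition is a float compare against a zero value (zero test supplied by
// the caller); operand capture from the original code is intentionally omitted.
static bool looksLikeReluSelect(mlir::arith::SelectOp selectOp,
                                llvm::function_ref<bool(mlir::Value)> isZero) {
  auto cmpOp = selectOp.getCondition().getDefiningOp<mlir::arith::CmpFOp>();
  if (!cmpOp)
    return false;
  mlir::arith::CmpFPredicate pred = cmpOp.getPredicate();
  // case: %in > 0 ? %in : 0
  if (pred == mlir::arith::CmpFPredicate::UGT ||
      pred == mlir::arith::CmpFPredicate::UGE)
    return cmpOp.getLhs() == selectOp.getTrueValue() &&
           isZero(cmpOp.getRhs()) && isZero(selectOp.getFalseValue());
  // case: %in < 0 ? 0 : %in
  if (pred == mlir::arith::CmpFPredicate::ULT ||
      pred == mlir::arith::CmpFPredicate::ULE)
    return cmpOp.getLhs() == selectOp.getFalseValue() &&
           isZero(cmpOp.getRhs()) && isZero(selectOp.getTrueValue());
  return false;
}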
@@ -329,7 +328,7 @@ namespace {
 // Helper matcher functor for relu detection.
 struct WithReluBody {
   WithReluBody() = delete;
-  WithReluBody(SmallVectorImpl<Value> *captures) : captures(captures){};
+  WithReluBody(SmallVectorImpl<Value> *captures) : captures(captures) {};

   bool operator()(Region *region, Operation *op) {
     auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
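
Since WithReluBody exposes bool operator()(Region *, Operation *), it presumably slots into the same region-predicate position that WithOpChain fills in the commented-out matmul matcher above. A hedged usage sketch, reusing only DSL names already shown in this file; the combination itself is an assumption, not something this diff adds:

// SmallVector<Value, 2> linalgOperands;
// auto reluMatcher =
//     StructuredOpMatcher::make<linalg::LinalgOp>()
//         .output(MatchAll(), HasStaticShape())
//         .region(MatchOne(0), WithReluBody(&linalgOperands));
// if (!reluMatcher.match(linalgOp))
//   return false;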
@@ -361,7 +360,7 @@ bool isTwoDReluOp(linalg::LinalgOp linalgOp, SmallVectorImpl<Value> *operands) {
 // SmallVectorImpl<Value> *operands) {
 // SmallVector<Value, 2> linalgOperands;
 // // clang-format off
-// auto identityMatcher =
+// auto identityMatcher =
 // StructuredOpMatcher::make<linalg::LinalgOp>()
 // .output(MatchAll(), HasMap(Identity()))
 // .input(MatchAll(), HasMap(BroadcastableProjectedPermutation()))