Skip to content

Commit b79db39

Browse files
authored
[mlir][linalg] Support ParamType in vector_sizes option of VectorizeOp transform (#87557)
1 parent 470aefb commit b79db39

File tree

6 files changed

+249
-18
lines changed

6 files changed

+249
-18
lines changed

mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2138,25 +2138,16 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
21382138
}];
21392139

21402140
let arguments = (ins TransformHandleTypeInterface:$target,
2141-
Variadic<TransformHandleTypeInterface>:$vector_sizes,
2141+
Variadic<TransformAnyParamTypeOrAnyHandle>:$vector_sizes,
2142+
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
2143+
$static_vector_sizes,
21422144
OptionalAttr<UnitAttr>:$vectorize_nd_extract,
21432145
DefaultValuedOptionalAttr<DenseBoolArrayAttr, "{}">:
2144-
$scalable_sizes,
2145-
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
2146-
$static_vector_sizes);
2146+
$scalable_sizes);
21472147

21482148
let results = (outs);
2149-
let assemblyFormat = [{
2150-
$target oilist(
2151-
`vector_sizes` custom<DynamicIndexList>($vector_sizes,
2152-
$static_vector_sizes,
2153-
type($vector_sizes),
2154-
$scalable_sizes) |
2155-
`vectorize_nd_extract` $vectorize_nd_extract
2156-
)
2157-
attr-dict
2158-
`:` type($target)
2159-
}];
2149+
2150+
let hasCustomAssemblyFormat = 1;
21602151
let hasVerifier = 1;
21612152

21622153
let extraClassDeclaration = [{

mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3122,6 +3122,81 @@ transform::VectorizeChildrenAndApplyPatternsOp::applyToOne(
31223122
//===----------------------------------------------------------------------===//
31233123
// VectorizeOp
31243124
//===----------------------------------------------------------------------===//
3125+
3126+
static const StringLiteral kVectorSizesKeyword = "vector_sizes";
3127+
3128+
ParseResult transform::VectorizeOp::parse(OpAsmParser &parser,
3129+
OperationState &result) {
3130+
OpAsmParser::UnresolvedOperand target;
3131+
SmallVector<OpAsmParser::UnresolvedOperand> dynamicSizes;
3132+
DenseI64ArrayAttr staticSizes;
3133+
SmallVector<Type> operandTypes;
3134+
llvm::SMLoc operandLoc;
3135+
DenseBoolArrayAttr scalableVals;
3136+
3137+
if (parser.parseOperand(target) || parser.getCurrentLocation(&operandLoc))
3138+
return ParseResult::failure();
3139+
3140+
if (succeeded(parser.parseOptionalKeyword(kVectorSizesKeyword))) {
3141+
if (failed(parseDynamicIndexList(parser, dynamicSizes, staticSizes,
3142+
scalableVals)))
3143+
return ParseResult::failure();
3144+
}
3145+
3146+
if (succeeded(parser.parseOptionalKeyword(
3147+
getVectorizeNdExtractAttrName(result.name))))
3148+
result.addAttribute(getVectorizeNdExtractAttrName(result.name),
3149+
parser.getBuilder().getUnitAttr());
3150+
3151+
if (parser.parseOptionalAttrDict(result.attributes) ||
3152+
parser.parseColonTypeList(operandTypes))
3153+
return ParseResult::failure();
3154+
3155+
if (operandTypes.size() != dynamicSizes.size() + 1) {
3156+
return parser.emitError(operandLoc)
3157+
<< "expected " << dynamicSizes.size() + 1 << " operand type(s)";
3158+
}
3159+
if (parser.resolveOperand(target, operandTypes.front(), result.operands) ||
3160+
parser.resolveOperands(dynamicSizes, ArrayRef(operandTypes).drop_front(),
3161+
operandLoc, result.operands)) {
3162+
return failure();
3163+
}
3164+
3165+
if (scalableVals)
3166+
result.addAttribute(getScalableSizesAttrName(result.name), scalableVals);
3167+
if (staticSizes)
3168+
result.addAttribute(getStaticVectorSizesAttrName(result.name), staticSizes);
3169+
3170+
return success();
3171+
}
3172+
3173+
void transform::VectorizeOp::print(OpAsmPrinter &p) {
3174+
p << ' ' << getTarget() << ' ';
3175+
if (!getMixedVectorSizes().empty()) {
3176+
p << kVectorSizesKeyword << ' ';
3177+
printDynamicIndexList(p, getOperation(), getVectorSizes(),
3178+
getStaticVectorSizesAttr(),
3179+
/*valueTypes=*/{}, getScalableSizesAttr(),
3180+
OpAsmParser::Delimiter::Square);
3181+
}
3182+
3183+
if (getVectorizeNdExtract())
3184+
p << getVectorizeNdExtractAttrName() << ' ';
3185+
3186+
p.printOptionalAttrDict(
3187+
(*this)->getAttrs(),
3188+
/*elidedAttrs=*/{
3189+
getScalableSizesAttrName(getOperation()->getName()),
3190+
getStaticVectorSizesAttrName(getOperation()->getName())});
3191+
p << " : ";
3192+
p << getTarget().getType();
3193+
if (!getVectorSizes().empty()) {
3194+
p << ", ";
3195+
llvm::interleaveComma(getVectorSizes(), p,
3196+
[&](Value operand) { p << operand.getType(); });
3197+
}
3198+
}
3199+
31253200
DiagnosedSilenceableFailure transform::VectorizeOp::apply(
31263201
transform::TransformRewriter &rewriter,
31273202
mlir::transform::TransformResults &transformResults,
@@ -3136,6 +3211,13 @@ DiagnosedSilenceableFailure transform::VectorizeOp::apply(
31363211
auto attr = sz.get<Attribute>();
31373212
vectorSizes.push_back(cast<IntegerAttr>(attr).getInt());
31383213
continue;
3214+
} else if (sz.is<Value>() && isa<ParamType>(sz.get<Value>().getType())) {
3215+
ArrayRef<Attribute> params = state.getParams(sz.get<Value>());
3216+
if (params.size() != 1)
3217+
return emitSilenceableFailure(getLoc()) << "expected a single param";
3218+
vectorSizes.push_back(
3219+
cast<IntegerAttr>(params.front()).getValue().getSExtValue());
3220+
continue;
31393221
}
31403222

31413223
auto szPayloads = state.getPayloadOps(sz.get<Value>());

mlir/test/Dialect/Linalg/transform-ops-invalid.mlir

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,3 +71,24 @@ transform.sequence failures(propagate) {
7171
: (!transform.any_op) -> !transform.op<"linalg.generic">
7272

7373
}
74+
75+
// -----
76+
77+
transform.sequence failures(propagate) {
78+
^bb0(%arg0: !transform.any_op):
79+
%0 = transform.param.constant 2 : i64 -> !transform.param<i64>
80+
// expected-error@below {{custom op 'transform.structured.vectorize' expected 2 operand type(s)}}
81+
transform.structured.vectorize %arg0 vector_sizes [%0, 2] : !transform.any_op, !transform.param<i64>, !transform.param<i64>
82+
83+
}
84+
85+
// -----
86+
87+
transform.sequence failures(propagate) {
88+
^bb0(%arg0: !transform.any_op):
89+
%0 = transform.param.constant 2 : i64 -> !transform.param<i64>
90+
// expected-error@below {{expected ']' in dynamic index list}}
91+
// expected-error@below {{custom op 'transform.structured.vectorize' expected SSA value or integer}}
92+
transform.structured.vectorize %arg0 vector_sizes [%0 : !transform.param<i64>, 2] : !transform.any_op, !transform.param<i64>
93+
94+
}

mlir/test/Dialect/Linalg/transform-ops.mlir

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
1+
// RUN: mlir-opt %s --split-input-file | mlir-opt | FileCheck %s
22

33
transform.sequence failures(propagate) {
44
^bb1(%arg0: !transform.any_op):
@@ -57,3 +57,12 @@ transform.sequence failures(propagate) {
5757
%1:2 = transform.structured.fuse_into_containing_op %arg2 into %loop
5858
: (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
5959
}
60+
61+
// -----
62+
63+
transform.sequence failures(propagate) {
64+
^bb0(%arg0: !transform.any_op):
65+
// CHECK: transform.structured.vectorize %arg0 : !transform.any_op
66+
transform.structured.vectorize %arg0 vector_sizes [] : !transform.any_op
67+
68+
}

mlir/test/Dialect/Linalg/vectorization.mlir

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,81 @@ module attributes {transform.with_named_sequence} {
3636

3737
// -----
3838

39+
func.func @vectorize_dynamic_identity_with_constant(%arg0: tensor<?xf32>,
40+
%arg1: tensor<?xf32>,
41+
%arg2: tensor<?xf32>) -> tensor<?xf32> {
42+
%c4 = arith.constant 4 : index
43+
%0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>,
44+
affine_map<(d0) -> (d0)>,
45+
affine_map<(d0) -> (d0)>],
46+
iterator_types = ["parallel"] }
47+
ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
48+
outs(%arg2 : tensor<?xf32>) {
49+
^bb(%in0: f32, %in1: f32, %out: f32) :
50+
%0 = arith.addf %in0, %in1 : f32
51+
linalg.yield %0 : f32
52+
} -> tensor<?xf32>
53+
return %0 : tensor<?xf32>
54+
}
55+
56+
// CHECK-LABEL: @vectorize_dynamic_identity_with_constant
57+
// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
58+
// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
59+
// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
60+
// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
61+
// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
62+
// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
63+
// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
64+
// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
65+
66+
module attributes {transform.with_named_sequence} {
67+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
68+
%0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
69+
%size = transform.structured.match ops{["arith.constant"]} in %arg1 : (!transform.any_op) -> !transform.any_op
70+
transform.structured.vectorize %0 vector_sizes [%size] : !transform.any_op, !transform.any_op
71+
transform.yield
72+
}
73+
}
74+
75+
// -----
76+
77+
func.func @vectorize_dynamic_identity_with_param(%arg0: tensor<?xf32>,
78+
%arg1: tensor<?xf32>,
79+
%arg2: tensor<?xf32>) -> tensor<?xf32> {
80+
%0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>,
81+
affine_map<(d0) -> (d0)>,
82+
affine_map<(d0) -> (d0)>],
83+
iterator_types = ["parallel"] }
84+
ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
85+
outs(%arg2 : tensor<?xf32>) {
86+
^bb(%in0: f32, %in1: f32, %out: f32) :
87+
%0 = arith.addf %in0, %in1 : f32
88+
linalg.yield %0 : f32
89+
} -> tensor<?xf32>
90+
return %0 : tensor<?xf32>
91+
}
92+
93+
// CHECK-LABEL: @vectorize_dynamic_identity_with_param
94+
// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
95+
// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
96+
// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
97+
// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
98+
// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
99+
// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
100+
// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
101+
// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
102+
103+
module attributes {transform.with_named_sequence} {
104+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
105+
%0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
106+
%vector_size = transform.param.constant 4 : i64 -> !transform.param<i64>
107+
transform.structured.vectorize %0 vector_sizes [%vector_size] : !transform.any_op, !transform.param<i64>
108+
transform.yield
109+
}
110+
}
111+
112+
// -----
113+
39114
func.func @vectorize_dynamic_1d_broadcast(%arg0: tensor<?xf32>,
40115
%arg1: tensor<?xf32>,
41116
%arg2: tensor<?xf32>) -> tensor<?xf32> {
@@ -231,6 +306,49 @@ module attributes {transform.with_named_sequence} {
231306

232307
// -----
233308

309+
func.func @vectorize_dynamic_transpose_reduction_with_params(%arg0: tensor<?x?x?xf32>,
310+
%arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
311+
%0 = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
312+
affine_map<(d0, d1, d2) -> (d2, d1)>],
313+
iterator_types = ["reduction", "parallel", "parallel"] }
314+
ins(%arg0 : tensor<?x?x?xf32>)
315+
outs(%arg1 : tensor<?x?xf32>) {
316+
^bb(%in: f32, %out: f32) :
317+
%0 = arith.addf %in, %out : f32
318+
linalg.yield %0 : f32
319+
} -> tensor<?x?xf32>
320+
return %0 : tensor<?x?xf32>
321+
}
322+
323+
module attributes {transform.with_named_sequence} {
324+
transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
325+
%0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
326+
%vector_size_0 = transform.param.constant 4 : i64 -> !transform.param<i64>
327+
%vector_size_2 = transform.param.constant 16 : i64 -> !transform.param<i64>
328+
transform.structured.vectorize %0 vector_sizes
329+
[%vector_size_0, 8, %vector_size_2] : !transform.any_op, !transform.param<i64>, !transform.param<i64>
330+
transform.yield
331+
}
332+
}
333+
334+
// CHECK-LABEL: @vectorize_dynamic_transpose_reduction_with_params(
335+
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?x?xf32>,
336+
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
337+
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
338+
// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32>
339+
// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
340+
// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
341+
// CHECK: %[[VAL_6:.*]] = arith.constant 2 : index
342+
// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?x?xf32>
343+
// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]], %[[VAL_7]] : vector<4x8x16xi1>
344+
// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true, true]} : tensor<?x?x?xf32>, vector<4x8x16xf32> } : vector<4x8x16xi1> -> vector<4x8x16xf32>
345+
// CHECK: %[[VAL_13:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_5]] : vector<16x8xi1>
346+
// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_13]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<8x16xf32> } : vector<16x8xi1> -> vector<8x16xf32>
347+
// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_10]] { vector.multi_reduction <add>, %[[VAL_11]], %[[VAL_14]] [0] : vector<4x8x16xf32> to vector<8x16xf32> } : vector<4x8x16xi1> -> vector<8x16xf32>
348+
// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_13]] { vector.transfer_write %[[VAL_15]], %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : vector<8x16xf32>, tensor<?x?xf32> } : vector<16x8xi1> -> tensor<?x?xf32>
349+
350+
// -----
351+
234352
func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
235353
%arg1: tensor<8x?xf32>,
236354
%arg2: tensor<8x?xf32>) -> tensor<8x?xf32> {

mlir/test/python/dialects/transform_structured_ext.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,17 @@ def testVectorizeMixed(target):
210210
# CHECK: transform.sequence
211211
# CHECK: %[[V0:.*]] = transform.structured.match
212212
# CHECK: transform.structured.vectorize
213-
# CHECK-SAME: vector_sizes [%[[V0]] : !transform.any_op, 4]
213+
# CHECK-SAME: vector_sizes [%[[V0]], 4]
214+
215+
216+
@run
217+
@create_sequence
218+
def testVectorizeEmpty(target):
219+
structured.VectorizeOp(target, [])
220+
# CHECK-LABEL: TEST: testVectorizeEmpty
221+
# CHECK: transform.sequence
222+
# CHECK: transform.structured.vectorize
223+
# CHECK-NOT: vector_sizes
214224

215225

216226
@run
@@ -223,7 +233,7 @@ def testVectorizeScalable(target):
223233
# CHECK: transform.sequence
224234
# CHECK-DAG: %[[V0:.*]] = transform.structured.match
225235
# CHECK-DAG: transform.structured.vectorize
226-
# CHECK-SAME: vector_sizes [16, [%[[V0]] : !transform.any_op], [4], [8]]
236+
# CHECK-SAME: vector_sizes [16, [%[[V0]]], [4], [8]]
227237

228238

229239
@run

0 commit comments

Comments (0)