Commit d1f1103

Add lit tests.
1 parent: fd48251

3 files changed: +144, -10 lines


mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp

Lines changed: 3 additions & 8 deletions

@@ -101,10 +101,10 @@ static bool tileDividesIterationDomain(Range loopRange) {
 /// `tileSize`, i.e., `min(tileSize, range.end() - iv)`.
 static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
                                        Range loopRange, Value iv,
-                                       Value tileSize) {
+                                       OpFoldResult tileSize) {
   std::optional<int64_t> ts = getConstantIntValue(tileSize);
   if (ts && ts.value() == 1)
-    return getAsOpFoldResult(tileSize);
+    return tileSize;
 
   if (tileDividesIterationDomain(
           Range{loopRange.offset, loopRange.size, tileSize}))
@@ -130,12 +130,7 @@ static Operation *cloneOpAndUpdateDestinationArgs(RewriterBase &rewriter,
   Operation *clonedOp = rewriter.clone(*op);
   if (auto destinationStyleOp =
           dyn_cast<DestinationStyleOpInterface>(clonedOp)) {
-    // Note that this is assuming that
-    auto [start, end] = destinationStyleOp.getDpsInitsPositionRange();
-    assert((end - start == newDestArgs.size()) &&
-           "expected as many new destination args as number of inits of the "
-           "operation");
-    clonedOp->setOperands(start, end - start, newDestArgs);
+    destinationStyleOp.getDpsInitsMutable().assign(newDestArgs);
   }
   return clonedOp;
 }
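
For reference, the `min(tileSize, range.end() - iv)` bound documented on `getBoundedTileSize` is materialized as an `affine.min` over the induction variable, which is exactly what the CHECK lines in the tests below match. A minimal standalone sketch of that clamp for a tile size of 10 (the function and operand names here are illustrative, not from this commit):

// Clamp a static tile size of 10 against the remaining extent of the
// iteration domain: min(10, %ub - %iv), where %iv is the loop induction
// variable and %ub the (dynamic) upper bound.
#bound = affine_map<(d0)[s0] -> (10, -d0 + s0)>

func.func @bounded_tile_size(%iv : index, %ub : index) -> index {
  %ts = affine.min #bound(%iv)[%ub]
  return %ts : index
}

When the tile size is 1 or evenly divides the iteration domain, the helper skips this and returns the tile size directly, which is what the early returns above preserve for the new `OpFoldResult` parameter.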

mlir/test/Interfaces/TilingInterface/tile-using-scfforall.mlir

Lines changed: 131 additions & 2 deletions

@@ -1,9 +1,9 @@
-// RUN: mlir-opt -test-tiling-interface=tile-using-scf-forall -split-input-file %s | FileCheck %s
+// RUN: mlir-opt -test-tiling-interface=tile-using-scf-forall -split-input-file %s | FileCheck %s
 
 func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
     %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
   %0 = linalg.matmul {__internal_transform__ = "simple_gemm"}
-      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
+      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
       outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
@@ -35,3 +35,132 @@ func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
 // CHECK:      tensor.parallel_insert_slice %[[GEMM_TILE]] into %[[INIT]]
 // CHECK-SAME:     [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
 // CHECK:    return %[[RESULT]]
+
+// -----
+
+#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
+#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
+func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
+  %init0 = tensor.empty() : tensor<128x300x200xf32>
+  %init1 = tensor.empty() : tensor<300x128x200xf32>
+  %0:2 = linalg.generic {
+      indexing_maps = [#map0, #map1, #map2],
+      iterator_types = ["parallel", "parallel", "parallel"]}
+      {__internal_transform__ = "parallel_generic_transpose"}
+      ins(%arg0 : tensor<128x200x300xf32>)
+      outs(%init0, %init1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
+    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32):
+      linalg.yield %b0, %b0 : f32, f32
+  } -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>)
+  return %0#0, %0#1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>
+}
+// CHECK-DAG:   #[[$MAP0:.+]] = affine_map<(d0) -> (10, -d0 + 128)>
+// CHECK-LABEL: func.func @multi_result(
+// CHECK-SAME:      %[[ARG0:[a-zA-Z0-9]+]]: tensor<128x200x300xf32>)
+// CHECK-DAG:     %[[INIT0:.+]] = tensor.empty()
+// CHECK-DAG:     %[[INIT1:.+]] = tensor.empty()
+// CHECK:         %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) = (0, 0) to (128, 300) step (10, 20)
+// CHECK-SAME:        shared_outs(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
+// CHECK:           %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])
+// CHECK:           %[[ARG_TILE:.+]] = tensor.extract_slice %[[ARG0]]
+// CHECK-SAME:          [%[[IV0]], 0, %[[IV1]]] [%[[TS_Y]], 200, 20] [1, 1, 1]
+// CHECK-DAG:       %[[INIT0_TILE:.+]] = tensor.extract_slice %[[ARG1]]
+// CHECK-SAME:          [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
+// CHECK-DAG:       %[[INIT1_TILE:.+]] = tensor.extract_slice %[[ARG2]]
+// CHECK-SAME:          [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
+// CHECK:           %[[RESULT_TILE:.+]]:2 = linalg.generic
+// CHECK-SAME:          ins(%[[ARG_TILE]] :
+// CHECK-SAME:          outs(%[[INIT0_TILE]], %[[INIT1_TILE]] :
+// CHECK:           scf.forall.in_parallel {
+// CHECK-DAG:         tensor.parallel_insert_slice %[[RESULT_TILE]]#0 into %[[ARG1]][%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
+// CHECK-DAG:         tensor.parallel_insert_slice %[[RESULT_TILE]]#1 into %[[ARG2]][%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
+// CHECK:           }
+// CHECK:         return %[[OUTER]]#0, %[[OUTER]]#1
+
+// -----
+
+func.func @conv2D(%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>,
+    %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
+  %0 = linalg.conv_2d_nhwc_hwcf {
+      strides = dense<[2, 3]> : tensor<2xi64>,
+      dilation = dense<[4, 5]> : tensor<2xi64>,
+      __internal_transform__ = "simple_conv"}
+      ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
+      outs(%arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
+  return %0 : tensor<?x?x?x?xf32>
+}
+// CHECK-DAG:   #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (10, -d0 + s0)>
+// CHECK-DAG:   #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (20, -d0 + s0)>
+// CHECK-DAG:   #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (30, -d0 + s0)>
+// CHECK-DAG:   #[[$MAP3:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 2 - 2)>
+// CHECK-DAG:   #[[$MAP4:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 3 - 3)>
+// CHECK-LABEL: func.func @conv2D(
+// CHECK-SAME:      %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
+// CHECK-SAME:      %[[FILTER:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
+// CHECK-SAME:      %[[INIT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
+// CHECK-DAG:     %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG:     %[[C1:.+]] = arith.constant 1 : index
+// CHECK-DAG:     %[[C2:.+]] = arith.constant 2 : index
+// CHECK-DAG:     %[[C3:.+]] = arith.constant 3 : index
+// CHECK-DAG:     %[[N:.+]] = tensor.dim %[[INPUT]], %[[C0]]
+// CHECK-DAG:     %[[C:.+]] = tensor.dim %[[INPUT]], %[[C3]]
+// CHECK-DAG:     %[[P:.+]] = tensor.dim %[[FILTER]], %[[C0]]
+// CHECK-DAG:     %[[Q:.+]] = tensor.dim %[[FILTER]], %[[C1]]
+// CHECK-DAG:     %[[F:.+]] = tensor.dim %[[FILTER]], %[[C3]]
+// CHECK-DAG:     %[[R:.+]] = tensor.dim %[[INIT]], %[[C1]]
+// CHECK-DAG:     %[[S:.+]] = tensor.dim %[[INIT]], %[[C2]]
+// CHECK:         %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]], %[[IV2:[a-zA-Z0-9]+]]) =
+// CHECK-SAME:        (0, 0, 0) to (%[[P]], %[[Q]], %[[C]]) step (10, 20, 30) shared_outs(%[[INIT0:.+]] = %[[INIT]])
+// CHECK-DAG:       %[[TS_P:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[P]]]
+// CHECK-DAG:       %[[TS_Q:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[Q]]]
+// CHECK-DAG:       %[[TS_C:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[C]]]
+// CHECK-DAG:       %[[TS_H:.+]] = affine.apply #[[$MAP3]](%[[TS_P]])[%[[R]]]
+// CHECK-DAG:       %[[TS_W:.+]] = affine.apply #[[$MAP4]](%[[TS_Q]])[%[[S]]]
+// CHECK-DAG:       %[[INPUT_TILE:.+]] = tensor.extract_slice %[[INPUT]]
+// CHECK-SAME:          [0, %[[IV0]], %[[IV1]], %[[IV2]]] [%[[N]], %[[TS_H]], %[[TS_W]], %[[TS_C]]]
+// CHECK-DAG:       %[[FILTER_TILE:.+]] = tensor.extract_slice %[[FILTER]]
+// CHECK-SAME:          [%[[IV0]], %[[IV1]], %[[IV2]], 0] [%[[TS_P]], %[[TS_Q]], %[[TS_C]], %[[F]]]
+// CHECK-DAG:       %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT0]]
+// CHECK-SAME:          [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]
+// CHECK:           %[[CONV_TILE:.+]] = linalg.conv_2d_nhwc_hwcf
+// CHECK-SAME:          dilation = dense<[4, 5]> : tensor<2xi64>, strides = dense<[2, 3]> : tensor<2xi64>
+// CHECK-SAME:          ins(%[[INPUT_TILE]], %[[FILTER_TILE]] :
+// CHECK-SAME:          outs(%[[INIT_TILE]] :
+// CHECK:           scf.forall.in_parallel
+// CHECK:             tensor.parallel_insert_slice %[[CONV_TILE]] into %[[INIT0]]
+// CHECK-SAME:          [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]] [1, 1, 1, 1]
+// CHECK:         return %[[RESULT]]
+
+// -----
+
+// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0, d1) -> (d0 + d1)>
+
+func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  // Check that we correctly amend "linalg.index" results.
+
+  %0 = linalg.generic {
+      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+                       affine_map<(d0, d1) -> (d0, d1)>],
+      iterator_types = ["parallel", "parallel"]}
+      {__internal_transform__ = "indexed_semantics"}
+      ins(%arg0: tensor<?x?xf32>)
+      outs(%arg1: tensor<?x?xf32>) {
+    ^bb0(%arg2: f32, %arg3: f32):
+      %1 = linalg.index 0 : index
+      %2 = linalg.index 1 : index
+      %3 = arith.addi %1, %2 : index
+      %4 = arith.index_cast %3 : index to i64
+      %5 = arith.uitofp %4 : i64 to f32
+      %6 = arith.addf %5, %arg2 : f32
+      linalg.yield %6 : f32
+  } -> (tensor<?x?xf32>)
+  return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: @indexed_semantics
+// CHECK:         scf.forall (%[[I0:.+]], %[[I1:.+]]) =
+// CHECK:           %[[INDEX0:.+]] = linalg.index 0
+// CHECK:           %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX0]], %[[I0]])
+// CHECK:           %[[INDEX1:.+]] = linalg.index 1
+// CHECK:           %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX1]], %[[I1]])
+// CHECK:           arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]
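
The matmul, transpose, and convolution cases above all follow the same tiled skeleton that these CHECK lines verify: clamp the tile sizes, extract slices of the operands, compute on the tiles, and write results back through `tensor.parallel_insert_slice`. As a compact reference, here is a hand-written sketch of that shape for the `simple_gemm` case, with 10x20 tiles on M and N and the reduction dimension left untiled (illustrative IR with made-up value names, not the verbatim pass output):

#tsm = affine_map<(d0)[s0] -> (10, -d0 + s0)>
#tsn = affine_map<(d0)[s0] -> (20, -d0 + s0)>

func.func @tiled_matmul_sketch(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>,
    %c : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  // Problem sizes of the original matmul.
  %m = tensor.dim %a, %c0 : tensor<?x?xf32>
  %n = tensor.dim %b, %c1 : tensor<?x?xf32>
  %k = tensor.dim %a, %c1 : tensor<?x?xf32>
  %0 = scf.forall (%iv0, %iv1) = (0, 0) to (%m, %n) step (10, 20)
      shared_outs(%out = %c) -> (tensor<?x?xf32>) {
    // Clamp the tile sizes at the domain boundary.
    %tsm = affine.min #tsm(%iv0)[%m]
    %tsn = affine.min #tsn(%iv1)[%n]
    // Slice the operands down to the current tile.
    %a_tile = tensor.extract_slice %a[%iv0, 0] [%tsm, %k] [1, 1]
        : tensor<?x?xf32> to tensor<?x?xf32>
    %b_tile = tensor.extract_slice %b[0, %iv1] [%k, %tsn] [1, 1]
        : tensor<?x?xf32> to tensor<?x?xf32>
    %c_tile = tensor.extract_slice %out[%iv0, %iv1] [%tsm, %tsn] [1, 1]
        : tensor<?x?xf32> to tensor<?x?xf32>
    %gemm = linalg.matmul
        ins(%a_tile, %b_tile : tensor<?x?xf32>, tensor<?x?xf32>)
        outs(%c_tile : tensor<?x?xf32>) -> tensor<?x?xf32>
    // Each iteration writes its tile back into the shared output.
    scf.forall.in_parallel {
      tensor.parallel_insert_slice %gemm into %out[%iv0, %iv1] [%tsm, %tsn] [1, 1]
          : tensor<?x?xf32> into tensor<?x?xf32>
    }
  }
  return %0 : tensor<?x?xf32>
}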

mlir/test/lib/Interfaces/TilingInterface/TestTilingInterface.cpp

Lines changed: 10 additions & 0 deletions

@@ -580,7 +580,17 @@ void TestTilingInterfacePass::addTestPatterns(MLIRContext *context,
     return;
   }
   if (testTilingForAll) {
+    // 1. Tiling M and N dims of `linalg.matmul` on tensors.
     addPatternForTilingUsingForall(context, patterns, "simple_gemm", {10, 20});
+    // 2. Tiling 3D parallel generic op which implements a transpose.
+    addPatternForTilingUsingForall(context, patterns,
+                                   "parallel_generic_transpose", {10, 0, 20});
+    // 3. Tiling 2D conv op.
+    addPatternForTilingUsingForall(context, patterns, "simple_conv",
+                                   {0, 0, 0, 0, 10, 20, 30});
+    // 4. Tiling a simple op with `linalg.index` inside.
+    addPatternForTilingUsingForall(context, patterns, "indexed_semantics",
+                                   {10, 20});
     return;
   }
   if (testTileConsumerAndFuseProducer) {
