@@ -20,18 +20,17 @@ func.func @test_inline(%ptr : !llvm.ptr) -> i32 {
 
 // -----
 
-func.func @inner_func_not_inlinable() -> !llvm.ptr<f64> {
-  %0 = llvm.mlir.constant(0 : i32) : i32
-  %1 = llvm.alloca %0 x f64 : (i32) -> !llvm.ptr<f64>
-  return %1 : !llvm.ptr<f64>
+func.func @inner_func_not_inlinable() -> i32 {
+  %0 = llvm.inline_asm has_side_effects "foo", "bar" : () -> i32
+  return %0 : i32
 }
 
-// CHECK-LABEL: func.func @test_not_inline() -> !llvm.ptr<f64> {
-// CHECK-NEXT: %[[RES:.*]] = call @inner_func_not_inlinable() : () -> !llvm.ptr<f64>
-// CHECK-NEXT: return %[[RES]] : !llvm.ptr<f64>
-func.func @test_not_inline() -> !llvm.ptr<f64> {
-  %0 = call @inner_func_not_inlinable() : () -> !llvm.ptr<f64>
-  return %0 : !llvm.ptr<f64>
+// CHECK-LABEL: func.func @test_not_inline() -> i32 {
+// CHECK-NEXT: %[[RES:.*]] = call @inner_func_not_inlinable() : () -> i32
+// CHECK-NEXT: return %[[RES]] : i32
+func.func @test_not_inline() -> i32 {
+  %0 = call @inner_func_not_inlinable() : () -> i32
+  return %0 : i32
 }
 
 // -----
@@ -203,3 +202,76 @@ llvm.func @caller() {
   llvm.call @callee() {branch_weights = dense<42> : vector<1xi32>} : () -> ()
   llvm.return
 }
+
+// -----
+
+llvm.func @static_alloca() -> f32 {
+  %0 = llvm.mlir.constant(4 : i32) : i32
+  %1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
+  %2 = llvm.load %1 : !llvm.ptr -> f32
+  llvm.return %2 : f32
+}
+
+llvm.func @dynamic_alloca(%size : i32) -> f32 {
+  %0 = llvm.add %size, %size : i32
+  %1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
+  %2 = llvm.load %1 : !llvm.ptr -> f32
+  llvm.return %2 : f32
+}
+
+// CHECK-LABEL: llvm.func @test_inline
+llvm.func @test_inline(%cond : i1, %size : i32) -> f32 {
+  // Check that the static alloca was moved to the entry block after inlining
+  // with its size defined by a constant.
+  // CHECK-NOT: ^{{.+}}:
+  // CHECK-NEXT: llvm.mlir.constant
+  // CHECK-NEXT: llvm.alloca
+  // CHECK: llvm.cond_br
+  llvm.cond_br %cond, ^bb1, ^bb2
+  // CHECK: ^{{.+}}:
+^bb1:
+  // CHECK-NOT: llvm.call @static_alloca
+  %0 = llvm.call @static_alloca() : () -> f32
+  // CHECK: llvm.br
+  llvm.br ^bb3(%0 : f32)
+  // CHECK: ^{{.+}}:
+^bb2:
+  // Check that the dynamic alloca was inlined, but that it was not moved to the
+  // entry block.
+  // CHECK: llvm.add
+  // CHECK-NEXT: llvm.alloca
+  // CHECK-NOT: llvm.call @dynamic_alloca
+  %1 = llvm.call @dynamic_alloca(%size) : (i32) -> f32
+  // CHECK: llvm.br
+  llvm.br ^bb3(%1 : f32)
+  // CHECK: ^{{.+}}:
+^bb3(%arg : f32):
+  llvm.return %arg : f32
+}
+
+// -----
+
+llvm.func @static_alloca_not_in_entry(%cond : i1) -> f32 {
+  llvm.cond_br %cond, ^bb1, ^bb2
+^bb1:
+  %0 = llvm.mlir.constant(4 : i32) : i32
+  %1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
+  llvm.br ^bb3(%1 : !llvm.ptr)
+^bb2:
+  %2 = llvm.mlir.constant(8 : i32) : i32
+  %3 = llvm.alloca %2 x f32 : (i32) -> !llvm.ptr
+  llvm.br ^bb3(%3 : !llvm.ptr)
+^bb3(%ptr : !llvm.ptr):
+  %4 = llvm.load %ptr : !llvm.ptr -> f32
+  llvm.return %4 : f32
+}
+
+// CHECK-LABEL: llvm.func @test_inline
+llvm.func @test_inline(%cond : i1) -> f32 {
+  // Make sure the alloca was not moved to the entry block.
+  // CHECK-NOT: llvm.alloca
+  // CHECK: llvm.cond_br
+  // CHECK: llvm.alloca
+  %0 = llvm.call @static_alloca_not_in_entry(%cond) : (i1) -> f32
+  llvm.return %0 : f32
+}
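
For reference, a sketch of the caller body that the CHECK lines in the first @test_inline
describe: a hypothetical post-inlining result reconstructed from the directives above, not
verbatim compiler output (SSA value names are illustrative). The static alloca and its
constant size operand are hoisted into the caller's entry block ahead of the llvm.cond_br,
while the dynamic alloca stays in the block it was inlined into, since its size is only
known at runtime:

  llvm.func @test_inline(%cond : i1, %size : i32) -> f32 {
    // Static alloca hoisted to the entry block, per the CHECK-NEXT lines.
    %0 = llvm.mlir.constant(4 : i32) : i32
    %1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
    llvm.cond_br %cond, ^bb1, ^bb2
  ^bb1:
    // Inlined body of @static_alloca, minus the hoisted alloca.
    %2 = llvm.load %1 : !llvm.ptr -> f32
    llvm.br ^bb3(%2 : f32)
  ^bb2:
    // Inlined body of @dynamic_alloca; the alloca stays next to llvm.add.
    %3 = llvm.add %size, %size : i32
    %4 = llvm.alloca %3 x f32 : (i32) -> !llvm.ptr
    %5 = llvm.load %4 : !llvm.ptr -> f32
    llvm.br ^bb3(%5 : f32)
  ^bb3(%arg : f32):
    llvm.return %arg : f32
  }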