@@ -284,3 +284,142 @@ for.cond: ; preds = %for.body, %entry
for.end: ; preds = %for.cond
ret void
}
+
+ ; Test that WidenPointerInductionRecipes are ordered after the other header phis
+ define void @outside_lattice(ptr noalias %p, ptr noalias %q, i32 %n) {
+ ; DEFAULT-LABEL: @outside_lattice(
+ ; DEFAULT-NEXT: entry:
+ ; DEFAULT-NEXT: [[TMP0:%.*]] = zext i32 [[N:%.*]] to i64
+ ; DEFAULT-NEXT: [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 1)
+ ; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 4
+ ; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+ ; DEFAULT: vector.scevcheck:
+ ; DEFAULT-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
+ ; DEFAULT-NEXT: [[TMP1:%.*]] = add i32 [[UMAX]], -1
+ ; DEFAULT-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+ ; DEFAULT-NEXT: br i1 [[TMP2]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+ ; DEFAULT: vector.ph:
+ ; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX1]], 4
+ ; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX1]], [[N_MOD_VF]]
+ ; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
+ ; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr null, i64 [[TMP3]]
+ ; DEFAULT-NEXT: [[IND_END2:%.*]] = trunc i64 [[N_VEC]] to i32
+ ; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
+ ; DEFAULT: vector.body:
+ ; DEFAULT-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+ ; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+ ; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+ ; DEFAULT-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 4, i64 8, i64 12>
+ ; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
+ ; DEFAULT-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX]], 0
+ ; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[P:%.*]], i32 [[TMP4]]
+ ; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i32 0
+ ; DEFAULT-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 8
+ ; DEFAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i32 [[TMP4]]
+ ; DEFAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
+ ; DEFAULT-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP8]], align 4
+ ; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+ ; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+ ; DEFAULT-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 16
+ ; DEFAULT-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; DEFAULT-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+ ; DEFAULT: middle.block:
+ ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX1]], [[N_VEC]]
+ ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+ ; DEFAULT: scalar.ph:
+ ; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[ENTRY:%.*]] ], [ null, [[VECTOR_SCEVCHECK]] ]
+ ; DEFAULT-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i32 [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+ ; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+ ; DEFAULT: for.body:
+ ; DEFAULT-NEXT: [[IV_PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_PTR_NEXT:%.*]], [[FOR_BODY]] ]
+ ; DEFAULT-NEXT: [[IV_INT:%.*]] = phi i32 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[IV_INT_NEXT:%.*]], [[FOR_BODY]] ]
+ ; DEFAULT-NEXT: [[P_GEP:%.*]] = getelementptr inbounds ptr, ptr [[P]], i32 [[IV_INT]]
+ ; DEFAULT-NEXT: store ptr [[IV_PTR]], ptr [[P_GEP]], align 8
+ ; DEFAULT-NEXT: [[Q_GEP:%.*]] = getelementptr inbounds i32, ptr [[Q]], i32 [[IV_INT]]
+ ; DEFAULT-NEXT: store i32 [[IV_INT]], ptr [[Q_GEP]], align 4
+ ; DEFAULT-NEXT: [[IV_INT_NEXT]] = add i32 [[IV_INT]], 1
+ ; DEFAULT-NEXT: [[IV_PTR_NEXT]] = getelementptr inbounds i32, ptr [[IV_PTR]], i32 1
+ ; DEFAULT-NEXT: [[DONE:%.*]] = icmp ult i32 [[IV_INT_NEXT]], [[N]]
+ ; DEFAULT-NEXT: br i1 [[DONE]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP7:![0-9]+]]
+ ; DEFAULT: for.end:
+ ; DEFAULT-NEXT: ret void
+ ;
+ ; STRIDED-LABEL: @outside_lattice(
+ ; STRIDED-NEXT: entry:
+ ; STRIDED-NEXT: [[TMP0:%.*]] = zext i32 [[N:%.*]] to i64
+ ; STRIDED-NEXT: [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 1)
+ ; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 4
+ ; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+ ; STRIDED: vector.scevcheck:
+ ; STRIDED-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[N]], i32 1)
+ ; STRIDED-NEXT: [[TMP1:%.*]] = add i32 [[UMAX]], -1
+ ; STRIDED-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+ ; STRIDED-NEXT: br i1 [[TMP2]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+ ; STRIDED: vector.ph:
+ ; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX1]], 4
+ ; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX1]], [[N_MOD_VF]]
+ ; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
+ ; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr null, i64 [[TMP3]]
+ ; STRIDED-NEXT: [[IND_END2:%.*]] = trunc i64 [[N_VEC]] to i32
+ ; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
+ ; STRIDED: vector.body:
+ ; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+ ; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+ ; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+ ; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> <i64 0, i64 4, i64 8, i64 12>
+ ; STRIDED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
+ ; STRIDED-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX]], 0
+ ; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[P:%.*]], i32 [[TMP4]]
+ ; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i32 0
+ ; STRIDED-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 8
+ ; STRIDED-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[Q:%.*]], i32 [[TMP4]]
+ ; STRIDED-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
+ ; STRIDED-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP8]], align 4
+ ; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+ ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
+ ; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 16
+ ; STRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; STRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+ ; STRIDED: middle.block:
+ ; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX1]], [[N_VEC]]
+ ; STRIDED-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+ ; STRIDED: scalar.ph:
+ ; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ null, [[ENTRY:%.*]] ], [ null, [[VECTOR_SCEVCHECK]] ]
+ ; STRIDED-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i32 [ [[IND_END2]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+ ; STRIDED-NEXT: br label [[FOR_BODY:%.*]]
+ ; STRIDED: for.body:
+ ; STRIDED-NEXT: [[IV_PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_PTR_NEXT:%.*]], [[FOR_BODY]] ]
+ ; STRIDED-NEXT: [[IV_INT:%.*]] = phi i32 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[IV_INT_NEXT:%.*]], [[FOR_BODY]] ]
+ ; STRIDED-NEXT: [[P_GEP:%.*]] = getelementptr inbounds ptr, ptr [[P]], i32 [[IV_INT]]
+ ; STRIDED-NEXT: store ptr [[IV_PTR]], ptr [[P_GEP]], align 8
+ ; STRIDED-NEXT: [[Q_GEP:%.*]] = getelementptr inbounds i32, ptr [[Q]], i32 [[IV_INT]]
+ ; STRIDED-NEXT: store i32 [[IV_INT]], ptr [[Q_GEP]], align 4
+ ; STRIDED-NEXT: [[IV_INT_NEXT]] = add i32 [[IV_INT]], 1
+ ; STRIDED-NEXT: [[IV_PTR_NEXT]] = getelementptr inbounds i32, ptr [[IV_PTR]], i32 1
+ ; STRIDED-NEXT: [[DONE:%.*]] = icmp ult i32 [[IV_INT_NEXT]], [[N]]
+ ; STRIDED-NEXT: br i1 [[DONE]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP9:![0-9]+]]
+ ; STRIDED: for.end:
+ ; STRIDED-NEXT: ret void
+ ;
+ entry:
+ br label %for.body
+
+ for.body: ; preds = %for.body, %entry
+ %iv.ptr = phi ptr [ null, %entry ], [ %iv.ptr.next, %for.body ]
+ %iv.int = phi i32 [ 0, %entry ], [ %iv.int.next, %for.body ]
+
+ %p.gep = getelementptr inbounds ptr, ptr %p, i32 %iv.int
+ store ptr %iv.ptr, ptr %p.gep
+
+ %q.gep = getelementptr inbounds i32, ptr %q, i32 %iv.int
+ store i32 %iv.int, ptr %q.gep
+
+ %iv.int.next = add i32 %iv.int, 1
+ %iv.ptr.next = getelementptr inbounds i32, ptr %iv.ptr, i32 1
+
+ %done = icmp ult i32 %iv.int.next, %n
+ br i1 %done, label %for.body, label %for.end
+
+ for.end: ; preds = %for.body
+ ret void
+ }