; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -scalable-vectorization=on -force-target-supports-scalable-vectors -passes=loop-vectorize < %s -S | FileCheck %s

; Test that the loop vectorizer correctly handles a loop with both an integer
; induction variable and a pointer induction variable (%end.0) whose final
; value escapes the loop. With scalable vectorization forced on, the pointer
; IV must be widened via a pointer phi + vector GEP, and its escape value
; (IND_ESCAPE) must be recomputed in the middle block for the exit phi.
define ptr @foo(ptr %y, float %alpha, i32 %N) {
; CHECK-LABEL: define ptr @foo(
; CHECK-SAME: ptr [[Y:%.*]], float [[ALPHA:%.*]], i32 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[N]], 0
; CHECK-NEXT: br i1 [[CMP3]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; CHECK: [[FOR_BODY_PREHEADER]]:
; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP0]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[Y]], i64 [[N_VEC]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x float> poison, float [[ALPHA]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x float> [[BROADCAST_SPLATINSERT]], <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[Y]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 1
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 1, [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP3]], 0
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP6]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[DOTSPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 1 x i64> [[DOTSPLAT]], [[TMP7]]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul <vscale x 1 x i64> [[TMP8]], shufflevector (<vscale x 1 x i64> insertelement (<vscale x 1 x i64> poison, i64 1, i64 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 1 x i64> [[VECTOR_GEP]]
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x float>, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = fadd fast <vscale x 1 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: store <vscale x 1 x float> [[TMP13]], ptr [[TMP12]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-NEXT: [[CMO:%.*]] = sub i64 [[N_VEC]], 1
; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[Y]], i64 [[CMO]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[Y]], %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; CHECK-NEXT: [[END_0_LCSSA:%.*]] = phi ptr [ [[END_0:%.*]], %[[FOR_BODY]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: [[RESULT:%.*]] = phi ptr [ [[Y]], %[[ENTRY]] ], [ [[END_0_LCSSA]], %[[FOR_COND_CLEANUP_LOOPEXIT]] ]
; CHECK-NEXT: ret ptr [[RESULT]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[END_0]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP15]], [[ALPHA]]
; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[END_0]], i64 1
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
  %cmp3 = icmp sgt i32 %N, 0
  br i1 %cmp3, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:
  %wide.trip.count = zext nneg i32 %N to i64
  br label %for.body

for.cond.cleanup:
  %result = phi ptr [ %y, %entry ], [ %end.0, %for.body ]
  ret ptr %result

for.body:
  %end.0 = phi ptr [ %y, %for.body.preheader ], [ %incdec.ptr, %for.body ]
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %y, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd fast float %0, %alpha
  store float %add, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  %incdec.ptr = getelementptr inbounds i8, ptr %end.0, i64 1
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.