; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN: -force-tail-folding-style=data-with-evl \
; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s | FileCheck %s --check-prefix=IF-EVL

; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN: -force-tail-folding-style=none \
; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s | FileCheck %s --check-prefix=NO-VP

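; Ordered (in-order) fadd reduction. The IF-EVL run checks the tail-folded,
; EVL-based vector loop produced by -force-tail-folding-style=data-with-evl;
; the NO-VP run checks that, with tail folding disabled and epilogue
; vectorization forbidden, the loop is left scalar.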
define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-LABEL: @fadd(
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]]
; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4
; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0
; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; IF-EVL-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]]
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP13]]
; IF-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 0
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP18]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> [[TMP16]], i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP22]], [[SUM_07]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[ADD_LCSSA]]
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
; NO-VP: for.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; NO-VP-NEXT: [[ADD]] = fadd float [[TMP0]], [[SUM_07]]
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: for.end:
; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ]
; NO-VP-NEXT: ret float [[ADD_LCSSA]]
;
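; Note how the IF-EVL checks above keep the reduction in order: the
; accumulator [[VEC_PHI]] stays a scalar float phi and each iteration's
; active lanes are folded into it with @llvm.vp.reduce.fadd, so the
; sequential floating-point summation order of the source loop below is
; preserved rather than reassociated into vector partial sums.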
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd float %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret float %add
}

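; Explicitly request vectorization of the loop. The fadd carries no
; reassociation fast-math flags (and -hints-allow-reordering=false forbids
; reordering), so the reduction may only be vectorized in order.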
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}