; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
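; The combination of -force-tail-folding-style=data-with-evl and
; -prefer-predicate-over-epilogue=predicate-dont-vectorize forces a tail-folded
; vector loop predicated on the explicit vector length (EVL) instead of a
; scalar epilogue, so the EVL recipe transform runs on this test.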

; Make sure we don't crash when transforming a VPWidenCastRecipe created without
; an underlying value to an EVL recipe. This occurs in this test via
; VPlanTransforms::truncateToMinimalBitwidths.
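;
; For illustration only (not checked by FileCheck): truncateToMinimalBitwidths
; narrows the loop's i32 computation, whose result only feeds an i8 store, down
; to i16. A scalar sketch of the rewrite, reconstructed from the CHECK lines
; below (shift amounts and operand spellings simplified):
;
;   %conv = zext i8 %0 to i32          ->   zext i8 %0 to i16
;   %mul16 = mul i32 0, %conv          ->   mul i16 0, ...
;   %shr35 = lshr i32 %mul16, 1        ->   lshr i16 ..., 1
;   %conv36 = trunc i32 %shr35 to i8   ->   trunc i16 ... to i8
;
; The narrowed widen-cast recipe is created by the transform itself and so has
; no underlying scalar instruction; that is the case the EVL transform must
; handle without crashing (see the @llvm.vp.zext call in the CHECK lines).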

define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-LABEL: define void @truncate_to_minimal_bitwidths_widen_cast_recipe(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], 1
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 2, [[TMP1]]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP0]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[AVL:%.*]] = sub i64 2, [[EVL_BASED_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[EVL_BASED_IV]], 0
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 0
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0(ptr align 1 [[TMP6]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 1 x i16> @llvm.vp.zext.nxv1i16.nxv1i8(<vscale x 1 x i8> [[VP_OP_LOAD]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: [[VP_OP:%.*]] = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> zeroinitializer, <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: [[VP_OP1:%.*]] = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> [[VP_OP]], <vscale x 1 x i16> trunc (<vscale x 1 x i32> splat (i32 1) to <vscale x 1 x i16>), <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 1 x i8> @llvm.vp.trunc.nxv1i8.nxv1i16(<vscale x 1 x i16> [[VP_OP1]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: call void @llvm.vp.scatter.nxv1i8.nxv1p0(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x ptr> align 1 zeroinitializer, <vscale x 1 x i1> splat (i1 true), i32 [[TMP3]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP3]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT: [[MUL16:%.*]] = mul i32 0, [[CONV]]
; CHECK-NEXT: [[SHR35:%.*]] = lshr i32 [[MUL16]], 1
; CHECK-NEXT: [[CONV36:%.*]] = trunc i32 [[SHR35]] to i8
; CHECK-NEXT: store i8 [[CONV36]], ptr null, align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop

loop:                                             ; preds = %loop, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src = getelementptr i8, ptr %src, i64 %iv
  %0 = load i8, ptr %gep.src, align 1
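  ; The i32 chain below only feeds an i8 store, which is what makes it a
  ; candidate for minimal-bitwidth narrowing (see the comment at the top).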
  %conv = zext i8 %0 to i32
  %mul16 = mul i32 0, %conv
  %shr35 = lshr i32 %mul16, 1
  %conv36 = trunc i32 %shr35 to i8
  store i8 %conv36, ptr null, align 1
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv, 1
  br i1 %ec, label %exit, label %loop

exit:                                             ; preds = %loop
  ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.