; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4

-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-target-instruction-cost=1 -S < %s | FileCheck %s
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-target-instruction-cost=1 -S < %s | FileCheck %s --check-prefix=CHECK-NODOTPROD
+; RUN: opt -mattr=+dotprod -passes=loop-vectorize -force-vector-interleave=1 -force-target-instruction-cost=1 -S < %s | FileCheck %s --check-prefix=CHECK-DOTPROD

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-unknown-elf"

define i32 @dotp(ptr %a, ptr %b) #0 {
-; CHECK-LABEL: define i32 @dotp(
-; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: iter.check:
-; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP11]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[ENTRY:%.*]]
-; CHECK: vector.main.loop.iter.check:
-; CHECK-NEXT: br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = mul <16 x i32> [[TMP6]], [[TMP3]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
-; CHECK-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
-; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP13]]
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[SCALAR_PH]]
-; CHECK: vec.epilog.ph:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 0, [[TMP15]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 0, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 4
-; CHECK-NEXT: [[TMP18:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: vec.epilog.vector.body:
-; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT6:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ [[TMP18]], [[SCALAR_PH]] ], [ [[TMP27:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP19]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP21]], align 1
-; CHECK-NEXT: [[TMP22:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32>
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP19]]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP23]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP24]], align 1
-; CHECK-NEXT: [[TMP25:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32>
-; CHECK-NEXT: [[TMP26:%.*]] = mul <vscale x 4 x i32> [[TMP25]], [[TMP22]]
-; CHECK-NEXT: [[TMP27]] = add <vscale x 4 x i32> [[TMP26]], [[VEC_PHI3]]
-; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX2]], [[TMP17]]
-; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: vec.epilog.middle.block:
-; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP27]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 0, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
-; CHECK: vec.epilog.scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 0, [[MIDDLE_BLOCK]] ], [ 0, [[ITER_CHECK:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX7:%.*]] = phi i32 [ [[TMP29]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[ITER_CHECK]] ]
-; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX7]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ]
-; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD_A:%.*]] = load i8, ptr [[GEP_A]], align 1
-; CHECK-NEXT: [[EXT_A:%.*]] = zext i8 [[LOAD_A]] to i32
-; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
-; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[GEP_B]], align 1
-; CHECK-NEXT: [[EXT_B:%.*]] = zext i8 [[LOAD_B]] to i32
-; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[EXT_B]], [[EXT_A]]
-; CHECK-NEXT: [[ADD]] = add i32 [[MUL]], [[ACCUM]]
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 0
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]]
-; CHECK: for.exit:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY1]] ], [ [[TMP9]], [[MIDDLE_BLOCK1]] ], [ [[TMP29]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
-; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+; CHECK-NODOTPROD-LABEL: define i32 @dotp(
+; CHECK-NODOTPROD-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NODOTPROD-NEXT: iter.check:
+; CHECK-NODOTPROD-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NODOTPROD-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NODOTPROD-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP1]]
+; CHECK-NODOTPROD-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
+; CHECK-NODOTPROD: vector.main.loop.iter.check:
+; CHECK-NODOTPROD-NEXT: br i1 true, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NODOTPROD: vector.ph:
+; CHECK-NODOTPROD-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK-NODOTPROD: vector.body:
+; CHECK-NODOTPROD-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NODOTPROD-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NODOTPROD-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NODOTPROD-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NODOTPROD-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP3]], i32 0
+; CHECK-NODOTPROD-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
+; CHECK-NODOTPROD-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-NODOTPROD-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-NODOTPROD-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i32 0
+; CHECK-NODOTPROD-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-NODOTPROD-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-NODOTPROD-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]]
+; CHECK-NODOTPROD-NEXT: [[TMP10]] = add <16 x i32> [[TMP9]], [[VEC_PHI]]
+; CHECK-NODOTPROD-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NODOTPROD-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
+; CHECK-NODOTPROD-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NODOTPROD: middle.block:
+; CHECK-NODOTPROD-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP10]])
+; CHECK-NODOTPROD-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
+; CHECK-NODOTPROD: vec.epilog.iter.check:
+; CHECK-NODOTPROD-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NODOTPROD-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
+; CHECK-NODOTPROD-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP14]]
+; CHECK-NODOTPROD-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NODOTPROD: vec.epilog.ph:
+; CHECK-NODOTPROD-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NODOTPROD-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP12]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NODOTPROD-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NODOTPROD-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 4
+; CHECK-NODOTPROD-NEXT: [[N_MOD_VF:%.*]] = urem i64 0, [[TMP16]]
+; CHECK-NODOTPROD-NEXT: [[N_VEC:%.*]] = sub i64 0, [[N_MOD_VF]]
+; CHECK-NODOTPROD-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NODOTPROD-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], 4
+; CHECK-NODOTPROD-NEXT: [[TMP19:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
+; CHECK-NODOTPROD-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
+; CHECK-NODOTPROD: vec.epilog.vector.body:
+; CHECK-NODOTPROD-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NODOTPROD-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ [[TMP19]], [[VEC_EPILOG_PH]] ], [ [[TMP28:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NODOTPROD-NEXT: [[TMP20:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-NODOTPROD-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP20]]
+; CHECK-NODOTPROD-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[TMP21]], i32 0
+; CHECK-NODOTPROD-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP22]], align 1
+; CHECK-NODOTPROD-NEXT: [[TMP23:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32>
+; CHECK-NODOTPROD-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP20]]
+; CHECK-NODOTPROD-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP24]], i32 0
+; CHECK-NODOTPROD-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP25]], align 1
+; CHECK-NODOTPROD-NEXT: [[TMP26:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32>
+; CHECK-NODOTPROD-NEXT: [[TMP27:%.*]] = mul <vscale x 4 x i32> [[TMP26]], [[TMP23]]
+; CHECK-NODOTPROD-NEXT: [[TMP28]] = add <vscale x 4 x i32> [[TMP27]], [[VEC_PHI3]]
+; CHECK-NODOTPROD-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX2]], [[TMP18]]
+; CHECK-NODOTPROD-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]]
+; CHECK-NODOTPROD-NEXT: br i1 [[TMP29]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; CHECK-DOTPROD-LABEL: define i32 @dotp(
+; CHECK-DOTPROD-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-DOTPROD-NEXT: iter.check:
+; CHECK-DOTPROD-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-DOTPROD-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-DOTPROD-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP1]]
+; CHECK-DOTPROD-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
+; CHECK-DOTPROD: vector.main.loop.iter.check:
+; CHECK-DOTPROD-NEXT: br i1 true, label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-DOTPROD: vector.ph:
+; CHECK-DOTPROD-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK-DOTPROD: vector.body:
+; CHECK-DOTPROD-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-DOTPROD-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-DOTPROD-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; CHECK-DOTPROD-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-DOTPROD-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP3]], i32 0
+; CHECK-DOTPROD-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
+; CHECK-DOTPROD-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-DOTPROD-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP2]]
+; CHECK-DOTPROD-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP6]], i32 0
+; CHECK-DOTPROD-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-DOTPROD-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-DOTPROD-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]]
+; CHECK-DOTPROD-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.experimental.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]])
+; CHECK-DOTPROD-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-DOTPROD-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
+; CHECK-DOTPROD-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-DOTPROD: middle.block:
+; CHECK-DOTPROD-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-DOTPROD-NEXT: br i1 true, label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
+; CHECK-DOTPROD: vec.epilog.iter.check:
+; CHECK-DOTPROD-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-DOTPROD-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
+; CHECK-DOTPROD-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 0, [[TMP13]]
+; CHECK-DOTPROD-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-DOTPROD: vec.epilog.ph:
+; CHECK-DOTPROD-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 0, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-DOTPROD-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP11]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-DOTPROD-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-DOTPROD-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 4
+; CHECK-DOTPROD-NEXT: [[N_MOD_VF:%.*]] = urem i64 0, [[TMP15]]
+; CHECK-DOTPROD-NEXT: [[N_VEC:%.*]] = sub i64 0, [[N_MOD_VF]]
+; CHECK-DOTPROD-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-DOTPROD-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 4
+; CHECK-DOTPROD-NEXT: [[TMP18:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
+; CHECK-DOTPROD-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
+; CHECK-DOTPROD: vec.epilog.vector.body:
+; CHECK-DOTPROD-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-DOTPROD-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ [[TMP18]], [[VEC_EPILOG_PH]] ], [ [[TMP27:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-DOTPROD-NEXT: [[TMP19:%.*]] = add i64 [[INDEX2]], 0
+; CHECK-DOTPROD-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP19]]
+; CHECK-DOTPROD-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP20]], i32 0
+; CHECK-DOTPROD-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP21]], align 1
+; CHECK-DOTPROD-NEXT: [[TMP22:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32>
+; CHECK-DOTPROD-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP19]]
+; CHECK-DOTPROD-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP23]], i32 0
+; CHECK-DOTPROD-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP24]], align 1
+; CHECK-DOTPROD-NEXT: [[TMP25:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32>
+; CHECK-DOTPROD-NEXT: [[TMP26:%.*]] = mul <vscale x 4 x i32> [[TMP25]], [[TMP22]]
+; CHECK-DOTPROD-NEXT: [[TMP27]] = add <vscale x 4 x i32> [[TMP26]], [[VEC_PHI3]]
+; CHECK-DOTPROD-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX2]], [[TMP17]]
+; CHECK-DOTPROD-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]]
+; CHECK-DOTPROD-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
  br label %for.body
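The diff view is truncated at this point. For reference, the scalar loop that these assertions describe is roughly the following, reconstructed from the scalar for.body/for.exit CHECK lines in the removed block above; this is a sketch, not the verbatim test body (the #0 attribute group and any loop metadata are omitted):

define i32 @dotp(ptr %a, ptr %b) {
entry:
  br label %for.body

for.body:                                 ; i8 dot product accumulated into i32
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %accum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %gep.a = getelementptr i8, ptr %a, i64 %iv
  %load.a = load i8, ptr %gep.a, align 1
  %ext.a = zext i8 %load.a to i32
  %gep.b = getelementptr i8, ptr %b, i64 %iv
  %load.b = load i8, ptr %gep.b, align 1
  %ext.b = zext i8 %load.b to i32
  %mul = mul i32 %ext.b, %ext.a
  %add = add i32 %mul, %accum
  %iv.next = add i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, 0
  br i1 %exitcond.not, label %for.exit, label %for.body

for.exit:
  ret i32 %add
}

As the two check prefixes show, with -mattr=+dotprod the vectorizer keeps a <4 x i32> accumulator fed by llvm.experimental.vector.partial.reduce.add (CHECK-DOTPROD), while without the feature the main vector loop uses a plain <16 x i32> add reduction (CHECK-NODOTPROD).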