@@ -17,9 +17,42 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
; CHECK: [[FOR_BODY_LR_PH]]:
; CHECK-NEXT: [[MUL15:%.*]] = mul nsw i32 [[ADD]], [[K]]
; CHECK-NEXT: [[MUL17:%.*]] = mul nsw i32 [[ADD12]], [[K]]
+ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[K]], 2
+ ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+ ; CHECK: [[VECTOR_PH]]:
+ ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[K]], 2
+ ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[K]], [[N_MOD_VF]]
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; CHECK: [[VECTOR_BODY]]:
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[INDEX]], [[MUL15]]
+ ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[INDEX]], [[MUL17]]
+ ; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP0]] to i64
+ ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_A_GRID_COERCE]], i64 [[TMP2]]
+ ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP3]], i32 0
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP4]], align 1
+ ; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP1]] to i64
+ ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_B_GRID_COERCE]], i64 [[TMP5]]
+ ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i32 0
+ ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP7]], align 1
+ ; CHECK-NEXT: [[TMP8:%.*]] = zext <2 x i8> [[WIDE_LOAD]] to <2 x i32>
+ ; CHECK-NEXT: [[TMP9:%.*]] = zext <2 x i8> [[WIDE_LOAD1]] to <2 x i32>
+ ; CHECK-NEXT: [[TMP10:%.*]] = mul nuw nsw <2 x i32> [[TMP9]], [[TMP8]]
+ ; CHECK-NEXT: [[TMP11]] = add <2 x i32> [[TMP10]], [[VEC_PHI]]
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
+ ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+ ; CHECK: [[MIDDLE_BLOCK]]:
+ ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP11]])
+ ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+ ; CHECK: [[SCALAR_PH]]:
+ ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
+ ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
- ; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
- ; CHECK-NEXT: [[ADD24_LCSSA:%.*]] = phi i32 [ [[ADD24:%.*]], %[[FOR_BODY]] ]
+ ; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+ ; CHECK-NEXT: [[ADD24_LCSSA:%.*]] = phi i32 [ [[ADD24:%.*]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[TMP15:%.*]] = trunc i32 [[ADD24_LCSSA]] to i8
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
@@ -31,8 +64,8 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
; CHECK-NEXT: store i8 [[V_ACC_0_LCSSA]], ptr addrspace(1) [[ARRAYIDX28]], align 1
; CHECK-NEXT: br label %[[IF_END]]
; CHECK: [[FOR_BODY]]:
- ; CHECK-NEXT: [[K_IDX_046:%.*]] = phi i32 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
- ; CHECK-NEXT: [[V_ACC_045:%.*]] = phi i32 [ 0, %[[FOR_BODY_LR_PH]] ], [ [[ADD24]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: [[K_IDX_046:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: [[V_ACC_045:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD24]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ADD16:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL15]]
; CHECK-NEXT: [[ADD18:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL17]]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD16]] to i64
@@ -47,7 +80,7 @@ define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce
; CHECK-NEXT: [[ADD24]] = add i32 [[MUL23]], [[V_ACC_045]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_IDX_046]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[K]]
- ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]]
+ ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: [[IF_END]]:
; CHECK-NEXT: ret void
;
@@ -99,3 +132,9 @@ for.body: ; preds = %for.body, %for.body
if.end:                                           ; preds = %for.cond.cleanup, %entry
  ret void
}
+ ;.
+ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+ ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+ ; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]], [[META1]]}
+ ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+ ;.