; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=loop-vectorize -amdgpu-coerce-illegal-types=1 < %s -S -o - | FileCheck %s

; REQUIRES: asserts

target triple = "amdgcn-amd-amdhsa"

; With -amdgpu-coerce-illegal-types=1, the i8 multiply-accumulate reduction
; loop below must be vectorized with VF=2: the CHECK lines pin <2 x i8> wide
; loads, a <2 x i32> mul/add chain, and a final llvm.vector.reduce.add.v2i32.

; Function Attrs: mustprogress nofree norecurse nosync nounwind memory(argmem: readwrite)
define protected amdgpu_kernel void @func_int8(ptr addrspace(1) %p_a_grid.coerce, ptr addrspace(1) %p_b_grid.coerce, ptr addrspace(1) %p_c_grid.coerce, i32 %m, i32 %n, i32 %k, i1 %c, i32 %add, i32 %add12) {
; CHECK-LABEL: define protected amdgpu_kernel void @func_int8(
; CHECK-SAME: ptr addrspace(1) [[P_A_GRID_COERCE:%.*]], ptr addrspace(1) [[P_B_GRID_COERCE:%.*]], ptr addrspace(1) [[P_C_GRID_COERCE:%.*]], i32 [[M:%.*]], i32 [[N:%.*]], i32 [[K:%.*]], i1 [[C:%.*]], i32 [[ADD:%.*]], i32 [[ADD12:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 [[C]], label %[[FOR_COND_PREHEADER:.*]], label %[[IF_END:.*]]
; CHECK: [[FOR_COND_PREHEADER]]:
; CHECK-NEXT: [[CMP1444:%.*]] = icmp sgt i32 [[K]], 0
; CHECK-NEXT: br i1 [[CMP1444]], label %[[FOR_BODY_LR_PH:.*]], label %[[FOR_COND_CLEANUP:.*]]
; CHECK: [[FOR_BODY_LR_PH]]:
; CHECK-NEXT: [[MUL15:%.*]] = mul nsw i32 [[ADD]], [[K]]
; CHECK-NEXT: [[MUL17:%.*]] = mul nsw i32 [[ADD12]], [[K]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[K]], 2
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[K]], 2
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[K]], [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[TMP0]], [[MUL15]]
; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[TMP0]], [[MUL17]]
; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_A_GRID_COERCE]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP4]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP5]], align 1
; CHECK-NEXT: [[TMP6:%.*]] = sext i32 [[TMP2]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_B_GRID_COERCE]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 0
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i8>, ptr addrspace(1) [[TMP8]], align 1
; CHECK-NEXT: [[TMP9:%.*]] = zext <2 x i8> [[WIDE_LOAD]] to <2 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = zext <2 x i8> [[WIDE_LOAD1]] to <2 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul nuw nsw <2 x i32> [[TMP10]], [[TMP9]]
; CHECK-NEXT: [[TMP12]] = add <2 x i32> [[TMP11]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP12]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP14]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_LR_PH]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; CHECK-NEXT: [[ADD24_LCSSA:%.*]] = phi i32 [ [[ADD24:%.*]], %[[FOR_BODY]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[TMP15:%.*]] = trunc i32 [[ADD24_LCSSA]] to i8
; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
; CHECK: [[FOR_COND_CLEANUP]]:
; CHECK-NEXT: [[V_ACC_0_LCSSA:%.*]] = phi i8 [ 0, %[[FOR_COND_PREHEADER]] ], [ [[TMP15]], %[[FOR_COND_CLEANUP_LOOPEXIT]] ]
; CHECK-NEXT: [[MUL25:%.*]] = mul nsw i32 [[ADD]], [[N]]
; CHECK-NEXT: [[ADD26:%.*]] = add nsw i32 [[ADD12]], [[MUL25]]
; CHECK-NEXT: [[IDXPROM27:%.*]] = sext i32 [[ADD26]] to i64
; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_C_GRID_COERCE]], i64 [[IDXPROM27]]
; CHECK-NEXT: store i8 [[V_ACC_0_LCSSA]], ptr addrspace(1) [[ARRAYIDX28]], align 1
; CHECK-NEXT: br label %[[IF_END]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[K_IDX_046:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[V_ACC_045:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD24]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ADD16:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL15]]
; CHECK-NEXT: [[ADD18:%.*]] = add nsw i32 [[K_IDX_046]], [[MUL17]]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD16]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_A_GRID_COERCE]], i64 [[IDXPROM]]
; CHECK-NEXT: [[ARRAYIDX_VAL:%.*]] = load i8, ptr addrspace(1) [[ARRAYIDX]], align 1
; CHECK-NEXT: [[IDXPROM19:%.*]] = sext i32 [[ADD18]] to i64
; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[P_B_GRID_COERCE]], i64 [[IDXPROM19]]
; CHECK-NEXT: [[ARRAYIDX20_VAL:%.*]] = load i8, ptr addrspace(1) [[ARRAYIDX20]], align 1
; CHECK-NEXT: [[CONV_I47:%.*]] = zext i8 [[ARRAYIDX_VAL]] to i32
; CHECK-NEXT: [[CONV_I4248:%.*]] = zext i8 [[ARRAYIDX20_VAL]] to i32
; CHECK-NEXT: [[MUL23:%.*]] = mul nuw nsw i32 [[CONV_I4248]], [[CONV_I47]]
; CHECK-NEXT: [[ADD24]] = add i32 [[MUL23]], [[V_ACC_045]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_IDX_046]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[K]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: [[IF_END]]:
; CHECK-NEXT: ret void
;
entry:
  br i1 %c, label %for.cond.preheader, label %if.end

for.cond.preheader:                               ; preds = %entry
  %cmp1444 = icmp sgt i32 %k, 0
  br i1 %cmp1444, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %for.cond.preheader
  %mul15 = mul nsw i32 %add, %k
  %mul17 = mul nsw i32 %add12, %k
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %add24.lcssa = phi i32 [ %add24, %for.body ]
  %17 = trunc i32 %add24.lcssa to i8
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %for.cond.preheader
  %v_acc.0.lcssa = phi i8 [ 0, %for.cond.preheader ], [ %17, %for.cond.cleanup.loopexit ]
  %mul25 = mul nsw i32 %add, %n
  %add26 = add nsw i32 %add12, %mul25
  %idxprom27 = sext i32 %add26 to i64
  %arrayidx28 = getelementptr inbounds i8, ptr addrspace(1) %p_c_grid.coerce, i64 %idxprom27
  store i8 %v_acc.0.lcssa, ptr addrspace(1) %arrayidx28, align 1
  br label %if.end

for.body:                                         ; preds = %for.body, %for.body.lr.ph
  %k_idx.046 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %v_acc.045 = phi i32 [ 0, %for.body.lr.ph ], [ %add24, %for.body ]
  %add16 = add nsw i32 %k_idx.046, %mul15
  %add18 = add nsw i32 %k_idx.046, %mul17
  %idxprom = sext i32 %add16 to i64
  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %p_a_grid.coerce, i64 %idxprom
  %arrayidx.val = load i8, ptr addrspace(1) %arrayidx, align 1
  %idxprom19 = sext i32 %add18 to i64
  %arrayidx20 = getelementptr inbounds i8, ptr addrspace(1) %p_b_grid.coerce, i64 %idxprom19
  %arrayidx20.val = load i8, ptr addrspace(1) %arrayidx20, align 1
  %conv.i47 = zext i8 %arrayidx.val to i32
  %conv.i4248 = zext i8 %arrayidx20.val to i32
  %mul23 = mul nuw nsw i32 %conv.i4248, %conv.i47
  %add24 = add i32 %mul23, %v_acc.045
  %inc = add nuw nsw i32 %k_idx.046, 1
  %exitcond.not = icmp eq i32 %inc, %k
  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body

if.end:                                           ; preds = %for.cond.cleanup, %entry
  ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]], [[META1]]}
; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
;.