; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt -S -loop-reduce < %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"

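;; @mulvl123_addressing loads four consecutive scalable vectors from
;; %src.addr + %idx, reduces them with @llvm.umax, and steps the pointer by
;; four vector lengths per iteration while the index steps by one. The CHECK
;; lines expect LSR to materialize the vscale-scaled offsets once in the
;; entry block and address each load as the pointer IV plus one of those
;; reusable offsets, with a single pointer step of 80 * vscale bytes.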
define void @mulvl123_addressing(ptr %src, ptr %dst, i64 %count) #0 {
; CHECK-LABEL: define void @mulvl123_addressing(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]], i64 [[COUNT:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VSCALE:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[VSCALE]], 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw i64 [[VSCALE]], 48
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i64 [[VSCALE]], 80
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[VSCALE]], 5
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], [[FOR_BODY]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IDX_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i8>, ptr [[LSR_IV]], align 16
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[LSR_IV]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP3]], align 16
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[LSR_IV]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP2]], align 16
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[LSR_IV]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP1]], align 16
; CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]])
; CHECK-NEXT: [[TMP10:%.*]] = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> [[TMP7]], <vscale x 16 x i8> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> [[TMP9]], <vscale x 16 x i8> [[TMP10]])
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IDX]]
; CHECK-NEXT: store <vscale x 16 x i8> [[TMP11]], ptr [[ARRAYIDX4]], align 16
; CHECK-NEXT: [[IDX_NEXT]] = add i64 [[IDX]], [[TMP0]]
; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 [[TMP1]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IDX_NEXT]], [[COUNT]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_EXIT:%.*]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %0 = shl nuw nsw i64 %vscale, 4    ; one scalable vector length (VL) in bytes
  %mul = shl nuw nsw i64 %vscale, 6  ; four VLs: the pointer stride per iteration
  br label %for.body

for.body:
  %src.addr = phi ptr [ %src, %entry ], [ %src.addr.next, %for.body ]
  %idx = phi i64 [ 0, %entry ], [ %idx.next, %for.body ]
  %arrayidx = getelementptr inbounds i8, ptr %src.addr, i64 %idx
  %1 = load <vscale x 16 x i8>, ptr %arrayidx
  %2 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 1
  %3 = load <vscale x 16 x i8>, ptr %2
  %4 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 2
  %5 = load <vscale x 16 x i8>, ptr %4
  %6 = getelementptr <vscale x 16 x i8>, ptr %arrayidx, i64 3
  %7 = load <vscale x 16 x i8>, ptr %6
  %8 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %1, <vscale x 16 x i8> %3)
  %9 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %5, <vscale x 16 x i8> %7)
  %10 = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %8, <vscale x 16 x i8> %9)
  %src.addr.next = getelementptr inbounds i8, ptr %src.addr, i64 %mul
  %arrayidx4 = getelementptr inbounds i8, ptr %dst, i64 %idx
  store <vscale x 16 x i8> %10, ptr %arrayidx4
  %idx.next = add i64 %idx, %0
  %cmp = icmp ult i64 %idx.next, %count
  br i1 %cmp, label %for.body, label %for.exit

for.exit:
  ret void
}

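;; @many_mulvl1_addressing reads two scalable vectors from each of two source
;; rows (%src_rows and %src_rows + %stride) and stores the narrowed sums to
;; two destination rows. The CHECK lines expect LSR to fold the three phis
;; into one shared byte-offset IV applied to per-row bases computed in the
;; entry block, plus a down-counting trip-count IV.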
define void @many_mulvl1_addressing(ptr %src_rows, ptr %dst_rows, i64 %stride, i64 %count) #0 {
; CHECK-LABEL: define void @many_mulvl1_addressing(
; CHECK-SAME: ptr [[SRC_ROWS:%.*]], ptr [[DST_ROWS:%.*]], i64 [[STRIDE:%.*]], i64 [[COUNT:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VSCALE:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[MUL:%.*]] = shl i64 [[VSCALE]], 5
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[VSCALE]], 4
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[SRC_ROWS]], i64 [[STRIDE]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[SRC_ROWS]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[VSCALE]], 3
; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr i8, ptr [[DST_ROWS]], i64 [[TMP1]]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[LSR_IV10:%.*]] = phi i64 [ [[LSR_IV_NEXT11:%.*]], [[FOR_BODY]] ], [ [[COUNT]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[SRC_ROWS]], i64 [[LSR_IV]]
; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[DST_ROWS]], i64 [[LSR_IV]]
; CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP6]], align 16
; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[SCEVGEP4]], i64 [[LSR_IV]]
; CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP5]], align 16
; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[LSR_IV]]
; CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP3]], align 16
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[LSR_IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i8>, ptr [[SCEVGEP2]], align 16
; CHECK-NEXT: [[TMP6:%.*]] = add <vscale x 16 x i8> [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 16 x i8> [[TMP3]], [[TMP5]]
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <vscale x 16 x i8> [[TMP6]] to <vscale x 8 x i16>
; CHECK-NEXT: [[TMP9:%.*]] = trunc <vscale x 8 x i16> [[TMP8]] to <vscale x 8 x i8>
; CHECK-NEXT: store <vscale x 8 x i8> [[TMP9]], ptr [[SCEVGEP9]], align 8
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <vscale x 16 x i8> [[TMP7]] to <vscale x 8 x i16>
; CHECK-NEXT: [[SCEVGEP8:%.*]] = getelementptr i8, ptr [[SCEVGEP7]], i64 [[LSR_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = trunc <vscale x 8 x i16> [[TMP10]] to <vscale x 8 x i8>
; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[SCEVGEP8]], align 8
; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], [[MUL]]
; CHECK-NEXT: [[LSR_IV_NEXT11]] = add i64 [[LSR_IV10]], -1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT11]], 0
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  %mul = shl nuw nsw i64 %vscale, 5  ; two scalable vector lengths: the row stride per iteration
  br label %for.body

for.body:
  %src_row_addr = phi ptr [ %src_rows, %entry ], [ %add_ptr_src, %for.body ]
  %dst_row_addr = phi ptr [ %dst_rows, %entry ], [ %add_ptr_dst, %for.body ]
  %idx = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %0 = load <vscale x 16 x i8>, ptr %src_row_addr
  %1 = getelementptr <vscale x 16 x i8>, ptr %src_row_addr, i64 1
  %2 = load <vscale x 16 x i8>, ptr %1
  %arrayidx2 = getelementptr inbounds i8, ptr %src_row_addr, i64 %stride
  %3 = load <vscale x 16 x i8>, ptr %arrayidx2
  %4 = getelementptr <vscale x 16 x i8>, ptr %arrayidx2, i64 1
  %5 = load <vscale x 16 x i8>, ptr %4
  %6 = add <vscale x 16 x i8> %0, %3
  %7 = add <vscale x 16 x i8> %2, %5
  %8 = bitcast <vscale x 16 x i8> %6 to <vscale x 8 x i16>
  %9 = trunc <vscale x 8 x i16> %8 to <vscale x 8 x i8>
  store <vscale x 8 x i8> %9, ptr %dst_row_addr
  %10 = bitcast <vscale x 16 x i8> %7 to <vscale x 8 x i16>
  %11 = getelementptr <vscale x 8 x i8>, ptr %dst_row_addr, i64 1
  %12 = trunc <vscale x 8 x i16> %10 to <vscale x 8 x i8>
  store <vscale x 8 x i8> %12, ptr %11
  %add_ptr_src = getelementptr inbounds i8, ptr %src_row_addr, i64 %mul
  %add_ptr_dst = getelementptr inbounds i8, ptr %dst_row_addr, i64 %mul
  %inc = add nuw i64 %idx, 1
  %exitcond = icmp eq i64 %inc, %count
  br i1 %exitcond, label %for.exit, label %for.body

for.exit:
  ret void
}

attributes #0 = { "target-features"="+sve2" vscale_range(1,16) }