; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64 < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v2 < %s | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v3 < %s | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt -O3 -S -mtriple=x86_64-- -mcpu=x86-64-v4 < %s | FileCheck %s --check-prefixes=CHECK,AVX

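; The function below computes the element-wise signed minimum of two arrays
; of 8 x i16 (the shape of a std::min loop over std::array<int16_t, 8>).
; Per the assertions, the x86-64-v2/v3/v4 runs vectorize the unrolled loop
; into <8 x i16> loads feeding @llvm.smin.v8i16, while the baseline x86-64
; (SSE2) run stays scalar.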
| 7 | +%"struct.std::array" = type { [8 x i16] } |
| 8 | + |
| 9 | +define { i64, i64 } @compute_min(ptr noundef nonnull align 2 dereferenceable(16) %x, ptr noundef nonnull align 2 dereferenceable(16) %y) { |
| 10 | +; SSE2-LABEL: @compute_min( |
| 11 | +; SSE2-NEXT: entry: |
| 12 | +; SSE2-NEXT: [[LD0:%.*]] = load i16, ptr [[Y:%.*]], align 2 |
| 13 | +; SSE2-NEXT: [[LD1:%.*]] = load i16, ptr [[X:%.*]], align 2 |
| 14 | +; SSE2-NEXT: [[LD2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0]], i16 [[LD1]]) |
| 15 | +; SSE2-NEXT: [[PT1_1:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 2 |
| 16 | +; SSE2-NEXT: [[PT0_1:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 2 |
| 17 | +; SSE2-NEXT: [[LD0_1:%.*]] = load i16, ptr [[PT0_1]], align 2 |
| 18 | +; SSE2-NEXT: [[LD1_1:%.*]] = load i16, ptr [[PT1_1]], align 2 |
| 19 | +; SSE2-NEXT: [[LD2_1:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_1]], i16 [[LD1_1]]) |
| 20 | +; SSE2-NEXT: [[PT1_2:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 4 |
| 21 | +; SSE2-NEXT: [[PT0_2:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 4 |
| 22 | +; SSE2-NEXT: [[LD0_2:%.*]] = load i16, ptr [[PT0_2]], align 2 |
| 23 | +; SSE2-NEXT: [[LD1_2:%.*]] = load i16, ptr [[PT1_2]], align 2 |
| 24 | +; SSE2-NEXT: [[LD2_2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_2]], i16 [[LD1_2]]) |
| 25 | +; SSE2-NEXT: [[PT1_3:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 6 |
| 26 | +; SSE2-NEXT: [[PT0_3:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 6 |
| 27 | +; SSE2-NEXT: [[LD0_3:%.*]] = load i16, ptr [[PT0_3]], align 2 |
| 28 | +; SSE2-NEXT: [[LD1_3:%.*]] = load i16, ptr [[PT1_3]], align 2 |
| 29 | +; SSE2-NEXT: [[LD2_3:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_3]], i16 [[LD1_3]]) |
| 30 | +; SSE2-NEXT: [[PT1_4:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 8 |
| 31 | +; SSE2-NEXT: [[PT0_4:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 8 |
| 32 | +; SSE2-NEXT: [[LD0_4:%.*]] = load i16, ptr [[PT0_4]], align 2 |
| 33 | +; SSE2-NEXT: [[LD1_4:%.*]] = load i16, ptr [[PT1_4]], align 2 |
| 34 | +; SSE2-NEXT: [[LD2_4:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_4]], i16 [[LD1_4]]) |
| 35 | +; SSE2-NEXT: [[PT1_5:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 10 |
| 36 | +; SSE2-NEXT: [[PT0_5:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 10 |
| 37 | +; SSE2-NEXT: [[LD0_5:%.*]] = load i16, ptr [[PT0_5]], align 2 |
| 38 | +; SSE2-NEXT: [[LD1_5:%.*]] = load i16, ptr [[PT1_5]], align 2 |
| 39 | +; SSE2-NEXT: [[LD2_5:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_5]], i16 [[LD1_5]]) |
| 40 | +; SSE2-NEXT: [[PT1_6:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 12 |
| 41 | +; SSE2-NEXT: [[PT0_6:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 12 |
| 42 | +; SSE2-NEXT: [[LD0_6:%.*]] = load i16, ptr [[PT0_6]], align 2 |
| 43 | +; SSE2-NEXT: [[LD1_6:%.*]] = load i16, ptr [[PT1_6]], align 2 |
| 44 | +; SSE2-NEXT: [[LD2_6:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_6]], i16 [[LD1_6]]) |
| 45 | +; SSE2-NEXT: [[PT1_7:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i64 14 |
| 46 | +; SSE2-NEXT: [[PT0_7:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i64 14 |
| 47 | +; SSE2-NEXT: [[LD0_7:%.*]] = load i16, ptr [[PT0_7]], align 2 |
| 48 | +; SSE2-NEXT: [[LD1_7:%.*]] = load i16, ptr [[PT1_7]], align 2 |
| 49 | +; SSE2-NEXT: [[LD2_7:%.*]] = tail call i16 @llvm.smin.i16(i16 [[LD0_7]], i16 [[LD1_7]]) |
| 50 | +; SSE2-NEXT: [[RETVAL_SROA_4_0_INSERT_EXT:%.*]] = zext i16 [[LD2_3]] to i64 |
| 51 | +; SSE2-NEXT: [[RETVAL_SROA_4_0_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_4_0_INSERT_EXT]], 48 |
| 52 | +; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i16 [[LD2_2]] to i64 |
| 53 | +; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32 |
| 54 | +; SSE2-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_4_0_INSERT_SHIFT]], [[RETVAL_SROA_3_0_INSERT_SHIFT]] |
| 55 | +; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i16 [[LD2_1]] to i64 |
| 56 | +; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_2_0_INSERT_EXT]], 16 |
| 57 | +; SSE2-NEXT: [[RETVAL_SROA_2_0_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], [[RETVAL_SROA_2_0_INSERT_SHIFT]] |
| 58 | +; SSE2-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[LD2]] to i64 |
| 59 | +; SSE2-NEXT: [[TMP20:%.*]] = or disjoint i64 [[RETVAL_SROA_2_0_INSERT_INSERT]], [[RETVAL_SROA_0_0_INSERT_EXT]] |
| 60 | +; SSE2-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0 |
| 61 | +; SSE2-NEXT: [[RETVAL_SROA_9_8_INSERT_EXT:%.*]] = zext i16 [[LD2_7]] to i64 |
| 62 | +; SSE2-NEXT: [[RETVAL_SROA_9_8_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_9_8_INSERT_EXT]], 48 |
| 63 | +; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_EXT:%.*]] = zext i16 [[LD2_6]] to i64 |
| 64 | +; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_8_8_INSERT_EXT]], 32 |
| 65 | +; SSE2-NEXT: [[RETVAL_SROA_8_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_9_8_INSERT_SHIFT]], [[RETVAL_SROA_8_8_INSERT_SHIFT]] |
| 66 | +; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_EXT:%.*]] = zext i16 [[LD2_5]] to i64 |
| 67 | +; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_7_8_INSERT_EXT]], 16 |
| 68 | +; SSE2-NEXT: [[RETVAL_SROA_7_8_INSERT_INSERT:%.*]] = or disjoint i64 [[RETVAL_SROA_8_8_INSERT_INSERT]], [[RETVAL_SROA_7_8_INSERT_SHIFT]] |
| 69 | +; SSE2-NEXT: [[RETVAL_SROA_5_8_INSERT_EXT:%.*]] = zext i16 [[LD2_4]] to i64 |
| 70 | +; SSE2-NEXT: [[TMP21:%.*]] = or disjoint i64 [[RETVAL_SROA_7_8_INSERT_INSERT]], [[RETVAL_SROA_5_8_INSERT_EXT]] |
| 71 | +; SSE2-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1 |
| 72 | +; SSE2-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]] |
| 73 | +; |
| 74 | +; SSE4-LABEL: @compute_min( |
| 75 | +; SSE4-NEXT: entry: |
| 76 | +; SSE4-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2 |
| 77 | +; SSE4-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2 |
| 78 | +; SSE4-NEXT: [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 79 | +; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4> |
| 80 | +; SSE4-NEXT: [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 81 | +; SSE4-NEXT: [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5> |
| 82 | +; SSE4-NEXT: [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 83 | +; SSE4-NEXT: [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6> |
| 84 | +; SSE4-NEXT: [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 85 | +; SSE4-NEXT: [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7> |
| 86 | +; SSE4-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64> |
| 87 | +; SSE4-NEXT: [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48> |
| 88 | +; SSE4-NEXT: [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64> |
| 89 | +; SSE4-NEXT: [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32> |
| 90 | +; SSE4-NEXT: [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]] |
| 91 | +; SSE4-NEXT: [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64> |
| 92 | +; SSE4-NEXT: [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16) |
| 93 | +; SSE4-NEXT: [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]] |
| 94 | +; SSE4-NEXT: [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64> |
| 95 | +; SSE4-NEXT: [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]] |
| 96 | +; SSE4-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0 |
| 97 | +; SSE4-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0 |
| 98 | +; SSE4-NEXT: [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1 |
| 99 | +; SSE4-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1 |
| 100 | +; SSE4-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]] |
| 101 | +; |
| 102 | +; AVX-LABEL: @compute_min( |
| 103 | +; AVX-NEXT: entry: |
| 104 | +; AVX-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Y:%.*]], align 2 |
| 105 | +; AVX-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr [[X:%.*]], align 2 |
| 106 | +; AVX-NEXT: [[TMP2:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 107 | +; AVX-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> poison, <2 x i32> <i32 0, i32 4> |
| 108 | +; AVX-NEXT: [[TMP4:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 109 | +; AVX-NEXT: [[TMP5:%.*]] = shufflevector <8 x i16> [[TMP4]], <8 x i16> poison, <2 x i32> <i32 1, i32 5> |
| 110 | +; AVX-NEXT: [[TMP6:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 111 | +; AVX-NEXT: [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP6]], <8 x i16> poison, <2 x i32> <i32 3, i32 6> |
| 112 | +; AVX-NEXT: [[TMP8:%.*]] = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]]) |
| 113 | +; AVX-NEXT: [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP8]], <8 x i16> poison, <2 x i32> <i32 2, i32 7> |
| 114 | +; AVX-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i64> |
| 115 | +; AVX-NEXT: [[TMP11:%.*]] = shl nuw <2 x i64> [[TMP10]], <i64 32, i64 48> |
| 116 | +; AVX-NEXT: [[TMP12:%.*]] = zext <2 x i16> [[TMP7]] to <2 x i64> |
| 117 | +; AVX-NEXT: [[TMP13:%.*]] = shl nuw <2 x i64> [[TMP12]], <i64 48, i64 32> |
| 118 | +; AVX-NEXT: [[TMP14:%.*]] = or disjoint <2 x i64> [[TMP11]], [[TMP13]] |
| 119 | +; AVX-NEXT: [[TMP15:%.*]] = zext <2 x i16> [[TMP5]] to <2 x i64> |
| 120 | +; AVX-NEXT: [[TMP16:%.*]] = shl nuw nsw <2 x i64> [[TMP15]], splat (i64 16) |
| 121 | +; AVX-NEXT: [[TMP17:%.*]] = or disjoint <2 x i64> [[TMP14]], [[TMP16]] |
| 122 | +; AVX-NEXT: [[TMP18:%.*]] = zext <2 x i16> [[TMP3]] to <2 x i64> |
| 123 | +; AVX-NEXT: [[TMP19:%.*]] = or disjoint <2 x i64> [[TMP17]], [[TMP18]] |
| 124 | +; AVX-NEXT: [[TMP20:%.*]] = extractelement <2 x i64> [[TMP19]], i64 0 |
| 125 | +; AVX-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[TMP20]], 0 |
| 126 | +; AVX-NEXT: [[TMP21:%.*]] = extractelement <2 x i64> [[TMP19]], i64 1 |
| 127 | +; AVX-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[TMP21]], 1 |
| 128 | +; AVX-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]] |
| 129 | +; |
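; Input: a counted loop over i = 0..7 that stores min(x[i], y[i]) into an
; alloca'd std::array, which is then reloaded as two i64 halves to form the
; { i64, i64 } return value (the SysV x86-64 lowering of the 16-byte struct).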
entry:
  %retval = alloca %"struct.std::array", align 2
  br label %for.cond

for.cond: ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp.not = icmp eq i32 %i.0, 8
  br i1 %cmp.not, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.cond
  %.fca.0.load = load i64, ptr %retval, align 2
  %.fca.0.insert = insertvalue { i64, i64 } poison, i64 %.fca.0.load, 0
  %.fca.1.gep = getelementptr inbounds nuw i8, ptr %retval, i64 8
  %.fca.1.load = load i64, ptr %.fca.1.gep, align 2
  %.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %.fca.1.load, 1
  ret { i64, i64 } %.fca.1.insert

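; The icmp slt + select over the two element pointers, followed by a reload
; through the selected pointer, is the usual min-by-reference idiom; the
; SSE2 assertions above show it being folded to @llvm.smin.i16.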
for.body: ; preds = %for.cond
  %conv = zext nneg i32 %i.0 to i64
  %pt1 = getelementptr inbounds nuw [8 x i16], ptr %x, i64 0, i64 %conv
  %pt0 = getelementptr inbounds nuw [8 x i16], ptr %y, i64 0, i64 %conv
  %ld0 = load i16, ptr %pt0, align 2
  %ld1 = load i16, ptr %pt1, align 2
  %cmp.i = icmp slt i16 %ld0, %ld1
  %sel = select i1 %cmp.i, ptr %pt0, ptr %pt1
  %ld2 = load i16, ptr %sel, align 2
  %pt2 = getelementptr inbounds nuw [8 x i16], ptr %retval, i64 0, i64 %conv
  store i16 %ld2, ptr %pt2, align 2
  %inc = add nuw nsw i32 %i.0, 1
  br label %for.cond
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
; SSE: {{.*}}