; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Tests for InstCombineCompares.cpp::foldICmpUSubSatOrUAddSatWithConstant
; - uadd_sat case

; ==============================================================================
; Basic tests with one user
; ==============================================================================
; eq: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_eq_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_eq_basic
; CHECK-SAME: (i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
  %cmp = icmp eq i8 %add, 5
  ret i1 %cmp
}

; ne: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_ne_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_ne_basic
; CHECK-SAME: (i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 8)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i16 [[ADD]], 9
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 8)
  %cmp = icmp ne i16 %add, 9
  ret i1 %cmp
}

; ule 3 is canonicalized to ult 4 (see CHECK line); the uadd.sat itself is not folded.
define i1 @icmp_ule_basic(i32 %arg) {
; CHECK-LABEL: define i1 @icmp_ule_basic
; CHECK-SAME: (i32 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 2)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 2)
  %cmp = icmp ule i32 %add, 3
  ret i1 %cmp
}

; ult: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_ult_basic(i64 %arg) {
; CHECK-LABEL: define i1 @icmp_ult_basic
; CHECK-SAME: (i64 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 5)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ADD]], 20
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 5)
  %cmp = icmp ult i64 %add, 20
  ret i1 %cmp
}

; uge 8 is canonicalized to ugt 7 (see CHECK line); the uadd.sat itself is not folded.
define i1 @icmp_uge_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_uge_basic
; CHECK-SAME: (i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 4)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[ADD]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 4)
  %cmp = icmp uge i8 %add, 8
  ret i1 %cmp
}

; ugt: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_ugt_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_ugt_basic
; CHECK-SAME: (i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 1)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i16 [[ADD]], 3
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 1)
  %cmp = icmp ugt i16 %add, 3
  ret i1 %cmp
}

; sle 8 is canonicalized to slt 9 (see CHECK line); the uadd.sat itself is not folded.
define i1 @icmp_sle_basic(i32 %arg) {
; CHECK-LABEL: define i1 @icmp_sle_basic
; CHECK-SAME: (i32 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 10)
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], 9
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 10)
  %cmp = icmp sle i32 %add, 8
  ret i1 %cmp
}

; slt: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_slt_basic(i64 %arg) {
; CHECK-LABEL: define i1 @icmp_slt_basic
; CHECK-SAME: (i64 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 24)
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD]], 5
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 24)
  %cmp = icmp slt i64 %add, 5
  ret i1 %cmp
}

; sge 4 is canonicalized to sgt 3 (see CHECK line); the uadd.sat itself is not folded.
define i1 @icmp_sge_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_sge_basic
; CHECK-SAME: (i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 1)
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[ADD]], 3
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 1)
  %cmp = icmp sge i8 %add, 4
  ret i1 %cmp
}

; sgt: CHECK lines show the uadd.sat/icmp pair is left unchanged by instcombine.
define i1 @icmp_sgt_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_sgt_basic
; CHECK-SAME: (i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 2)
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i16 [[ADD]], 5
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 2)
  %cmp = icmp sgt i16 %add, 5
  ret i1 %cmp
}

; ==============================================================================
; Tests with more than one user
; ==============================================================================
; Extra use of %add: CHECK lines show the pair is still left unchanged.
define i1 @icmp_eq_multiuse(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_eq_multiuse
; CHECK-SAME: (i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
; CHECK-NEXT:    call void @use.i8(i8 [[ADD]])
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
  %cmp = icmp eq i8 %add, 5
  call void @use.i8(i8 %add)
  ret i1 %cmp
}

; ==============================================================================
; Tests with vector types
; ==============================================================================
; Splat vector eq: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_eq_vector_equal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_equal
; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
  ret <2 x i1> %cmp
}

; Non-splat constants: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_eq_vector_unequal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_unequal
; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 1, i8 2>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 6>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 1, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 6>
  ret <2 x i1> %cmp
}

; Splat vector ne: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_ne_vector_equal(<2 x i16> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_equal
; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 3, i16 3>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 5, i16 5>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 3>)
  %cmp = icmp ne <2 x i16> %add, <i16 5, i16 5>
  ret <2 x i1> %cmp
}

; Non-splat constants: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_ne_vector_unequal(<2 x i16> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_unequal
; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 3, i16 33>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 7, i16 6>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 33>)
  %cmp = icmp ne <2 x i16> %add, <i16 7, i16 6>
  ret <2 x i1> %cmp
}

; Splat vector ult: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_ule_vector_equal(<2 x i32> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_equal
; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 3, i32 3>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 5, i32 5>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 3>)
  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 5>
  ret <2 x i1> %cmp
}

; Non-splat constants: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_ule_vector_unequal(<2 x i32> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_unequal
; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 3, i32 35>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 5, i32 7>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 35>)
  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 7>
  ret <2 x i1> %cmp
}

; Splat vector sgt: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_sgt_vector_equal(<2 x i64> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_equal
; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 409623, i64 409623>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 1234, i64 1234>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 409623, i64 409623>)
  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 1234>
  ret <2 x i1> %cmp
}

; Non-splat constants: CHECK lines show the uadd.sat/icmp pair is left unchanged.
define <2 x i1> @icmp_sgt_vector_unequal(<2 x i64> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_unequal
; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 320498, i64 409623>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 1234, i64 3456>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 320498, i64 409623>)
  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 3456>
  ret <2 x i1> %cmp
}

; ==============================================================================
; Tests with vector types and multiple uses
; ==============================================================================
; Extra use of the vector %add: CHECK lines show the pair is still left unchanged.
define <2 x i1> @icmp_eq_vector_multiuse_equal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_multiuse_equal
; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
; CHECK-NEXT:    call void @use.v2i8(<2 x i8> [[ADD]])
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
  call void @use.v2i8(<2 x i8> %add)
  ret <2 x i1> %cmp
}

; Intrinsic and helper declarations used by the tests above.
declare i8 @llvm.uadd.sat.i8(i8, i8)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)

declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)

declare void @use.i8(i8)
declare void @use.v2i8(<2 x i8>)