; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s

; logic shift reg pattern: and
; already optimized by another pattern
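; (b & ((a asr 23) & C)) is reassociated so that the asr folds into a
; shifted-register AND against b, and the immediate mask C is applied last.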

define i64 @and_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: and_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and x8, x1, x0, asr #23
; CHECK-NEXT:    and x0, x8, #0xffffffffff000000
; CHECK-NEXT:    ret
  %ashr = ashr i64 %a, 23
  %and = and i64 %ashr, -16777216 ; 0xffffffffff000000
  %r = and i64 %b, %and
  ret i64 %r
}

; TODO: logic shift reg pattern: bic
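; The not is currently lowered via De Morgan: ~((a asr 23) & C) ==
; ~(a asr 23) | ~C == orn(~C, a asr 23), which forces ~C = 0xffffff into a
; register; a bic with a shifted-register operand could presumably avoid that.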

define i64 @bic_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: bic_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16777215
; CHECK-NEXT:    orn x8, x8, x0, asr #23
; CHECK-NEXT:    and x0, x1, x8
; CHECK-NEXT:    ret
  %ashr = ashr i64 %a, 23
  %and = and i64 %ashr, -16777216
  %not = xor i64 %and, -1
  %r = and i64 %b, %not
  ret i64 %r
}

; logic shift reg pattern: eon
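; b ^ ~x == ~(b ^ x), so the two xors combine into a single eon of
; x = (a shl 36) & 0xffe0000000000000 with b.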

define i64 @eon_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: eon_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #36
; CHECK-NEXT:    and x8, x8, #0xffe0000000000000
; CHECK-NEXT:    eon x0, x8, x1
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992 ; 0xffe0000000000000
  %xor = xor i64 %and, -1
  %r = xor i64 %b, %xor
  ret i64 %r
}

; logic shift reg pattern: eor

define i64 @eor_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: eor_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #23
; CHECK-NEXT:    and x8, x8, #0x1ffff000000
; CHECK-NEXT:    eor x0, x8, x1
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336 ; 0x1ffff000000
  %xor = xor i64 %and, %b
  ret i64 %xor
}

; logic shift reg pattern: mvn
; already optimized by another pattern
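; By De Morgan, ~((a shl 36) & C) == ~(a shl 36) | ~C, matching orn with a
; shifted-register operand; ~C == 2^53 - 1 == 9007199254740991 is the moved
; immediate.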

define i64 @mvn_shiftedreg_from_and(i64 %a) {
; CHECK-LABEL: mvn_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9007199254740991
; CHECK-NEXT:    orn x0, x8, x0, lsl #36
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992
  %xor = xor i64 %and, -1
  ret i64 %xor
}

; logic shift reg pattern: orn
; already optimized by another pattern
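; ~((a lsr 23) & M) | b == orn(b, a lsr 23) | ~M; the inverted mask
; ~0x1ffff000000 == 0xfffffe0000ffffff is applied with a second orr.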

define i64 @orn_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: orn_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orn x8, x1, x0, lsr #23
; CHECK-NEXT:    orr x0, x8, #0xfffffe0000ffffff
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336 ; 0x1ffff000000
  %not = xor i64 %and, -1
  %or = or i64 %not, %b
  ret i64 %or
}

; logic shift reg pattern: orr
; srl constant bitwidth == (lowbits + masklen + shiftamt)
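; here: 24 low zero bits + 17 mask bits (0x1ffff) + shift 23 == 64.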

define i64 @orr_shiftedreg_from_and(i64 %a, i64 %b) {
; CHECK-LABEL: orr_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #23
; CHECK-NEXT:    and x8, x8, #0x1ffff000000
; CHECK-NEXT:    orr x0, x8, x1
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 2199006478336 ; 0x1ffff000000
  %or = or i64 %and, %b
  ret i64 %or
}

; logic shift reg pattern: orr
; srl constant bitwidth < (lowbits + masklen + shiftamt)
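; 0x3ffff000000 has 18 mask bits: 24 + 18 + 23 == 65 > 64. Since lsr #23
; leaves bits 41..63 zero, bit 41 of the mask is dead and the effective mask
; is 0x1ffff000000, so codegen is identical to the previous test.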

define i64 @orr_shiftedreg_from_and_mask2(i64 %a, i64 %b) {
; CHECK-LABEL: orr_shiftedreg_from_and_mask2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #23
; CHECK-NEXT:    and x8, x8, #0x1ffff000000
; CHECK-NEXT:    orr x0, x8, x1
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 23
  %and = and i64 %lshr, 4398029733888 ; 0x3ffff000000
  %or = or i64 %and, %b
  ret i64 %or
}

; arithmetic shift reg pattern: add
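; add and sub also accept a shifted-register operand (e.g. add w0, w1, w0,
; asr #3), which is the form this fold would target.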

define i32 @add_shiftedreg_from_and(i32 %a, i32 %b) {
; CHECK-LABEL: add_shiftedreg_from_and:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #3
; CHECK-NEXT:    and w8, w8, #0xff000000
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 3
  %and = and i32 %ashr, -16777216 ; 0xff000000
  %add = add i32 %and, %b
  ret i32 %add
}

; arithmetic shift reg pattern: sub

define i64 @sub_shiftedreg_from_and_shl(i64 %a, i64 %b) {
; CHECK-LABEL: sub_shiftedreg_from_and_shl:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #36
; CHECK-NEXT:    and x8, x8, #0xffe0000000000000
; CHECK-NEXT:    sub x0, x1, x8
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -9007199254740992
  %sub = sub i64 %b, %and
  ret i64 %sub
}

; negative test: type is not i32 or i64
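; the shifted-register forms exist only on scalar GPR instructions, so the
; vector version stays as separate NEON shl/bic/sub.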

define <2 x i32> @shiftedreg_from_and_negative_type(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_type:
; CHECK:       // %bb.0:
; CHECK-NEXT:    shl v0.2s, v0.2s, #2
; CHECK-NEXT:    bic v0.2s, #31
; CHECK-NEXT:    sub v0.2s, v1.2s, v0.2s
; CHECK-NEXT:    ret
  %shl = shl <2 x i32> %a, <i32 2, i32 2>
  %and = and <2 x i32> %shl, <i32 -32, i32 -32>
  %sub = sub <2 x i32> %b, %and
  ret <2 x i32> %sub
}

; negative test: the shift is not one-use
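; %ashr is also consumed by the mul, so folding it into the add would not
; remove the shift instruction.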

define i32 @shiftedreg_from_and_negative_oneuse1(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_oneuse1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w9, w8, #0xff000000
; CHECK-NEXT:    add w9, w9, w1
; CHECK-NEXT:    mul w0, w8, w9
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  %r = mul i32 %ashr, %add
  ret i32 %r
}

; negative test: the and is not one-use
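; %and is also consumed by the mul, so it has to be materialized anyway.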

define i32 @shiftedreg_from_and_negative_oneuse2(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_oneuse2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w8, w8, #0xff000000
; CHECK-NEXT:    add w9, w8, w1
; CHECK-NEXT:    mul w0, w8, w9
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216
  %add = add i32 %and, %b
  %r = mul i32 %and, %add
  ret i32 %r
}

; negative test: the and constant is not a mask
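; -1677721 == 0xffe66667, whose set bits are not contiguous, so it is not a
; (shifted) bitfield mask.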

define i32 @shiftedreg_from_and_negative_andc1(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #26215
; CHECK-NEXT:    movk w8, #65510, lsl #16
; CHECK-NEXT:    and w8, w8, w0, asr #23
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -1677721 ; 0xffe66667
  %add = add i32 %and, %b
  ret i32 %add
}

; negative test: sra where the and constant is not a legal mask
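; 0xef000000 == 0b11101111 << 24: bit 28 is clear, so the constant is not a
; contiguous mask.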

define i32 @shiftedreg_from_and_negative_andc2(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-285212672
; CHECK-NEXT:    and w8, w8, w0, asr #23
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, 4009754624 ; 0xef000000
  %add = add i32 %and, %b
  ret i32 %add
}

; negative test: shl where the and constant is not a legal mask
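; shl #36 already zeroes bits 0..35, so the mask 0xffffffff00000000 covers
; every bit that can still be set; the and is deleted outright, leaving a
; plain shifted-register eor.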

define i64 @shiftedreg_from_and_negative_andc3(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor x0, x1, x0, lsl #36
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, -4294967296 ; 0xffffffff00000000
  %xor = xor i64 %and, %b
  ret i64 %xor
}

; negative test: shl where the and constant is not a legal mask
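; 0x7fe0000000000000 is a contiguous run (bits 53..62) but stops short of
; bit 63, which shl #36 can still set; presumably that is what disqualifies
; the mask here.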

define i64 @shiftedreg_from_and_negative_andc4(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x8, x0, #36
; CHECK-NEXT:    and x8, x8, #0x7fe0000000000000
; CHECK-NEXT:    eor x0, x8, x1
; CHECK-NEXT:    ret
  %shl = shl i64 %a, 36
  %and = and i64 %shl, 9214364837600034816 ; 0x7fe0000000000000
  %xor = xor i64 %and, %b
  ret i64 %xor
}

; negative test: sra where the and constant is not a legal mask
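; after asr #23 on i32, bits 24..31 are all copies of the sign bit, and
; shifting the mask 0xff000000 back left by 23 overflows i32, so no
; equivalent pre-shift mask exists.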

define i32 @shiftedreg_from_and_negative_andc5(i32 %a, i32 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr w8, w0, #23
; CHECK-NEXT:    and w8, w8, #0xff000000
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
  %ashr = ashr i32 %a, 23
  %and = and i32 %ashr, -16777216 ; 0xff000000
  %add = add i32 %and, %b
  ret i32 %add
}

; negative test: srl where the and constant is not a legal mask
; srl constant bitwidth > (lowbits + masklen + shiftamt)
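; mask 0x6: 1 low zero bit + 2 mask bits + shift 2 == 5 < 64, so the masked
; field stops well short of the top of the shifted value.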

define i64 @shiftedreg_from_and_negative_andc6(i64 %a, i64 %b) {
; CHECK-LABEL: shiftedreg_from_and_negative_andc6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr x8, x0, #2
; CHECK-NEXT:    and x8, x8, #0x6
; CHECK-NEXT:    add x0, x8, x1
; CHECK-NEXT:    ret
  %lshr = lshr i64 %a, 2
  %and = and i64 %lshr, 6
  %add = add i64 %and, %b
  ret i64 %add
}