; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; PR125228

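; Each byte of %or has known bits 0b00110011 (3 | 48 is a disjoint mask), so
; shifting the <8 x i16> view left by 2 keeps every set bit inside its byte
; (0b11001100 == -52 as i8) and justifies the nuw. By known bits the trailing
; 'and' is a no-op, yet the checked output keeps it, so this presumably pins
; down how far known-bits reasoning currently reaches across the
; lane-width-changing bitcasts (see PR125228).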
define <16 x i8> @knownbits_bitcast_masked_shift(<16 x i8> %arg1, <16 x i8> %arg2) {
; CHECK-LABEL: define <16 x i8> @knownbits_bitcast_masked_shift(
; CHECK-SAME: <16 x i8> [[ARG1:%.*]], <16 x i8> [[ARG2:%.*]]) {
; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG1]], splat (i8 3)
; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[ARG2]], splat (i8 48)
; CHECK-NEXT:    [[OR:%.*]] = or disjoint <16 x i8> [[AND3]], [[AND]]
; CHECK-NEXT:    [[BITCAST4:%.*]] = bitcast <16 x i8> [[OR]] to <8 x i16>
; CHECK-NEXT:    [[SHL5:%.*]] = shl nuw <8 x i16> [[BITCAST4]], splat (i16 2)
; CHECK-NEXT:    [[BITCAST6:%.*]] = bitcast <8 x i16> [[SHL5]] to <16 x i8>
; CHECK-NEXT:    [[AND7:%.*]] = and <16 x i8> [[BITCAST6]], splat (i8 -52)
; CHECK-NEXT:    ret <16 x i8> [[AND7]]
;
  %and = and <16 x i8> %arg1, splat (i8 3)
  %and3 = and <16 x i8> %arg2, splat (i8 48)
  %or = or disjoint <16 x i8> %and3, %and
  %bitcast4 = bitcast <16 x i8> %or to <8 x i16>
  %shl5 = shl nuw <8 x i16> %bitcast4, splat (i16 2)
  %bitcast6 = bitcast <8 x i16> %shl5 to <16 x i8>
  %and7 = and <16 x i8> %bitcast6, splat (i8 -52)
  ret <16 x i8> %and7
}

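; The 0x0F mask, the pairwise byte-swap shuffle (a pure lane permutation),
; and the shift by 4 in the <8 x i16> view move each low nibble into the high
; nibble of its byte, matching the trailing 0xF0 (-16) mask; shufflevector is
; transparent to per-byte known bits.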
define <16 x i8> @knownbits_shuffle_masked_nibble_shift(<16 x i8> %arg) {
; CHECK-LABEL: define <16 x i8> @knownbits_shuffle_masked_nibble_shift(
; CHECK-SAME: <16 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG]], splat (i8 15)
; CHECK-NEXT:    [[SHUFFLEVECTOR:%.*]] = shufflevector <16 x i8> [[AND]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <16 x i8> [[SHUFFLEVECTOR]] to <8 x i16>
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw <8 x i16> [[BITCAST1]], splat (i16 4)
; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast <8 x i16> [[SHL]] to <16 x i8>
; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[BITCAST2]], splat (i8 -16)
; CHECK-NEXT:    ret <16 x i8> [[AND3]]
;
  %and = and <16 x i8> %arg, splat (i8 15)
  %shufflevector = shufflevector <16 x i8> %and, <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
  %bitcast1 = bitcast <16 x i8> %shufflevector to <8 x i16>
  %shl = shl nuw <8 x i16> %bitcast1, splat (i16 4)
  %bitcast2 = bitcast <8 x i16> %shl to <16 x i8>
  %and3 = and <16 x i8> %bitcast2, splat (i8 -16)
  ret <16 x i8> %and3
}

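; Same pattern as above with a four-byte-reversal shuffle; the permutation
; leaves the per-byte known bits unchanged.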
define <16 x i8> @knownbits_reverse_shuffle_masked_shift(<16 x i8> %arg) {
; CHECK-LABEL: define <16 x i8> @knownbits_reverse_shuffle_masked_shift(
; CHECK-SAME: <16 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[ARG]], splat (i8 15)
; CHECK-NEXT:    [[SHUFFLEVECTOR:%.*]] = shufflevector <16 x i8> [[AND]], <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <16 x i8> [[SHUFFLEVECTOR]] to <8 x i16>
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw <8 x i16> [[BITCAST1]], splat (i16 4)
; CHECK-NEXT:    [[BITCAST2:%.*]] = bitcast <8 x i16> [[SHL]] to <16 x i8>
; CHECK-NEXT:    [[AND3:%.*]] = and <16 x i8> [[BITCAST2]], splat (i8 -16)
; CHECK-NEXT:    ret <16 x i8> [[AND3]]
;
  %and = and <16 x i8> %arg, splat (i8 15)
  %shufflevector = shufflevector <16 x i8> %and, <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
  %bitcast1 = bitcast <16 x i8> %shufflevector to <8 x i16>
  %shl = shl nuw <8 x i16> %bitcast1, splat (i16 4)
  %bitcast2 = bitcast <8 x i16> %shl to <16 x i8>
  %and3 = and <16 x i8> %bitcast2, splat (i8 -16)
  ret <16 x i8> %and3
}

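; lshr by 15 isolates the sign bit of each i16 lane, so every lane is 0 or 1
; and at most one byte of the <16 x i8> view (the low byte on little-endian
; targets) can be nonzero; the 'and' with splat (i8 1) is a known-bits no-op
; on every byte.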
define <16 x i8> @knownbits_extract_bit(<8 x i16> %arg) {
; CHECK-LABEL: define <16 x i8> @knownbits_extract_bit(
; CHECK-SAME: <8 x i16> [[ARG:%.*]]) {
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <8 x i16> [[ARG]], splat (i16 15)
; CHECK-NEXT:    [[BITCAST1:%.*]] = bitcast <8 x i16> [[LSHR]] to <16 x i8>
; CHECK-NEXT:    [[AND:%.*]] = and <16 x i8> [[BITCAST1]], splat (i8 1)
; CHECK-NEXT:    ret <16 x i8> [[AND]]
;
  %lshr = lshr <8 x i16> %arg, splat (i16 15)
  %bitcast1 = bitcast <8 x i16> %lshr to <16 x i8>
  %and = and <16 x i8> %bitcast1, splat (i8 1)
  ret <16 x i8> %and
}

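; ctpop of an i64 lies in [0, 64], which instcombine records as the
; range(i64 0, 65) attribute seen in the checks. Element 0 of each <4 x i32>
; view holds (on little-endian targets) the low half of lane 0's popcount, so
; the two operands of @llvm.uadd.with.overflow.i32 are each <= 64 and the add
; can never actually overflow; the intrinsic is nonetheless retained in the
; checked output.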
define { i32, i1 } @knownbits_popcount_add_with_overflow(<2 x i64> %arg1, <2 x i64> %arg2) {
; CHECK-LABEL: define { i32, i1 } @knownbits_popcount_add_with_overflow(
; CHECK-SAME: <2 x i64> [[ARG1:%.*]], <2 x i64> [[ARG2:%.*]]) {
; CHECK-NEXT:    [[CALL:%.*]] = tail call range(i64 0, 65) <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[ARG1]])
; CHECK-NEXT:    [[BITCAST5:%.*]] = bitcast <2 x i64> [[CALL]] to <4 x i32>
; CHECK-NEXT:    [[EXTRACTELEMENT:%.*]] = extractelement <4 x i32> [[BITCAST5]], i64 0
; CHECK-NEXT:    [[CALL9:%.*]] = tail call range(i64 0, 65) <2 x i64> @llvm.ctpop.v2i64(<2 x i64> [[ARG2]])
; CHECK-NEXT:    [[BITCAST10:%.*]] = bitcast <2 x i64> [[CALL9]] to <4 x i32>
; CHECK-NEXT:    [[EXTRACTELEMENT11:%.*]] = extractelement <4 x i32> [[BITCAST10]], i64 0
; CHECK-NEXT:    [[TMP1:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[EXTRACTELEMENT]], i32 [[EXTRACTELEMENT11]])
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %call = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %arg1)
  %bitcast5 = bitcast <2 x i64> %call to <4 x i32>
  %extractelement = extractelement <4 x i32> %bitcast5, i64 0
  %call9 = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %arg2)
  %bitcast10 = bitcast <2 x i64> %call9 to <4 x i32>
  %extractelement11 = extractelement <4 x i32> %bitcast10, i64 0
  %call12 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %extractelement, i32 %extractelement11)
  ret { i32, i1 } %call12
}

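; shl by 8 clears the low byte of every i16 lane, so half the bytes of
; %bitcast7, %bitcast11, and hence %add12 are known zero (vector adds carry
; per lane, never across bytes); only the add of %arg1 can populate them.
; Presumably this guards known-bits propagation through chains of byte adds
; and mixed-width bitcasts. The check-variable names (ADD14/ADD13) differ
; from the input names only because the assertions were regenerated from the
; optimized output.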
define <16 x i8> @knownbits_shuffle_add_shift_v32i8(<16 x i8> %arg1, <8 x i16> %arg2, <8 x i16> %arg3) {
; CHECK-LABEL: define <16 x i8> @knownbits_shuffle_add_shift_v32i8(
; CHECK-SAME: <16 x i8> [[ARG1:%.*]], <8 x i16> [[ARG2:%.*]], <8 x i16> [[ARG3:%.*]]) {
; CHECK-NEXT:    [[SHL6:%.*]] = shl <8 x i16> [[ARG2]], splat (i16 8)
; CHECK-NEXT:    [[BITCAST7:%.*]] = bitcast <8 x i16> [[SHL6]] to <16 x i8>
; CHECK-NEXT:    [[SHL10:%.*]] = shl <8 x i16> [[ARG3]], splat (i16 8)
; CHECK-NEXT:    [[BITCAST11:%.*]] = bitcast <8 x i16> [[SHL10]] to <16 x i8>
; CHECK-NEXT:    [[ADD12:%.*]] = add <16 x i8> [[BITCAST11]], [[BITCAST7]]
; CHECK-NEXT:    [[ADD14:%.*]] = add <16 x i8> [[ADD12]], [[ARG1]]
; CHECK-NEXT:    [[BITCAST14:%.*]] = bitcast <16 x i8> [[ADD12]] to <8 x i16>
; CHECK-NEXT:    [[SHL15:%.*]] = shl <8 x i16> [[BITCAST14]], splat (i16 8)
; CHECK-NEXT:    [[BITCAST16:%.*]] = bitcast <8 x i16> [[SHL15]] to <16 x i8>
; CHECK-NEXT:    [[ADD13:%.*]] = add <16 x i8> [[ADD14]], [[BITCAST16]]
; CHECK-NEXT:    ret <16 x i8> [[ADD13]]
;
  %shl6 = shl <8 x i16> %arg2, splat (i16 8)
  %bitcast7 = bitcast <8 x i16> %shl6 to <16 x i8>
  %shl10 = shl <8 x i16> %arg3, splat (i16 8)
  %bitcast11 = bitcast <8 x i16> %shl10 to <16 x i8>
  %add12 = add <16 x i8> %bitcast11, %bitcast7
  %add13 = add <16 x i8> %add12, %arg1
  %bitcast14 = bitcast <16 x i8> %add12 to <8 x i16>
  %shl15 = shl <8 x i16> %bitcast14, splat (i16 8)
  %bitcast16 = bitcast <8 x i16> %shl15 to <16 x i8>
  %add17 = add <16 x i8> %add13, %bitcast16
  ret <16 x i8> %add17
}

declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)

declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)