; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

declare void @escape(i16 %add)
declare void @escape2(<2 x i16> %add)

define void @numsignbits_shl_zext(i8 %x) {
; CHECK-LABEL: define void @numsignbits_shl_zext(
; CHECK-SAME: i8 [[X:%.*]]) {
; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X]], 5
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i8 [[ASHR]] to i16
; CHECK-NEXT:    [[NSB4:%.*]] = shl i16 [[ZEXT]], 10
; CHECK-NEXT:    [[AND14:%.*]] = and i16 [[NSB4]], 16384
; CHECK-NEXT:    [[ADD14:%.*]] = add i16 [[AND14]], [[NSB4]]
; CHECK-NEXT:    call void @escape(i16 [[ADD14]])
; CHECK-NEXT:    [[AND13:%.*]] = and i16 [[NSB4]], 8192
; CHECK-NEXT:    [[ADD13:%.*]] = add i16 [[AND13]], [[NSB4]]
; CHECK-NEXT:    call void @escape(i16 [[ADD13]])
; CHECK-NEXT:    [[AND12:%.*]] = and i16 [[NSB4]], 4096
; CHECK-NEXT:    [[ADD12:%.*]] = add i16 [[AND12]], [[NSB4]]
; CHECK-NEXT:    call void @escape(i16 [[ADD12]])
; CHECK-NEXT:    [[AND11:%.*]] = and i16 [[NSB4]], 2048
; CHECK-NEXT:    [[ADD11:%.*]] = add i16 [[AND11]], [[NSB4]]
; CHECK-NEXT:    call void @escape(i16 [[ADD11]])
; CHECK-NEXT:    ret void
;
  %ashr = ashr i8 %x, 5
  %zext = zext i8 %ashr to i16
  %nsb4 = shl i16 %zext, 10
  ; Validate ComputeNumSignBits using this simplification:
  ; (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  ; 4 sign bits: Goal is to fold away the add for bits 12-14.
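  ; Worked example (illustration only, example input is an assumption):
  ; with %x = -128, %ashr = -4 (0xFC), %zext = 0x00FC, %nsb4 = 0xF000, so
  ; bits 12-15 are all 1 (4 sign bits). For C1 = 14:
  ; (0xF000 & 0x4000) + 0xF000 = 0x3000 (mod 2^16) = 0xF000 & 0x3FFF,
  ; matching the fold above.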
  %and14 = and i16 %nsb4, 16384
  %add14 = add i16 %and14, %nsb4
  call void @escape(i16 %add14)
  %and13 = and i16 %nsb4, 8192
  %add13 = add i16 %and13, %nsb4
  call void @escape(i16 %add13)
  %and12 = and i16 %nsb4, 4096
  %add12 = add i16 %and12, %nsb4
  call void @escape(i16 %add12)
  %and11 = and i16 %nsb4, 2048
  %add11 = add i16 %and11, %nsb4
  call void @escape(i16 %add11)
  ret void
}

define void @numsignbits_shl_zext_shift_amount_matches_extend(i8 %x) {
; CHECK-LABEL: define void @numsignbits_shl_zext_shift_amount_matches_extend(
; CHECK-SAME: i8 [[X:%.*]]) {
; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X]], 2
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i8 [[ASHR]] to i16
; CHECK-NEXT:    [[NSB3:%.*]] = shl nuw i16 [[ZEXT]], 8
; CHECK-NEXT:    [[AND14:%.*]] = and i16 [[NSB3]], 16384
; CHECK-NEXT:    [[ADD14:%.*]] = add i16 [[AND14]], [[NSB3]]
; CHECK-NEXT:    call void @escape(i16 [[ADD14]])
; CHECK-NEXT:    [[AND13:%.*]] = and i16 [[NSB3]], 8192
; CHECK-NEXT:    [[ADD13:%.*]] = add i16 [[AND13]], [[NSB3]]
; CHECK-NEXT:    call void @escape(i16 [[ADD13]])
; CHECK-NEXT:    [[AND12:%.*]] = and i16 [[NSB3]], 4096
; CHECK-NEXT:    [[ADD12:%.*]] = add i16 [[AND12]], [[NSB3]]
; CHECK-NEXT:    call void @escape(i16 [[ADD12]])
; CHECK-NEXT:    ret void
;
  %ashr = ashr i8 %x, 2
  %zext = zext i8 %ashr to i16
  %nsb3 = shl i16 %zext, 8
  ; Validate ComputeNumSignBits using this simplification:
  ; (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  ; 3 sign bits: Goal is to fold away the add for bits 13-14.
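  ; Worked example (illustration only, example input is an assumption):
  ; with %x = -128, %ashr = -32 (0xE0), %zext = 0x00E0, %nsb3 = 0xE000, so
  ; bits 13-15 are 1 and bit 12 is 0 (3 sign bits). For C1 = 13:
  ; (0xE000 & 0x2000) + 0xE000 wraps to 0x0000 = 0xE000 & 0x1FFF, so the
  ; fold holds; for C1 = 12 it does not (0 + 0xE000 = 0xE000, but
  ; 0xE000 & 0x0FFF = 0), so the bit-12 add has to stay.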
  %and14 = and i16 %nsb3, 16384
  %add14 = add i16 %and14, %nsb3
  call void @escape(i16 %add14)
  %and13 = and i16 %nsb3, 8192
  %add13 = add i16 %and13, %nsb3
  call void @escape(i16 %add13)
  %and12 = and i16 %nsb3, 4096
  %add12 = add i16 %and12, %nsb3
  call void @escape(i16 %add12)
  ret void
}

define void @numsignbits_shl_zext_extended_bits_remains(i8 %x) {
; CHECK-LABEL: define void @numsignbits_shl_zext_extended_bits_remains(
; CHECK-SAME: i8 [[X:%.*]]) {
; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X]], 5
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i8 [[ASHR]] to i16
; CHECK-NEXT:    [[NSB1:%.*]] = shl nuw nsw i16 [[ZEXT]], 7
; CHECK-NEXT:    [[AND14:%.*]] = and i16 [[NSB1]], 16384
; CHECK-NEXT:    [[ADD14:%.*]] = add nuw i16 [[AND14]], [[NSB1]]
; CHECK-NEXT:    call void @escape(i16 [[ADD14]])
; CHECK-NEXT:    ret void
;
  %ashr = ashr i8 %x, 5
  %zext = zext i8 %ashr to i16
  %nsb1 = shl i16 %zext, 7
  ; Validate ComputeNumSignBits using this simplification:
  ; (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  ; 1 sign bit: The add can't be folded away here.
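  ; Counterexample (illustration only, example input is an assumption):
  ; with %x = -128, %nsb1 = 0x00FC << 7 = 0x7E00, where bit 15 is 0 but
  ; bit 14 is 1, so bit 14 is not a sign bit. Then
  ; (0x7E00 & 0x4000) + 0x7E00 = 0xBE00, while 0x7E00 & 0x3FFF = 0x3E00,
  ; so folding would change the result.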
  %and14 = and i16 %nsb1, 16384
  %add14 = add i16 %and14, %nsb1
  call void @escape(i16 %add14)
  ret void
}

define void @numsignbits_shl_zext_all_bits_shifted_out(i8 %x) {
; CHECK-LABEL: define void @numsignbits_shl_zext_all_bits_shifted_out(
; CHECK-SAME: i8 [[X:%.*]]) {
; CHECK-NEXT:    [[ASHR:%.*]] = lshr i8 [[X]], 5
; CHECK-NEXT:    [[ZEXT:%.*]] = zext nneg i8 [[ASHR]] to i16
; CHECK-NEXT:    [[NSB1:%.*]] = shl i16 [[ZEXT]], 14
; CHECK-NEXT:    [[AND14:%.*]] = and i16 [[NSB1]], 16384
; CHECK-NEXT:    [[ADD14:%.*]] = add i16 [[AND14]], [[NSB1]]
; CHECK-NEXT:    call void @escape(i16 [[ADD14]])
; CHECK-NEXT:    ret void
;
  %ashr = ashr i8 %x, 5
  %zext = zext i8 %ashr to i16
  %nsb1 = shl i16 %zext, 14
  ; Validate ComputeNumSignBits using this simplification:
  ; (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  ; 1 sign bit: The add can't be folded away here.
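  ; Counterexample (illustration only, example input is an assumption):
  ; with %x = 32, %ashr = 1 and %nsb1 = 1 << 14 = 0x4000, where bit 15 is 0
  ; but bit 14 is 1, so bit 14 is not a sign bit. Then
  ; (0x4000 & 0x4000) + 0x4000 = 0x8000, while 0x4000 & 0x3FFF = 0,
  ; so folding would change the result.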
  %and14 = and i16 %nsb1, 16384
  %add14 = add i16 %and14, %nsb1
  call void @escape(i16 %add14)
  ret void
}

define void @numsignbits_shl_zext_vector(<2 x i8> %x) {
; CHECK-LABEL: define void @numsignbits_shl_zext_vector(
; CHECK-SAME: <2 x i8> [[X:%.*]]) {
; CHECK-NEXT:    [[ASHR:%.*]] = ashr <2 x i8> [[X]], <i8 5, i8 5>
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i8> [[ASHR]] to <2 x i16>
; CHECK-NEXT:    [[NSB4:%.*]] = shl <2 x i16> [[ZEXT]], <i16 10, i16 10>
; CHECK-NEXT:    [[AND14:%.*]] = and <2 x i16> [[NSB4]], <i16 16384, i16 16384>
; CHECK-NEXT:    [[ADD14:%.*]] = add <2 x i16> [[AND14]], [[NSB4]]
; CHECK-NEXT:    call void @escape2(<2 x i16> [[ADD14]])
; CHECK-NEXT:    [[AND13:%.*]] = and <2 x i16> [[NSB4]], <i16 8192, i16 8192>
; CHECK-NEXT:    [[ADD13:%.*]] = add <2 x i16> [[AND13]], [[NSB4]]
; CHECK-NEXT:    call void @escape2(<2 x i16> [[ADD13]])
; CHECK-NEXT:    [[AND12:%.*]] = and <2 x i16> [[NSB4]], <i16 4096, i16 4096>
; CHECK-NEXT:    [[ADD12:%.*]] = add <2 x i16> [[AND12]], [[NSB4]]
; CHECK-NEXT:    call void @escape2(<2 x i16> [[ADD12]])
; CHECK-NEXT:    [[AND11:%.*]] = and <2 x i16> [[NSB4]], <i16 2048, i16 2048>
; CHECK-NEXT:    [[ADD11:%.*]] = add <2 x i16> [[AND11]], [[NSB4]]
; CHECK-NEXT:    call void @escape2(<2 x i16> [[ADD11]])
; CHECK-NEXT:    ret void
;
  %ashr = ashr <2 x i8> %x, <i8 5, i8 5>
  %zext = zext <2 x i8> %ashr to <2 x i16>
  %nsb4 = shl <2 x i16> %zext, <i16 10, i16 10>
  ; Validate ComputeNumSignBits using this simplification:
  ; (A & 2^C1) + A => A & (2^C1 - 1) iff bit C1 in A is a sign bit
  ; 4 sign bits: Goal is to fold away the add for bits 12-14.
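  ; The fold reasoning applies independently per lane; e.g. with both lanes
  ; of %x = -128 (an assumed input, illustration only), each lane matches
  ; the scalar @numsignbits_shl_zext example (lane value 0xF000, 4 sign bits).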
  %and14 = and <2 x i16> %nsb4, <i16 16384, i16 16384>
  %add14 = add <2 x i16> %and14, %nsb4
  call void @escape2(<2 x i16> %add14)
  %and13 = and <2 x i16> %nsb4, <i16 8192, i16 8192>
  %add13 = add <2 x i16> %and13, %nsb4
  call void @escape2(<2 x i16> %add13)
  %and12 = and <2 x i16> %nsb4, <i16 4096, i16 4096>
  %add12 = add <2 x i16> %and12, %nsb4
  call void @escape2(<2 x i16> %add12)
  %and11 = and <2 x i16> %nsb4, <i16 2048, i16 2048>
  %add11 = add <2 x i16> %and11, %nsb4
  call void @escape2(<2 x i16> %add11)
  ret void
}