+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -scoped-noalias-aa -slp-vectorizer -mtriple=arm64-apple-darwin -enable-new-pm=false -S %s | FileCheck %s
+
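+; The loads from %src are interleaved with stores to %dst, which may alias, so
+; vectorizing these accesses would require runtime checks (versioning). With
+; only 2 lanes that is presumably not profitable; the checks show the code
+; stays scalar.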
+define void @needs_versioning_not_profitable(i32* %dst, i32* %src) {
+; CHECK-LABEL: @needs_versioning_not_profitable(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ ret void
+}
+
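+; Same pattern over 4 lanes, where versioning could pay off; the checks show
+; the code is currently still left scalar.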
+define void @needs_versioning_profitable(i32* %dst, i32* %src) {
+; CHECK-LABEL: @needs_versioning_profitable(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: [[SRC_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT: [[SRC_2:%.*]] = load i32, i32* [[SRC_GEP_2]], align 4
+; CHECK-NEXT: [[R_2:%.*]] = ashr i32 [[SRC_2]], 16
+; CHECK-NEXT: [[DST_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT: store i32 [[R_2]], i32* [[DST_GEP_2]], align 4
+; CHECK-NEXT: [[SRC_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT: [[SRC_3:%.*]] = load i32, i32* [[SRC_GEP_3]], align 4
+; CHECK-NEXT: [[R_3:%.*]] = ashr i32 [[SRC_3]], 16
+; CHECK-NEXT: [[DST_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT: store i32 [[R_3]], i32* [[DST_GEP_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ %src.gep.2 = getelementptr inbounds i32, i32* %src, i64 2
+ %src.2 = load i32, i32* %src.gep.2, align 4
+ %r.2 = ashr i32 %src.2, 16
+ %dst.gep.2 = getelementptr inbounds i32, i32* %dst, i64 2
+ store i32 %r.2, i32* %dst.gep.2, align 4
+ %src.gep.3 = getelementptr inbounds i32, i32* %src, i64 3
+ %src.3 = load i32, i32* %src.gep.3, align 4
+ %r.3 = ashr i32 %src.3, 16
+ %dst.gep.3 = getelementptr inbounds i32, i32* %dst, i64 3
+ store i32 %r.3, i32* %dst.gep.3, align 4
+
+ ret void
+}
+
+
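+; All loads from %src happen before any store to %dst, so no runtime alias
+; checks are needed and the accesses are vectorized to <2 x i32>.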
+define void @no_version(i32* nocapture %dst, i32* nocapture readonly %src) {
+; CHECK-LABEL: @no_version(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[SRC]] to <2 x i32>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = ashr <2 x i32> [[TMP1]], <i32 16, i32 16>
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[DST]] to <2 x i32>*
+; CHECK-NEXT: store <2 x i32> [[TMP2]], <2 x i32>* [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %src.0 = load i32, i32* %src, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.0 = ashr i32 %src.0, 16
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.0, i32* %dst, align 4
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ ret void
+}
+
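+; The loads of %counter and %out_block are interleaved with stores to
+; %out_block, so vectorizing would presumably require runtime checks for
+; several pointer pairs; the checks show the code stays scalar.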
+define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly %counter) {
+; CHECK-LABEL: @version_multiple(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[COUNTER:%.*]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK:%.*]], align 4
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT: store i32 [[XOR]], i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT: store i32 [[XOR_1]], i32* [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: store i32 [[XOR_2]], i32* [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: [[XOR_3:%.*]] = xor i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: store i32 [[XOR_3]], i32* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i32, i32* %counter, align 4
+ %1 = load i32, i32* %out_block, align 4
+ %xor = xor i32 %1, %0
+ store i32 %xor, i32* %out_block, align 4
+ %arrayidx.1 = getelementptr inbounds i32, i32* %counter, i64 1
+ %2 = load i32, i32* %arrayidx.1, align 4
+ %arrayidx2.1 = getelementptr inbounds i32, i32* %out_block, i64 1
+ %3 = load i32, i32* %arrayidx2.1, align 4
+ %xor.1 = xor i32 %3, %2
+ store i32 %xor.1, i32* %arrayidx2.1, align 4
+ %arrayidx.2 = getelementptr inbounds i32, i32* %counter, i64 2
+ %4 = load i32, i32* %arrayidx.2, align 4
+ %arrayidx2.2 = getelementptr inbounds i32, i32* %out_block, i64 2
+ %5 = load i32, i32* %arrayidx2.2, align 4
+ %xor.2 = xor i32 %5, %4
+ store i32 %xor.2, i32* %arrayidx2.2, align 4
+ %arrayidx.3 = getelementptr inbounds i32, i32* %counter, i64 3
+ %6 = load i32, i32* %arrayidx.3, align 4
+ %arrayidx2.3 = getelementptr inbounds i32, i32* %out_block, i64 3
+ %7 = load i32, i32* %arrayidx2.3, align 4
+ %xor.3 = xor i32 %7, %6
+ store i32 %xor.3, i32* %arrayidx2.3, align 4
+ ret void
+}
+
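+; Like @needs_versioning_not_profitable, but %r.0 is also used outside the
+; block containing the memory accesses; still scalar.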
+define i32 @use_outside_version_bb(i32* %dst, i32* %src, i1 %c.1) {
+; CHECK-LABEL: @use_outside_version_bb(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 [[R_0]]
+;
+entry:
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ br label %exit
+
+exit:
+ ret i32 %r.0
+}
+
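+; As above, but the returned value (%add) is unrelated to the memory accesses;
+; still scalar.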
+define i32 @value_used_in_return(i32* %dst, i32* %src, i32 %x) {
+; CHECK-LABEL: @value_used_in_return(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X:%.*]], 20
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ %add = add i32 %x, 20
+ ret i32 %add
+}
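+
+; The potentially aliasing accesses sit in a conditionally executed block
+; (%then); still scalar.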
+define i32 @needs_versioning2_cond_br(i32* %dst, i32* %src, i1 %c.1) {
+; CHECK-LABEL: @needs_versioning2_cond_br(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: ret i32 10
+; CHECK: else:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br i1 %c.1, label %then, label %else
+
+then:
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ ret i32 10
+
+else:
+ ret i32 0
+}
+
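+; %src itself is loaded from %src.p in the same block as the accesses it
+; feeds; still scalar.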
+define void @pointer_defined_in_bb(i32* %dst, i32** %src.p) {
+; CHECK-LABEL: @pointer_defined_in_bb(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SRC:%.*]] = load i32*, i32** [[SRC_P:%.*]], align 8
+; CHECK-NEXT: [[SRC_0:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT: [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
+; CHECK-NEXT: store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT: [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
+; CHECK-NEXT: [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT: store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %src = load i32*, i32** %src.p
+ %src.0 = load i32, i32* %src, align 4
+ %r.0 = ashr i32 %src.0, 16
+ store i32 %r.0, i32* %dst, align 4
+ %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
+ %src.1 = load i32, i32* %src.gep.1, align 4
+ %r.1 = ashr i32 %src.1, 16
+ %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
+ store i32 %r.1, i32* %dst.gep.1, align 4
+ ret void
+}
+
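+; Accesses to different offsets of the same underlying object (%this),
+; separated by a call that may clobber memory; nothing is vectorized.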
+define void @clobber_same_underlying_object(i32* %this) {
+; CHECK-LABEL: @clobber_same_underlying_object(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P_3:%.*]] = getelementptr inbounds i32, i32* [[THIS:%.*]], i32 3
+; CHECK-NEXT: store i32 10, i32* [[P_3]], align 8
+; CHECK-NEXT: tail call void @clobber()
+; CHECK-NEXT: [[P_4:%.*]] = getelementptr inbounds i32, i32* [[THIS]], i32 4
+; CHECK-NEXT: [[L2:%.*]] = load i32, i32* [[P_4]], align 8
+; CHECK-NEXT: store i32 20, i32* [[P_4]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %p.3 = getelementptr inbounds i32, i32* %this, i32 3
+ store i32 10, i32* %p.3, align 8
+ tail call void @clobber()
+ %p.4 = getelementptr inbounds i32, i32* %this, i32 4
+ %l2 = load i32, i32* %p.4, align 8
+ store i32 20, i32* %p.4, align 8
+ ret void
+}
+
+declare void @clobber()
+
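+; Two adjacent stores to %A, but one stores a constant and the other a value
+; loaded from %B, so vectorizing is presumably not worth it; the code is
+; unchanged.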
+define void @slp_not_beneficial(i32* %A, i32* %B) {
+; CHECK-LABEL: @slp_not_beneficial(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 4
+; CHECK-NEXT: store i32 0, i32* [[TMP]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 5
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 4
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 8
+; CHECK-NEXT: store i32 [[TMP5]], i32* [[TMP3]], align 8
+; CHECK-NEXT: ret void
+;
+bb:
+ %tmp = getelementptr inbounds i32, i32* %A, i32 4
+ store i32 0, i32* %tmp, align 8
+ %tmp3 = getelementptr inbounds i32, i32* %A, i32 5
+ %tmp4 = getelementptr inbounds i32, i32* %B, i32 4
+ %tmp5 = load i32, i32* %tmp4, align 8
+ store i32 %tmp5, i32* %tmp3, align 8
+ ret void
+}
+
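+; Reduced test case: fmul/fadd chains that load from and store to %ptr, with
+; extra loads from a null pointer and %ptr.2; the checks show it stays scalar.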
+define void @widget(double* %ptr, double* %ptr.2) {
+; CHECK-LABEL: @widget(
+; CHECK-NEXT: bb1:
+; CHECK-NEXT: [[TMP3:%.*]] = load double, double* null, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = fmul double undef, [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = load double, double* [[TMP5]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = fadd double [[TMP6]], [[TMP4]]
+; CHECK-NEXT: store double [[TMP7]], double* [[TMP5]], align 8
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds double, double* [[PTR_2:%.*]], i64 0
+; CHECK-NEXT: [[TMP9:%.*]] = load double, double* [[TMP8]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = fmul double undef, [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = fadd double [[TMP12]], [[TMP10]]
+; CHECK-NEXT: store double [[TMP13]], double* [[TMP11]], align 8
+; CHECK-NEXT: br label [[BB15:%.*]]
+; CHECK: bb15:
+; CHECK-NEXT: br label [[BB15]]
+;
+bb1:
+ %tmp3 = load double, double* null, align 8
+ %tmp4 = fmul double undef, %tmp3
+ %tmp5 = getelementptr inbounds double, double* %ptr, i32 0
+ %tmp6 = load double, double* %tmp5, align 8
+ %tmp7 = fadd double %tmp6, %tmp4
+ store double %tmp7, double* %tmp5, align 8
+ %tmp8 = getelementptr inbounds double, double* %ptr.2, i64 0
+ %tmp9 = load double, double* %tmp8, align 8
+ %tmp10 = fmul double undef, %tmp9
+ %tmp11 = getelementptr inbounds double, double* %ptr, i32 1
+ %tmp12 = load double, double* %tmp11, align 8
+ %tmp13 = fadd double %tmp12, %tmp10
+ store double %tmp13, double* %tmp11, align 8
+ br label %bb15
+
+bb15: ; preds = %bb1, %bb15
+ br label %bb15
+}