
Commit 47aaa99

[VectorCombine] allow peeking through GEPs when creating a vector load
This is an enhancement motivated by https://llvm.org/PR16739 (see D92858 for another).

We can look through a GEP to find a base pointer that may be safe to use for a vector load. If so, we shuffle (shift) the necessary vector element over to index 0.

Alive2 proof based on one of the regression tests: https://alive2.llvm.org/ce/z/yPJLkh

The vector translation is independent of endianness (verify by changing the datalayout string to a leading 'E').

Differential Revision: https://reviews.llvm.org/D93229
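To make the offset bookkeeping concrete, here is a small standalone C++ sketch of the byte-offset-to-lane mapping described above. The helper name offsetToLane is hypothetical and this is not the pass code; the pass performs the same arithmetic on APInt values, as the diff below shows.

#include <cstdint>
#include <optional>

// Given the constant byte offset accumulated from a GEP and the scalar element
// size in bits, return the lane of the wide vector load that holds the
// originally loaded value, or nothing if the offset cannot become a lane.
std::optional<unsigned> offsetToLane(uint64_t OffsetBytes, uint64_t ScalarSizeBits,
                                     unsigned MinVecNumElts) {
  if (ScalarSizeBits == 0 || ScalarSizeBits % 8 != 0)
    return std::nullopt;              // only byte-sized scalar elements are handled
  uint64_t ScalarSizeBytes = ScalarSizeBits / 8;
  if (OffsetBytes % ScalarSizeBytes != 0)
    return std::nullopt;              // offset must land on an element boundary
  uint64_t Lane = OffsetBytes / ScalarSizeBytes;
  if (Lane >= MinVecNumElts)
    return std::nullopt;              // target element would fall outside the wide load
  return static_cast<unsigned>(Lane);
}

// offsetToLane(2, 16, 8) == 1: a GEP of +2 bytes over i16 becomes a shuffle of
// element 1 down to index 0 (see the gep01 tests below).
// offsetToLane(13, 32, 4) has no value: a +13 byte offset over i32 elements is
// rejected, matching the gep013 negative test.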
Parent: 0336ff0

2 files changed: +95, -32 lines

llvm/lib/Transforms/Vectorize/VectorCombine.cpp

Lines changed: 47 additions & 7 deletions
@@ -93,6 +93,7 @@ static void replaceValue(Value &Old, Value &New) {
 
 bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // Match insert into fixed vector of scalar value.
+  // TODO: Handle non-zero insert index.
   auto *Ty = dyn_cast<FixedVectorType>(I.getType());
   Value *Scalar;
   if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
@@ -115,7 +116,6 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
       mustSuppressSpeculation(*Load))
     return false;
 
-  // TODO: Extend this to match GEP with constant offsets.
   const DataLayout &DL = I.getModule()->getDataLayout();
   Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
   assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
@@ -127,10 +127,13 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   if (AS != SrcPtr->getType()->getPointerAddressSpace())
     SrcPtr = Load->getPointerOperand();
 
+  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
+  // sure we have all of our type-based constraints in place for this target.
   Type *ScalarTy = Scalar->getType();
   uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
   unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
-  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0)
+  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
+      ScalarSize % 8 != 0)
     return false;
 
   // Check safety of replacing the scalar load with a larger vector load.
@@ -139,12 +142,45 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // we may use a larger value based on alignment attributes.
   unsigned MinVecNumElts = MinVectorSize / ScalarSize;
   auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
-  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
-    return false;
+  unsigned OffsetEltIndex = 0;
+  Align Alignment = Load->getAlign();
+  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
+    // It is not safe to load directly from the pointer, but we can still peek
+    // through gep offsets and check if it is safe to load from a base address
+    // with updated alignment. If it is, we can shuffle the element(s) into
+    // place after loading.
+    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
+    APInt Offset(OffsetBitWidth, 0);
+    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+
+    // We want to shuffle the result down from a high element of a vector, so
+    // the offset must be positive.
+    if (Offset.isNegative())
+      return false;
+
+    // The offset must be a multiple of the scalar element size so the shuffle
+    // maps cleanly onto whole elements.
+    uint64_t ScalarSizeInBytes = ScalarSize / 8;
+    if (Offset.urem(ScalarSizeInBytes) != 0)
+      return false;
+
+    // If we load MinVecNumElts, will our target element still be loaded?
+    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
+    if (OffsetEltIndex >= MinVecNumElts)
+      return false;
+
+    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
+      return false;
+
+    // Update alignment with offset value. Note that the offset could be negated
+    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
+    // negation does not change the result of the alignment calculation.
+    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
+  }
 
   // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
   // Use the greater of the alignment on the load or its source pointer.
-  Align Alignment = std::max(SrcPtr->getPointerAlignment(DL), Load->getAlign());
+  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
   Type *LoadTy = Load->getType();
   int OldCost = TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
   APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
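The alignment handling in the hunk above (commonAlignment of the load's alignment and the constant offset, then the max with the source pointer's alignment) can be illustrated with a worked example. This is only a model of what llvm::commonAlignment computes, the largest power of two dividing both values; commonAlignmentSketch is a hypothetical stand-in, not the LLVM helper.

#include <cassert>
#include <cstdint>

// Rough model of llvm::commonAlignment(Align, offset) for power-of-two
// alignments: keep halving until the alignment also divides the offset.
uint64_t commonAlignmentSketch(uint64_t KnownAlign, uint64_t OffsetBytes) {
  uint64_t A = KnownAlign;
  while (A > 1 && OffsetBytes % A != 0)
    A /= 2;
  return A;
}

int main() {
  // From the minalign test below: the scalar load is 'align 8' at a GEP that is
  // +2 bytes past the base pointer, so the wide load at the base can only claim
  // 'align 2' (the max with the pointer's own 'align 2' attribute stays 2).
  assert(commonAlignmentSketch(8, 2) == 2);
  // A made-up case: a 16-byte-aligned access 4 bytes past the base only
  // guarantees 4-byte alignment at the base. Negating the offset would not
  // change either answer, since only its low set bits matter.
  assert(commonAlignmentSketch(16, 4) == 4);
  return 0;
}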
@@ -153,6 +189,9 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
 
   // New pattern: load VecPtr
   int NewCost = TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
+  // Optionally, we are shuffling the loaded vector element(s) into place.
+  if (OffsetEltIndex)
+    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy);
 
   // We can aggressively convert to the vector form because the backend can
   // invert this transform if it does not result in a performance win.
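The profitability check that consumes these costs is not shown in this hunk, but per the "aggressively convert" note it keeps the vector form unless that form is strictly more expensive. A tiny sketch with made-up unit costs; the real values come from TargetTransformInfo and differ between subtargets such as SSE2 and AVX2, which is why the updated tests below diverge.

#include <cstdio>

int main() {
  // Made-up unit costs for illustration only; the pass queries TTI.
  int ScalarLoadCost = 1, InsertCost = 1;   // old: scalar load + insertelement
  int VectorLoadCost = 1, ShuffleCost = 1;  // new: wide load (+ shuffle when offset != 0)
  unsigned OffsetEltIndex = 1;              // loading one element past the base pointer

  int OldCost = ScalarLoadCost + InsertCost;
  int NewCost = VectorLoadCost + (OffsetEltIndex ? ShuffleCost : 0);

  // Bail out only when the vector form is strictly more expensive; ties go to
  // the vector form because the backend can undo the transform.
  std::printf("%s\n", (OldCost < NewCost) ? "keep scalar load" : "use vector load + shuffle");
  return 0;
}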
@@ -168,12 +207,13 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // Set everything but element 0 to undef to prevent poison from propagating
   // from the extra loaded memory. This will also optionally shrink/grow the
   // vector from the loaded size to the output size.
-  // We assume this operation has no cost in codegen.
+  // We assume this operation has no cost in codegen if there was no offset.
   // Note that we could use freeze to avoid poison problems, but then we might
   // still need a shuffle to change the vector size.
   unsigned OutputNumElts = Ty->getNumElements();
   SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
-  Mask[0] = 0;
+  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
+  Mask[0] = OffsetEltIndex;
   VecLd = Builder.CreateShuffleVector(VecLd, Mask);
 
   replaceValue(I, *VecLd);
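The shuffle mask built above can be pictured with a minimal standalone sketch, using std::vector in place of SmallVector and -1 in place of UndefMaskElem; the variable values are illustrative, not taken from a real run.

#include <cstdio>
#include <vector>

int main() {
  unsigned OutputNumElts = 8;    // e.g. the <8 x i16> result type in the tests
  unsigned OffsetEltIndex = 1;   // GEP was one i16 past the base pointer
  // Only lane 0 of the result is defined, and it pulls from OffsetEltIndex.
  std::vector<int> Mask(OutputNumElts, -1);
  Mask[0] = OffsetEltIndex;

  // Prints: 1 -1 -1 -1 -1 -1 -1 -1, i.e. the "<i32 1, i32 undef, ...>"
  // shufflevector mask seen in the AVX2 CHECK lines below.
  for (int M : Mask)
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}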

llvm/test/Transforms/VectorCombine/X86/load.ll

Lines changed: 48 additions & 25 deletions
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 | FileCheck %s
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -269,36 +269,50 @@ define <8 x i16> @gep01_load_i16_insert_v8i16(<8 x i16>* align 16 dereferenceabl
   ret <8 x i16> %r
 }
 
-; Negative test - can't safely load the offset vector, but could load+shuffle.
+; Can't safely load the offset vector, but can load+shuffle if it is profitable.
 
 define <8 x i16> @gep01_load_i16_insert_v8i16_deref(<8 x i16>* align 16 dereferenceable(17) %p) {
-; CHECK-LABEL: @gep01_load_i16_insert_v8i16_deref(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 0, i64 1
-; CHECK-NEXT:    [[S:%.*]] = load i16, i16* [[GEP]], align 2
-; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
-; CHECK-NEXT:    ret <8 x i16> [[R]]
+; SSE2-LABEL: @gep01_load_i16_insert_v8i16_deref(
+; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 0, i64 1
+; SSE2-NEXT:    [[S:%.*]] = load i16, i16* [[GEP]], align 2
+; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
+; SSE2-NEXT:    ret <8 x i16> [[R]]
+;
+; AVX2-LABEL: @gep01_load_i16_insert_v8i16_deref(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[P:%.*]], align 16
+; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT:    ret <8 x i16> [[R]]
 ;
   %gep = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i64 0, i64 1
   %s = load i16, i16* %gep, align 2
   %r = insertelement <8 x i16> undef, i16 %s, i64 0
   ret <8 x i16> %r
 }
 
-; TODO: Verify that alignment of the new load is not over-specified.
+; Verify that alignment of the new load is not over-specified.
 
 define <8 x i16> @gep01_load_i16_insert_v8i16_deref_minalign(<8 x i16>* align 2 dereferenceable(16) %p) {
-; CHECK-LABEL: @gep01_load_i16_insert_v8i16_deref_minalign(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 0, i64 1
-; CHECK-NEXT:    [[S:%.*]] = load i16, i16* [[GEP]], align 8
-; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
-; CHECK-NEXT:    ret <8 x i16> [[R]]
+; SSE2-LABEL: @gep01_load_i16_insert_v8i16_deref_minalign(
+; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 0, i64 1
+; SSE2-NEXT:    [[S:%.*]] = load i16, i16* [[GEP]], align 8
+; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
+; SSE2-NEXT:    ret <8 x i16> [[R]]
+;
+; AVX2-LABEL: @gep01_load_i16_insert_v8i16_deref_minalign(
+; AVX2-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[P:%.*]], align 2
+; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT:    ret <8 x i16> [[R]]
 ;
   %gep = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i64 0, i64 1
   %s = load i16, i16* %gep, align 8
   %r = insertelement <8 x i16> undef, i16 %s, i64 0
   ret <8 x i16> %r
 }
 
+; Negative test - if we are shuffling a load from the base pointer, the address offset
+; must be a multiple of element size.
+; TODO: Could bitcast around this limitation.
+
 define <4 x i32> @gep01_bitcast_load_i32_insert_v4i32(<16 x i8>* align 1 dereferenceable(16) %p) {
 ; CHECK-LABEL: @gep01_bitcast_load_i32_insert_v4i32(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[P:%.*]], i64 0, i64 1
@@ -316,10 +330,9 @@ define <4 x i32> @gep01_bitcast_load_i32_insert_v4i32(<16 x i8>* align 1 derefer
 
 define <4 x i32> @gep012_bitcast_load_i32_insert_v4i32(<16 x i8>* align 1 dereferenceable(20) %p) {
 ; CHECK-LABEL: @gep012_bitcast_load_i32_insert_v4i32(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[P:%.*]], i64 0, i64 12
-; CHECK-NEXT:    [[B:%.*]] = bitcast i8* [[GEP]] to i32*
-; CHECK-NEXT:    [[S:%.*]] = load i32, i32* [[B]], align 1
-; CHECK-NEXT:    [[R:%.*]] = insertelement <4 x i32> undef, i32 [[S]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8>* [[P:%.*]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 1
+; CHECK-NEXT:    [[R:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    ret <4 x i32> [[R]]
 ;
   %gep = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i64 0, i64 12
@@ -329,6 +342,10 @@ define <4 x i32> @gep012_bitcast_load_i32_insert_v4i32(<16 x i8>* align 1 derefe
   ret <4 x i32> %r
 }
 
+; Negative test - if we are shuffling a load from the base pointer, the address offset
+; must be a multiple of element size and the offset must be low enough to fit in the vector
+; (bitcasting would not help this case).
+
 define <4 x i32> @gep013_bitcast_load_i32_insert_v4i32(<16 x i8>* align 1 dereferenceable(20) %p) {
 ; CHECK-LABEL: @gep013_bitcast_load_i32_insert_v4i32(
 ; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[P:%.*]], i64 0, i64 13
@@ -608,15 +625,21 @@ define <8 x i32> @load_v1i32_extract_insert_v8i32_extra_use(<1 x i32>* align 16
   ret <8 x i32> %r
 }
 
-; TODO: Can't safely load the offset vector, but can load+shuffle if it is profitable.
+; Can't safely load the offset vector, but can load+shuffle if it is profitable.
 
 define <8 x i16> @gep1_load_v2i16_extract_insert_v8i16(<2 x i16>* align 1 dereferenceable(16) %p) {
-; CHECK-LABEL: @gep1_load_v2i16_extract_insert_v8i16(
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds <2 x i16>, <2 x i16>* [[P:%.*]], i64 1
-; CHECK-NEXT:    [[L:%.*]] = load <2 x i16>, <2 x i16>* [[GEP]], align 8
-; CHECK-NEXT:    [[S:%.*]] = extractelement <2 x i16> [[L]], i32 0
-; CHECK-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
-; CHECK-NEXT:    ret <8 x i16> [[R]]
+; SSE2-LABEL: @gep1_load_v2i16_extract_insert_v8i16(
+; SSE2-NEXT:    [[GEP:%.*]] = getelementptr inbounds <2 x i16>, <2 x i16>* [[P:%.*]], i64 1
+; SSE2-NEXT:    [[L:%.*]] = load <2 x i16>, <2 x i16>* [[GEP]], align 8
+; SSE2-NEXT:    [[S:%.*]] = extractelement <2 x i16> [[L]], i32 0
+; SSE2-NEXT:    [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
+; SSE2-NEXT:    ret <8 x i16> [[R]]
+;
+; AVX2-LABEL: @gep1_load_v2i16_extract_insert_v8i16(
+; AVX2-NEXT:    [[TMP1:%.*]] = bitcast <2 x i16>* [[P:%.*]] to <8 x i16>*
+; AVX2-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* [[TMP1]], align 4
+; AVX2-NEXT:    [[R:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> undef, <8 x i32> <i32 2, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT:    ret <8 x i16> [[R]]
 ;
   %gep = getelementptr inbounds <2 x i16>, <2 x i16>* %p, i64 1
   %l = load <2 x i16>, <2 x i16>* %gep, align 8
