
Commit 1c4fedf

[LoopVectorize] Don't tail-fold for scalable VFs when there is no scalar tail
Currently in LoopVectorize we avoid tail-folding if we can prove the trip count is always a multiple of the maximum fixed-width VF. This works because we know the vectoriser only ever chooses a VF that is a power of 2. However, if we are also considering scalable VFs then we conservatively bail out of the optimisation because we don't know the value of vscale, which could be an odd or prime number, etc.

This patch tries to enable the same optimisation for scalable VFs by asking if vscale is known to be a power of 2. If so, we can then query the maximum value of vscale and use the same logic as we do for fixed-width VFs.

I've also added a new TTI hook called isVScaleKnownToBeAPowerOfTwo that does the same thing as the existing TargetLowering hook.

Differential Revision: https://reviews.llvm.org/D146199
1 parent 8d3a09d commit 1c4fedf
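To make the change concrete, here is a small self-contained C++ sketch (not the code from this commit; maxPowerOf2RuntimeVF and canSkipTailFolding are hypothetical helpers) of the decision the patch enables: the scalar tail can be dropped when the trip count is a known multiple of the largest power-of-2 runtime VF, and for scalable VFs that bound only exists when vscale is known to be a power of 2 and has a known maximum.

// Illustrative sketch only; these helpers are hypothetical, not LLVM APIs.
#include <algorithm>
#include <cstdint>
#include <optional>

// Largest runtime VF that is guaranteed to be a power of 2, or nullopt if no
// such bound exists (in which case tail-folding has to be kept).
std::optional<uint64_t>
maxPowerOf2RuntimeVF(uint64_t MaxFixedVF,                   // e.g. 4
                     std::optional<uint64_t> ScalableMinVF, // e.g. 4 for <vscale x 4 x i32>
                     std::optional<uint64_t> MaxVScale,     // e.g. 16 from vscale_range(1,16)
                     bool VScaleIsPowerOfTwo) {
  uint64_t Max = MaxFixedVF;
  if (ScalableMinVF) {
    if (!MaxVScale || !VScaleIsPowerOfTwo)
      return std::nullopt; // vscale could be odd or prime; stay conservative.
    Max = std::max(Max, *MaxVScale * *ScalableMinVF);
  }
  return Max;
}

// The tail can be elided when the trip count is a known multiple of the
// maximum power-of-2 VF times the interleave count, because every smaller
// power-of-2 VF then divides the trip count as well.
bool canSkipTailFolding(uint64_t TripCount, uint64_t InterleaveCount,
                        std::optional<uint64_t> MaxVF) {
  if (!MaxVF)
    return false;
  uint64_t IC = InterleaveCount ? InterleaveCount : 1;
  return TripCount % (*MaxVF * IC) == 0;
}

With the numbers used in the tests below (trip count 1024, vscale_range(1,16), and a <vscale x 4 x i32> VF), the bound is 16 * 4 = 64 and 1024 % 64 == 0, so the loop can be vectorised without predication.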

File tree

11 files changed: +97 -50 lines changed

llvm/include/llvm/Analysis/TargetTransformInfo.h

Lines changed: 7 additions & 0 deletions
@@ -1055,6 +1055,9 @@ class TargetTransformInfo {
   /// \return the value of vscale to tune the cost model for.
   std::optional<unsigned> getVScaleForTuning() const;
 
+  /// \return true if vscale is known to be a power of 2
+  bool isVScaleKnownToBeAPowerOfTwo() const;
+
   /// \return True if the vectorization factor should be chosen to
   /// make the vector of the smallest element type match the size of a
   /// vector register. For wider element types, this could result in
@@ -1815,6 +1818,7 @@ class TargetTransformInfo::Concept {
   virtual unsigned getMinVectorRegisterBitWidth() const = 0;
   virtual std::optional<unsigned> getMaxVScale() const = 0;
   virtual std::optional<unsigned> getVScaleForTuning() const = 0;
+  virtual bool isVScaleKnownToBeAPowerOfTwo() const = 0;
   virtual bool
   shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const = 0;
   virtual ElementCount getMinimumVF(unsigned ElemWidth,
@@ -2360,6 +2364,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
   std::optional<unsigned> getVScaleForTuning() const override {
     return Impl.getVScaleForTuning();
   }
+  bool isVScaleKnownToBeAPowerOfTwo() const override {
+    return Impl.isVScaleKnownToBeAPowerOfTwo();
+  }
   bool shouldMaximizeVectorBandwidth(
       TargetTransformInfo::RegisterKind K) const override {
     return Impl.shouldMaximizeVectorBandwidth(K);

llvm/include/llvm/Analysis/TargetTransformInfoImpl.h

Lines changed: 1 addition & 0 deletions
@@ -439,6 +439,7 @@ class TargetTransformInfoImplBase {
 
   std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
   std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
+  bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
 
   bool
   shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const {

llvm/include/llvm/CodeGen/BasicTTIImpl.h

Lines changed: 1 addition & 0 deletions
@@ -714,6 +714,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
   std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
+  bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
 
   /// Estimate the overhead of scalarizing an instruction. Insert and Extract
   /// are set if the demanded result elements need to be inserted and/or

llvm/lib/Analysis/TargetTransformInfo.cpp

Lines changed: 4 additions & 0 deletions
@@ -680,6 +680,10 @@ std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
   return TTIImpl->getVScaleForTuning();
 }
 
+bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const {
+  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
+}
+
 bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
     TargetTransformInfo::RegisterKind K) const {
   return TTIImpl->shouldMaximizeVectorBandwidth(K);

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 0 additions & 4 deletions
@@ -6030,10 +6030,6 @@ bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {
   return !Subtarget->useSVEForFixedLengthVectors();
 }
 
-bool AArch64TargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
-  return true;
-}
-
 bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
     EVT VT, bool OverrideNEON) const {
   if (!VT.isFixedLengthVector() || !VT.isSimple())

llvm/lib/Target/AArch64/AArch64ISelLowering.h

Lines changed: 1 addition & 1 deletion
@@ -917,7 +917,7 @@ class AArch64TargetLowering : public TargetLowering {
                               SDValue Chain, SDValue InFlag,
                               SDValue PStateSM, bool Entry) const;
 
-  bool isVScaleKnownToBeAPowerOfTwo() const override;
+  bool isVScaleKnownToBeAPowerOfTwo() const override { return true; }
 
   // Normally SVE is only used for byte size vectors that do not fit within a
   // NEON vector. This changes when OverrideNEON is true, allowing SVE to be

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h

Lines changed: 2 additions & 0 deletions
@@ -131,6 +131,8 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return ST->getVScaleForTuning();
   }
 
+  bool isVScaleKnownToBeAPowerOfTwo() const { return true; }
+
   bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;
 
   /// Try to return an estimate cost factor that can be used as a multiplier

llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h

Lines changed: 4 additions & 0 deletions
@@ -232,6 +232,10 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     return ST->is64Bit() && !ST->hasVInstructionsI64();
  }
 
+  bool isVScaleKnownToBeAPowerOfTwo() const {
+    return TLI->isVScaleKnownToBeAPowerOfTwo();
+  }
+
   /// \returns How the target needs this vector-predicated operation to be
   /// transformed.
   TargetTransformInfo::VPLegalization

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 18 additions & 8 deletions
@@ -5155,16 +5155,26 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
   }
 
   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
+
   // Avoid tail folding if the trip count is known to be a multiple of any VF
-  // we chose.
-  // FIXME: The condition below pessimises the case for fixed-width vectors,
-  // when scalable VFs are also candidates for vectorization.
-  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
-    ElementCount MaxFixedVF = MaxFactors.FixedVF;
-    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
+  // we choose.
+  std::optional<unsigned> MaxPowerOf2RuntimeVF =
+      MaxFactors.FixedVF.getFixedValue();
+  if (MaxFactors.ScalableVF) {
+    std::optional<unsigned> MaxVScale = getMaxVScale(*TheFunction, TTI);
+    if (MaxVScale && TTI.isVScaleKnownToBeAPowerOfTwo()) {
+      MaxPowerOf2RuntimeVF = std::max<unsigned>(
+          *MaxPowerOf2RuntimeVF,
+          *MaxVScale * MaxFactors.ScalableVF.getKnownMinValue());
+    } else
+      MaxPowerOf2RuntimeVF = std::nullopt; // Stick with tail-folding for now.
+  }
+
+  if (MaxPowerOf2RuntimeVF) {
+    assert((UserVF.isNonZero() || isPowerOf2_32(*MaxPowerOf2RuntimeVF)) &&
            "MaxFixedVF must be a power of 2");
-    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
-                                   : MaxFixedVF.getFixedValue();
+    unsigned MaxVFtimesIC =
+        UserIC ? *MaxPowerOf2RuntimeVF * UserIC : *MaxPowerOf2RuntimeVF;
     ScalarEvolution *SE = PSE.getSE();
     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
     const SCEV *ExitCount = SE->getAddExpr(
Lines changed: 42 additions & 11 deletions
@@ -1,20 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
 ; RUN: opt -passes=loop-vectorize -force-target-instruction-cost=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s 2>&1 | FileCheck %s
 
-; This test currently fails when the LV calculates a maximums safe
-; distance for scalable vectors, because the code to eliminate the tail is
-; pessimistic when scalable vectors are considered. This will be addressed
-; in a future patch, at which point we should be able to un-XFAIL the
-; test. The expected output is to vectorize this loop without predication
-; (and thus have unpredicated vector store).
-; XFAIL: *
-
-; CHECK: store <4 x i32>
-
 target triple = "aarch64"
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
 
 define void @f1(ptr %A) #0 {
+; CHECK-LABEL: define void @f1
+; CHECK-SAME: (ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 0
+; CHECK-NEXT: store <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 1024
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
 entry:
   br label %for.body
 
@@ -30,4 +61,4 @@ exit:
   ret void
 }
 
-attributes #0 = { "target-features"="+sve" }
+attributes #0 = { "target-features"="+sve" vscale_range(1,16) }

llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll

Lines changed: 17 additions & 26 deletions
@@ -777,41 +777,32 @@ while.end.loopexit: ; preds = %while.body
 define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
 ; CHECK-LABEL: @simple_memset_trip1024(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = sub i64 1024, [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp ugt i64 1024, [[TMP6]]
-; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1024)
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[VAL:%.*]], i64 0
 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP11]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP12]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
-; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
-; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP14]]
-; CHECK-NEXT: [[TMP15:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <vscale x 4 x i1> [[TMP15]], i32 0
-; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX1]], 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 0
+; CHECK-NEXT: store <vscale x 4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
+; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: br i1 true, label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
@@ -846,4 +837,4 @@ while.end.loopexit: ; preds = %while.body
 !3 = distinct !{!3, !4}
 !4 = !{!"llvm.loop.vectorize.width", i32 4}
 
-attributes #0 = { "target-features"="+sve" }
+attributes #0 = { "target-features"="+sve" vscale_range(1,16) }
