
Commit c3c1c6a

[LV][AArch64] Prefer Fixed over Scalable if cost-model is equal (Neoverse V2)
For the Neoverse V2 we would like to prefer fixed width over scalable vectorisation if the cost-model assigns an equal cost to both for certain loops. This improves 7 kernels from TSVC-2 and several production kernels by about 2x, and does not affect SPEC2017 INT and FP.

This also adds a new TTI hook that can steer the loop vectorizer towards preferring fixed width vectorization, which can be set per CPU. For now, this is only enabled for the Neoverse V2.

There are three reasons why preferring NEON might be better when the cost-model is a tie and the SVE vector size is the same as NEON (128-bit): architectural reasons, micro-architectural reasons, and SVE codegen reasons. The latter will be improved over time, so the more important reasons are the former two. For example, one (micro-)architectural reason is the use of LDP/STP instructions, which are not available in SVE2, and the avoidance of predication.

For what it is worth, this codegen strategy of generating more NEON is in line with GCC's, which is actually even more aggressive in generating NEON when no predication is required. We could be smarter about the decision making, but this seems a good first step in the right direction, and we can always revise it later (for example, by making the target hook more general).
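As a concrete illustration (not part of the commit), the new regression test added at the end of this change exercises a TSVC-style kernel of the following shape. A C++ sketch of the corresponding source loop, assuming it is compiled with clang and -mcpu=neoverse-v2:

// Sketch only: a TSVC-style kernel matching the shape of the loop in the new
// regression test. When the fixed-width (NEON) and scalable (SVE) plans are
// assigned equal costs, the vectorizer now keeps the fixed-width plan, which
// allows LDP/STP pairs and needs no predication.
void kernel(float *__restrict a, const float *__restrict b) {
  for (int i = 0; i < 16000; ++i)
    a[i + 16000] = a[i] + b[i];
}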
1 parent 35ddc17 · commit c3c1c6a

8 files changed: +113 −1 lines changed


llvm/include/llvm/Analysis/TargetTransformInfo.h

Lines changed: 9 additions & 0 deletions
@@ -1674,6 +1674,11 @@ class TargetTransformInfo {
         false; ///< If op is an fp min/max, whether NaNs may be present.
   };
 
+  /// \returns True if the target prefers fixed width vectorization if the
+  /// loop vectorizer's cost-model assigns an equal cost to the fixed and
+  /// scalable version of the vectorized loop.
+  bool preferFixedOverScalableIfEqualCost() const;
+
   /// \returns True if the target prefers reductions in loop.
   bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                              ReductionFlags Flags) const;
@@ -2143,6 +2148,7 @@ class TargetTransformInfo::Concept {
   virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const = 0;
+  virtual bool preferFixedOverScalableIfEqualCost() const = 0;
   virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                                      ReductionFlags) const = 0;
   virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
@@ -2873,6 +2879,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
                                 VectorType *VecTy) const override {
     return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
   }
+  bool preferFixedOverScalableIfEqualCost() const override {
+    return Impl.preferFixedOverScalableIfEqualCost();
+  }
   bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                              ReductionFlags Flags) const override {
     return Impl.preferInLoopReduction(Opcode, Ty, Flags);
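For readers unfamiliar with how a TTI hook is threaded through this header, here is a minimal standalone sketch (illustrative names only, not LLVM code) of the Concept/Model type-erasure pattern used above: the public method forwards to a virtual Concept interface, and the templated Model delegates to the concrete target implementation.

#include <iostream>
#include <memory>
#include <utility>

// Type-erased interface, analogous to TargetTransformInfo::Concept.
struct Concept {
  virtual ~Concept() = default;
  virtual bool preferFixedOverScalableIfEqualCost() const = 0;
};

// Templated adapter, analogous to TargetTransformInfo::Model.
template <typename T> struct Model final : Concept {
  T Impl;
  explicit Model(T I) : Impl(std::move(I)) {}
  bool preferFixedOverScalableIfEqualCost() const override {
    return Impl.preferFixedOverScalableIfEqualCost();
  }
};

// Stand-in for a concrete target implementation such as AArch64TTIImpl.
struct FakeTargetTTI {
  bool preferFixedOverScalableIfEqualCost() const { return true; }
};

int main() {
  std::unique_ptr<Concept> TTI =
      std::make_unique<Model<FakeTargetTTI>>(FakeTargetTTI{});
  std::cout << TTI->preferFixedOverScalableIfEqualCost() << '\n'; // prints 1
}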

llvm/include/llvm/Analysis/TargetTransformInfoImpl.h

Lines changed: 2 additions & 0 deletions
@@ -913,6 +913,8 @@ class TargetTransformInfoImplBase {
     return VF;
   }
 
+  bool preferFixedOverScalableIfEqualCost() const { return false; }
+
   bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                              TTI::ReductionFlags Flags) const {
     return false;

llvm/lib/Analysis/TargetTransformInfo.cpp

Lines changed: 4 additions & 0 deletions
@@ -1282,6 +1282,10 @@ unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
   return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
 }
 
+bool TargetTransformInfo::preferFixedOverScalableIfEqualCost() const {
+  return TTIImpl->preferFixedOverScalableIfEqualCost();
+}
+
 bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                 ReductionFlags Flags) const {
   return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);

llvm/lib/Target/AArch64/AArch64Features.td

Lines changed: 4 additions & 0 deletions
@@ -244,6 +244,10 @@ def FeatureExperimentalZeroingPseudos
 def FeatureUseScalarIncVL : SubtargetFeature<"use-scalar-inc-vl",
   "UseScalarIncVL", "true", "Prefer inc/dec over add+cnt">;
 
+def FeatureUseFixedOverScalableIfEqualCost: SubtargetFeature<"use-fixed-over-scalable-if-equal-cost",
+  "UseFixedOverScalableIfEqualCost", "true",
+  "Prefer fixed width loop vectorization over scalable if the cost-model assigns equal costs">;
+
 def FeatureBF16 : Extension<"bf16", "BF16",
   "Enable BFloat16 Extension (FEAT_BF16)", [],
   "FEAT_BF16", "+bf16", 280>;

llvm/lib/Target/AArch64/AArch64Processors.td

Lines changed: 1 addition & 0 deletions
@@ -489,6 +489,7 @@ def TuneNeoverseV2 : SubtargetFeature<"neoversev2", "ARMProcFamily", "NeoverseV2",
                                       FeatureALULSLFast,
                                       FeaturePostRAScheduler,
                                       FeatureEnableSelectOptimize,
+                                      FeatureUseFixedOverScalableIfEqualCost,
                                       FeaturePredictableSelectIsExpensive]>;
 
 def TuneNeoverseV3 : SubtargetFeature<"neoversev3", "ARMProcFamily", "NeoverseV3",

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h

Lines changed: 4 additions & 0 deletions
@@ -371,6 +371,10 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return TailFoldingStyle::DataWithoutLaneMask;
   }
 
+  bool preferFixedOverScalableIfEqualCost() const {
+    return ST->useFixedOverScalableIfEqualCost();
+  }
+
   bool preferPredicateOverEpilogue(TailFoldingInfo *TFI);
 
   bool supportsScalableVectors() const { return ST->hasSVE(); }

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 3 additions & 1 deletion
@@ -4780,7 +4780,9 @@ bool LoopVectorizationPlanner::isMoreProfitable(
   // Assume vscale may be larger than 1 (or the value being tuned for),
   // so that scalable vectorization is slightly favorable over fixed-width
   // vectorization.
-  bool PreferScalable = A.Width.isScalable() && !B.Width.isScalable();
+  bool PreferScalable = !TTI.preferFixedOverScalableIfEqualCost() &&
+                        A.Width.isScalable() && !B.Width.isScalable();
+
   auto CmpFn = [PreferScalable](const InstructionCost &LHS,
                                 const InstructionCost &RHS) {
     return PreferScalable ? LHS <= RHS : LHS < RHS;
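To make the effect of this change concrete, here is a small standalone sketch (assumed costs, not LLVM code) of how the comparator resolves a cost tie: with the hook enabled, PreferScalable is false, so an equal cost no longer favours the scalable candidate.

#include <cassert>

// CostA belongs to the scalable candidate, CostB to the fixed-width one.
// PreferScalable mirrors the flag computed above, which is now also gated on
// !TTI.preferFixedOverScalableIfEqualCost().
static bool isMoreProfitable(long CostA, long CostB, bool PreferScalable) {
  return PreferScalable ? CostA <= CostB : CostA < CostB;
}

int main() {
  // Equal costs: the scalable plan wins the tie only when the target does not
  // prefer fixed-width vectors.
  assert(isMoreProfitable(10, 10, /*PreferScalable=*/true));   // scalable wins
  assert(!isMoreProfitable(10, 10, /*PreferScalable=*/false)); // fixed wins
  return 0;
}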
Lines changed: 86 additions & 0 deletions
; RUN: opt -S < %s -passes=loop-vectorize -force-target-instruction-cost=1 | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64-unknown-linux-gnu"

@a = dso_local local_unnamed_addr global [32000 x float] zeroinitializer, align 64
@b = dso_local local_unnamed_addr global [32000 x float] zeroinitializer, align 64

define void @NeoverseV2() #0 {
; CHECK-LABEL: define void @NeoverseV2(
; CHECK: store <4 x float>
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %indvars.iv
  %1 = load float, ptr %arrayidx2, align 4
  %add = fadd fast float %1, %0
  %2 = add nuw nsw i64 %indvars.iv, 16000
  %arrayidx5 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %2
  store float %add, ptr %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 16000
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}

define void @GenericCPU() #1 {
; CHECK-LABEL: define void @GenericCPU(
; CHECK: store <vscale x 4 x float>
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %indvars.iv
  %1 = load float, ptr %arrayidx2, align 4
  %add = fadd fast float %1, %0
  %2 = add nuw nsw i64 %indvars.iv, 16000
  %arrayidx5 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %2
  store float %add, ptr %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 16000
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}

define void @GenericCPUPreferFixed() #2 {
; CHECK-LABEL: define void @GenericCPUPreferFixed(
; CHECK: store <vscale x 4 x float>
;
entry:
  br label %for.body

for.cond.cleanup:
  ret void

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds [32000 x float], ptr @b, i64 0, i64 %indvars.iv
  %1 = load float, ptr %arrayidx2, align 4
  %add = fadd fast float %1, %0
  %2 = add nuw nsw i64 %indvars.iv, 16000
  %arrayidx5 = getelementptr inbounds [32000 x float], ptr @a, i64 0, i64 %2
  store float %add, ptr %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 16000
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}

attributes #0 = { vscale_range(1,16) "target-cpu"="neoverse-v2" "target-features"="+sve,+sve2,+v9a" }
attributes #1 = { vscale_range(1,16) "target-cpu"="generic" "target-features"="+sve,+v9a" }
attributes #2 = { vscale_range(1,16) "target-cpu"="generic" "target-features"="+sve,+v9a,+use-fixed-over-scalable-if-equal-cost" }
