Skip to content

[SLPVectorizer, TTI, X86, SystemZ] Move X86 specific handling into X86TTIImpl. #137830

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Apr 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions llvm/include/llvm/Analysis/TargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -941,6 +941,7 @@ class TargetTransformInfo {
const APInt &DemandedElts,
bool Insert, bool Extract,
TTI::TargetCostKind CostKind,
bool ForPoisonSrc = true,
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It'd be useful if you could include in the doxygen comment what ForPoisonSrc means

ArrayRef<Value *> VL = {}) const;

/// Estimate the overhead of scalarizing an instructions unique
Expand Down
3 changes: 2 additions & 1 deletion llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -453,7 +453,8 @@ class TargetTransformInfoImplBase {

virtual InstructionCost getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const {
return 0;
}

Expand Down
3 changes: 2 additions & 1 deletion llvm/include/llvm/CodeGen/BasicTTIImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -871,7 +871,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
/// extracted from vectors.
InstructionCost getScalarizationOverhead(
VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override {
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override {
/// FIXME: a bitfield is not a reasonable abstraction for talking about
/// which elements are needed from a scalable vector
if (isa<ScalableVectorType>(InTy))
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/Analysis/TargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -630,9 +630,10 @@ bool TargetTransformInfo::isTargetIntrinsicWithStructReturnOverloadAtField(

InstructionCost TargetTransformInfo::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc,
ArrayRef<Value *> VL) const {
return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
CostKind, VL);
CostKind, ForPoisonSrc, VL);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3854,7 +3854,8 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,

InstructionCost AArch64TTIImpl::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc,
ArrayRef<Value *> VL) const {
if (isa<ScalableVectorType>(Ty))
return InstructionCost::getInvalid();
if (Ty->getElementType()->isFloatingPointTy())
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,8 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {

InstructionCost getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override;
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override;

/// Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
Expand Down
6 changes: 4 additions & 2 deletions llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,8 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {

InstructionCost getScalarizationOverhead(
VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override {
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override {
if (!InTy->getElementCount().isFixed())
return InstructionCost::getInvalid();

Expand Down Expand Up @@ -141,7 +142,8 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
Insert = false;
}
return Cost + BaseT::getScalarizationOverhead(InTy, DemandedElts, Insert,
Extract, CostKind, VL);
Extract, CostKind,
ForPoisonSrc, VL);
}

void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -860,7 +860,8 @@ static unsigned isM1OrSmaller(MVT VT) {

InstructionCost RISCVTTIImpl::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc,
ArrayRef<Value *> VL) const {
if (isa<ScalableVectorType>(Ty))
return InstructionCost::getInvalid();

Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {

InstructionCost getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override;
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override;

InstructionCost
getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -495,7 +495,8 @@ static bool isFreeEltLoad(Value *Op) {

InstructionCost SystemZTTIImpl::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc,
ArrayRef<Value *> VL) const {
unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
InstructionCost Cost = 0;

Expand All @@ -517,7 +518,7 @@ InstructionCost SystemZTTIImpl::getScalarizationOverhead(
}

Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
CostKind, VL);
CostKind, ForPoisonSrc, VL);
return Cost;
}

Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,8 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
bool LSRWithInstrQueries() const override { return true; }
InstructionCost getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override;
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override;
bool supportsEfficientVectorElementLoadStore() const override { return true; }
bool enableInterleavedAccessVectorization() const override { return true; }

Expand Down
22 changes: 20 additions & 2 deletions llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4916,7 +4916,8 @@ InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,

InstructionCost X86TTIImpl::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
TTI::TargetCostKind CostKind, bool ForPoisonSrc,
ArrayRef<Value *> VL) const {
assert(DemandedElts.getBitWidth() ==
cast<FixedVectorType>(Ty)->getNumElements() &&
"Vector size mismatch");
Expand All @@ -4935,7 +4936,24 @@ InstructionCost X86TTIImpl::getScalarizationOverhead(
assert(NumLegalVectors >= 0 && "Negative cost!");

// For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
// cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
// cheaper than an accumulation of ISD::INSERT_VECTOR_ELT. SLPVectorizer has
// a special heuristic regarding poison input which is passed here in
// ForPoisonSrc.
if (Insert && !ForPoisonSrc) {
// This is nearly identical to BaseT::getScalarizationOverhead(), except
// it is passing nullptr to getVectorInstrCost() for Op0 (instead of
// Constant::getNullValue()), which makes the X86TTIImpl
// getVectorInstrCost() return 0 instead of 1.
for (unsigned I : seq(DemandedElts.getBitWidth())) {
if (!DemandedElts[I])
continue;
Cost += getVectorInstrCost(Instruction::InsertElement, Ty, CostKind, I,
Constant::getNullValue(Ty),
VL.empty() ? nullptr : VL[I]);
}
return Cost;
}

if (Insert) {
if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
(MScalarTy.isInteger() && ST->hasSSE41()) ||
Expand Down
3 changes: 2 additions & 1 deletion llvm/lib/Target/X86/X86TargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,8 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
Value *Op1) const override;
InstructionCost getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL = {}) const override;
TTI::TargetCostKind CostKind, bool ForPoisonSrc = true,
ArrayRef<Value *> VL = {}) const override;
InstructionCost
getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
const APInt &DemandedDstElts,
Expand Down
22 changes: 2 additions & 20 deletions llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5738,26 +5738,8 @@ getScalarizationOverhead(const TargetTransformInfo &TTI, Type *ScalarTy,
}
return Cost;
}
APInt NewDemandedElts = DemandedElts;
InstructionCost Cost = 0;
if (!ForPoisonSrc && Insert) {
// Handle insert into non-poison vector.
// TODO: Need to teach getScalarizationOverhead about insert elements into
// non-poison input vector to better handle such cases. Currently, it is
// very conservative and may "pessimize" the vectorization.
for (unsigned I : seq(DemandedElts.getBitWidth())) {
if (!DemandedElts[I])
continue;
Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, CostKind,
I, Constant::getNullValue(Ty),
VL.empty() ? nullptr : VL[I]);
}
NewDemandedElts.clearAllBits();
} else if (!NewDemandedElts.isZero()) {
Cost += TTI.getScalarizationOverhead(Ty, NewDemandedElts, Insert, Extract,
CostKind, VL);
}
return Cost;
return TTI.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
CostKind, ForPoisonSrc, VL);
}

/// This is similar to TargetTransformInfo::getVectorInstrCost, but if ScalarTy
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -114,13 +114,13 @@ define void @fun2(ptr %0, ptr %Dst) {
; CHECK: [[BB4]]:
; CHECK-NEXT: ret void
; CHECK: [[BB5]]:
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 24
; CHECK-NEXT: store i64 [[TMP2]], ptr [[TMP6]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 16
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> <i64 0, i64 poison>, i64 [[TMP2]], i32 1
; CHECK-NEXT: store <2 x i64> [[TMP8]], ptr [[TMP7]], align 8
; CHECK-NEXT: store i64 0, ptr [[TMP7]], align 8
; CHECK-NEXT: br label %[[BB4]]
;
; Looks like there is a bug in TTI, where insertion into index 1 is free, while insertion into index 0 costs 1.
; REMARK: Function: fun2
; REMARK-NOT: Function: fun2

%3 = load i64, ptr %0, align 8
%4 = icmp eq i64 %3, 0
Expand Down
42 changes: 22 additions & 20 deletions llvm/test/Transforms/SLPVectorizer/full-overlap-non-schedulable.ll
Original file line number Diff line number Diff line change
@@ -1,35 +1,37 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S --passes=slp-vectorizer < %s | FileCheck %s
; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx < %s \
; RUN: | FileCheck %s
; REQUIRES: x86-registered-target

define void @test(ptr %p1, ptr %0, i32 %1, i1 %c1, ptr %p2) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: ptr [[P1:%.*]], ptr [[TMP0:%.*]], i32 [[TMP1:%.*]], i1 [[C1:%.*]], ptr [[P2:%.*]]) {
; CHECK-SAME: ptr [[P1:%.*]], ptr [[TMP0:%.*]], i32 [[TMP1:%.*]], i1 [[C1:%.*]], ptr [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TOP:.*:]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i64 8
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP0]], i64 12
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP0]], i64 12
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP0]], i64 16
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0]], i64 20
; CHECK-NEXT: br i1 [[C1]], label %[[L42:.*]], label %[[L41:.*]]
; CHECK: [[L41]]:
; CHECK-NEXT: [[DOTNOT276:%.*]] = icmp eq ptr [[TMP2]], null
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[DOTNOT276]], i32 0, i32 [[TMP10]]
; CHECK-NEXT: [[DOTNOT277:%.*]] = icmp eq ptr [[TMP12]], null
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[DOTNOT276]], i32 0, i32 [[TMP6]]
; CHECK-NEXT: [[DOTNOT277:%.*]] = icmp eq ptr [[TMP3]], null
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP9:%.*]] = select i1 [[DOTNOT277]], i32 0, i32 [[TMP8]]
; CHECK-NEXT: [[DOTNOT278:%.*]] = icmp eq ptr [[TMP4]], null
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[DOTNOT278]], i32 0, i32 [[TMP15]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[DOTNOT278]], i32 0, i32 [[TMP10]]
; CHECK-NEXT: [[DOTNOT279:%.*]] = icmp eq ptr [[TMP5]], null
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP25:%.*]] = select i1 [[DOTNOT279]], i32 0, i32 [[TMP20]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = select i1 [[DOTNOT279]], i32 0, i32 [[TMP12]]
; CHECK-NEXT: br label %[[L112:.*]]
; CHECK: [[L42]]:
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[DOTNOT280:%.*]] = icmp eq i32 [[TMP14]], 0
; CHECK-NEXT: br i1 [[DOTNOT280]], label %[[L112]], label %[[L47:.*]]
; CHECK: [[L47]]:
; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[DOTNOT282:%.*]] = icmp eq ptr [[TMP4]], null
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP4]], align 4
; CHECK-NEXT: [[TMP17:%.*]] = select i1 [[DOTNOT282]], i32 0, i32 [[TMP16]]
Expand All @@ -38,14 +40,14 @@ define void @test(ptr %p1, ptr %0, i32 %1, i1 %c1, ptr %p2) {
; CHECK-NEXT: [[TMP19:%.*]] = select i1 [[DOTNOT283]], i32 0, i32 [[TMP18]]
; CHECK-NEXT: br label %[[L112]]
; CHECK: [[L112]]:
; CHECK-NEXT: [[TMP24:%.*]] = phi i32 [ [[TMP19]], %[[L47]] ], [ [[TMP25]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: [[TMP23:%.*]] = phi i32 [ [[TMP17]], %[[L47]] ], [ [[TMP11]], %[[L41]] ], [ [[TMP1]], %[[L42]] ]
; CHECK-NEXT: [[TMP22:%.*]] = phi i32 [ [[TMP13]], %[[L47]] ], [ [[TMP9]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: [[TMP21:%.*]] = phi i32 [ 0, %[[L47]] ], [ [[TMP7]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: store i32 [[TMP21]], ptr [[P2]], align 4
; CHECK-NEXT: store i32 [[TMP22]], ptr [[P1]], align 4
; CHECK-NEXT: store i32 [[TMP23]], ptr [[P2]], align 4
; CHECK-NEXT: store i32 [[TMP24]], ptr [[P1]], align 4
; CHECK-NEXT: [[VALUE_PHI13336:%.*]] = phi i32 [ [[TMP19]], %[[L47]] ], [ [[TMP13]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: [[VALUE_PHI12335:%.*]] = phi i32 [ [[TMP17]], %[[L47]] ], [ [[TMP11]], %[[L41]] ], [ [[TMP1]], %[[L42]] ]
; CHECK-NEXT: [[VALUE_PHI11334:%.*]] = phi i32 [ [[TMP15]], %[[L47]] ], [ [[TMP9]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: [[VALUE_PHI10333:%.*]] = phi i32 [ 0, %[[L47]] ], [ [[TMP7]], %[[L41]] ], [ 0, %[[L42]] ]
; CHECK-NEXT: store i32 [[VALUE_PHI10333]], ptr [[P2]], align 4
; CHECK-NEXT: store i32 [[VALUE_PHI11334]], ptr [[P1]], align 4
; CHECK-NEXT: store i32 [[VALUE_PHI12335]], ptr [[P2]], align 4
; CHECK-NEXT: store i32 [[VALUE_PHI13336]], ptr [[P1]], align 4
; CHECK-NEXT: ret void
;
top:
Expand Down