Skip to content

[VectorCombine] Add free concats to shuffleToIdentity. #94954

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jun 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
143 changes: 102 additions & 41 deletions llvm/lib/Transforms/Vectorize/VectorCombine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1703,23 +1703,73 @@ generateInstLaneVectorFromOperand(ArrayRef<InstLane> Item, int Op) {
return NItem;
}

/// Detect concat of multiple values into a vector
static bool isFreeConcat(ArrayRef<InstLane> Item,
                         const TargetTransformInfo &TTI) {
  // Every lane must come from a source vector of the same (narrower) fixed
  // type, and the total lane count must be a proper multiple of its width.
  auto *SrcTy = cast<FixedVectorType>(Item.front().first->get()->getType());
  unsigned SrcNumElts = SrcTy->getNumElements();
  if (SrcNumElts == 1 || Item.size() == SrcNumElts ||
      Item.size() % SrcNumElts != 0)
    return false;

  // The concat counts as free when the target reports zero cost for an
  // identity two-source shuffle, usually meaning the wider type will be
  // split back apart during legalization.
  SmallVector<int, 16> IdentityMask(SrcNumElts * 2);
  std::iota(IdentityMask.begin(), IdentityMask.end(), 0);
  if (TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, SrcTy, IdentityMask,
                         TTI::TCK_RecipThroughput) != 0)
    return false;

  // The concats are later emitted as a binary tree of shuffles, which
  // restricts us to a power-of-2 number of slices.
  unsigned NumSlices = Item.size() / SrcNumElts;
  if (!isPowerOf2_32(NumSlices))
    return false;

  // Each slice must be one whole source vector of type SrcTy with its lanes
  // appearing in order. Note the lane check below also guards the U->get()
  // dereference: a poison lane has a null Use and a -1 lane index.
  for (unsigned S = 0; S != NumSlices; ++S) {
    Use *SliceStart = Item[S * SrcNumElts].first;
    if (!SliceStart || SliceStart->get()->getType() != SrcTy)
      return false;
    for (unsigned E = 0; E != SrcNumElts; ++E) {
      auto [U, Lane] = Item[S * SrcNumElts + E];
      if (Lane != static_cast<int>(E) || SliceStart->get() != U->get())
        return false;
    }
  }
  return true;
}

static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
const SmallPtrSet<Use *, 4> &IdentityLeafs,
const SmallPtrSet<Use *, 4> &SplatLeafs,
const SmallPtrSet<Use *, 4> &ConcatLeafs,
IRBuilder<> &Builder) {
auto [FrontU, FrontLane] = Item.front();

if (IdentityLeafs.contains(FrontU)) {
return FrontU->get();
}
if (SplatLeafs.contains(FrontU)) {
if (auto *ILI = dyn_cast<Instruction>(FrontU))
Builder.SetInsertPoint(*ILI->getInsertionPointAfterDef());
else if (auto *Arg = dyn_cast<Argument>(FrontU))
Builder.SetInsertPointPastAllocas(Arg->getParent());
SmallVector<int, 16> Mask(Ty->getNumElements(), FrontLane);
return Builder.CreateShuffleVector(FrontU->get(), Mask);
}
if (ConcatLeafs.contains(FrontU)) {
unsigned NumElts =
cast<FixedVectorType>(FrontU->get()->getType())->getNumElements();
SmallVector<Value *> Values(Item.size() / NumElts, nullptr);
for (unsigned S = 0; S < Values.size(); ++S)
Values[S] = Item[S * NumElts].first->get();

while (Values.size() > 1) {
NumElts *= 2;
SmallVector<int, 16> Mask(NumElts, 0);
std::iota(Mask.begin(), Mask.end(), 0);
SmallVector<Value *> NewValues(Values.size() / 2, nullptr);
for (unsigned S = 0; S < NewValues.size(); ++S)
NewValues[S] =
Builder.CreateShuffleVector(Values[S * 2], Values[S * 2 + 1], Mask);
Values = NewValues;
}
return Values[0];
}

auto *I = cast<Instruction>(FrontU->get());
auto *II = dyn_cast<IntrinsicInst>(I);
Expand All @@ -1730,16 +1780,16 @@ static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
Ops[Idx] = II->getOperand(Idx);
continue;
}
Ops[Idx] = generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx),
Ty, IdentityLeafs, SplatLeafs, Builder);
Ops[Idx] =
generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx), Ty,
IdentityLeafs, SplatLeafs, ConcatLeafs, Builder);
}

SmallVector<Value *, 8> ValueList;
for (const auto &Lane : Item)
if (Lane.first)
ValueList.push_back(Lane.first->get());

Builder.SetInsertPoint(I);
Type *DstTy =
FixedVectorType::get(I->getType()->getScalarType(), Ty->getNumElements());
if (auto *BI = dyn_cast<BinaryOperator>(I)) {
Expand Down Expand Up @@ -1790,7 +1840,7 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {

SmallVector<SmallVector<InstLane>> Worklist;
Worklist.push_back(Start);
SmallPtrSet<Use *, 4> IdentityLeafs, SplatLeafs;
SmallPtrSet<Use *, 4> IdentityLeafs, SplatLeafs, ConcatLeafs;
unsigned NumVisited = 0;

while (!Worklist.empty()) {
Expand Down Expand Up @@ -1839,7 +1889,7 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {

// We need each element to be the same type of value, and check that each
// element has a single use.
if (!all_of(drop_begin(Item), [Item](InstLane IL) {
if (all_of(drop_begin(Item), [Item](InstLane IL) {
Value *FrontV = Item.front().first->get();
if (!IL.first)
return true;
Expand All @@ -1860,48 +1910,59 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
return !II || (isa<IntrinsicInst>(FrontV) &&
II->getIntrinsicID() ==
cast<IntrinsicInst>(FrontV)->getIntrinsicID());
}))
return false;

// Check the operator is one that we support. We exclude div/rem in case
// they hit UB from poison lanes.
if ((isa<BinaryOperator>(FrontU) &&
!cast<BinaryOperator>(FrontU)->isIntDivRem()) ||
isa<CmpInst>(FrontU)) {
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
} else if (isa<UnaryOperator, TruncInst, ZExtInst, SExtInst>(FrontU)) {
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
} else if (isa<SelectInst>(FrontU)) {
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 2));
} else if (auto *II = dyn_cast<IntrinsicInst>(FrontU);
II && isTriviallyVectorizable(II->getIntrinsicID())) {
for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op)) {
if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
Value *FrontV = Item.front().first->get();
Use *U = IL.first;
return !U || (cast<Instruction>(U->get())->getOperand(Op) ==
cast<Instruction>(FrontV)->getOperand(Op));
}))
return false;
continue;
})) {
// Check the operator is one that we support.
if (isa<BinaryOperator, CmpInst>(FrontU)) {
// We exclude div/rem in case they hit UB from poison lanes.
if (auto *BO = dyn_cast<BinaryOperator>(FrontU);
BO && BO->isIntDivRem())
return false;
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
continue;
} else if (isa<UnaryOperator, TruncInst, ZExtInst, SExtInst>(FrontU)) {
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
continue;
} else if (isa<SelectInst>(FrontU)) {
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
Worklist.push_back(generateInstLaneVectorFromOperand(Item, 2));
continue;
} else if (auto *II = dyn_cast<IntrinsicInst>(FrontU);
II && isTriviallyVectorizable(II->getIntrinsicID())) {
for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op)) {
if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
Value *FrontV = Item.front().first->get();
Use *U = IL.first;
return !U || (cast<Instruction>(U->get())->getOperand(Op) ==
cast<Instruction>(FrontV)->getOperand(Op));
}))
return false;
continue;
}
Worklist.push_back(generateInstLaneVectorFromOperand(Item, Op));
}
Worklist.push_back(generateInstLaneVectorFromOperand(Item, Op));
continue;
}
} else {
return false;
}

if (isFreeConcat(Item, TTI)) {
ConcatLeafs.insert(FrontU);
continue;
}

return false;
}

if (NumVisited <= 1)
return false;

// If we got this far, we know the shuffles are superfluous and can be
// removed. Scan through again and generate the new tree of instructions.
Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs, Builder);
Builder.SetInsertPoint(&I);
Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs,
ConcatLeafs, Builder);
replaceValue(I, *V);
return true;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@ define void @add4(ptr noalias noundef %x, ptr noalias noundef %y, i32 noundef %n
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[OFFSET_IDX]], 3
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[TMP2]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
; CHECK-NEXT: store <32 x i16> [[INTERLEAVED_VEC]], ptr [[GEP]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
Expand Down Expand Up @@ -403,12 +403,12 @@ define void @addmul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z, i32
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP3]], align 2
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[TMP2]], [[WIDE_VEC36]]
; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[OFFSET_IDX]], 3
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[X]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 3
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[INVARIANT_GEP]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[TMP4]], [[WIDE_VEC36]]
; CHECK-NEXT: store <32 x i16> [[INTERLEAVED_VEC]], ptr [[GEP]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
Expand Down
Loading
Loading