
Commit 4533d92

[VPlan] Introduce recipes for VP loads and stores.
Introduce new subclasses of VPWidenMemoryRecipe for VP (vector-predicated) loads and stores to address multiple TODOs from #76172.

Note that the introduction of the new recipes also improves code-gen for VP gather/scatters by removing the redundant header mask. With the new approach, it is not sufficient to look at users of the widened canonical IV to find all uses of the header mask. In some cases, a widened IV is used instead of separately widening the canonical IV. To handle those cases, iterate over all recipes in the vector loop region to make sure all widened memory recipes are processed.

Depends on #87411.
1 parent 5d31435 commit 4533d92
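The commit message only sketches the transform in prose. Below is a minimal, hypothetical C++ sketch of that shape, shown purely for illustration: the helper name, the way EVL is obtained, and the decision to simply forward the existing mask are assumptions, not the VPlanTransforms code this commit actually adds.

// Hypothetical sketch (not from this commit): replace plain widened memory
// recipes with the VP variants, walking every recipe in the vector loop
// region rather than only the users of the widened canonical IV.
#include "VPlan.h"
#include "VPlanCFG.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

static void replaceMemoryRecipesWithVPRecipes(VPlan &Plan, VPValue *EVL) {
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (auto *L = dyn_cast<VPWidenLoadRecipe>(&R)) {
        // The commit additionally drops the now-redundant header mask; this
        // sketch simply forwards whatever mask the recipe already has.
        auto *N = new VPWidenVPLoadRecipe(L, EVL, L->getMask());
        N->insertBefore(L);
        L->replaceAllUsesWith(N);
        L->eraseFromParent();
      } else if (auto *S = dyn_cast<VPWidenStoreRecipe>(&R)) {
        // Stores define no value, so no uses need to be rewired.
        auto *N = new VPWidenVPStoreRecipe(S, EVL, S->getMask());
        N->insertBefore(S);
        S->eraseFromParent();
      }
    }
  }
}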

8 files changed, +263 −152 lines changed


llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 79 additions & 82 deletions
@@ -9316,52 +9316,6 @@ void VPReplicateRecipe::execute(VPTransformState &State) {
   State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, Lane), State);
 }
 
-/// Creates either vp_store or vp_scatter intrinsics calls to represent
-/// predicated store/scatter.
-static Instruction *
-lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr,
-                                Value *StoredVal, bool IsScatter, Value *Mask,
-                                Value *EVL, const Align &Alignment) {
-  CallInst *Call;
-  if (IsScatter) {
-    Call = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
-                                   Intrinsic::vp_scatter,
-                                   {StoredVal, Addr, Mask, EVL});
-  } else {
-    VectorBuilder VBuilder(Builder);
-    VBuilder.setEVL(EVL).setMask(Mask);
-    Call = cast<CallInst>(VBuilder.createVectorInstruction(
-        Instruction::Store, Type::getVoidTy(EVL->getContext()),
-        {StoredVal, Addr}));
-  }
-  Call->addParamAttr(
-      1, Attribute::getWithAlignment(Call->getContext(), Alignment));
-  return Call;
-}
-
-/// Creates either vp_load or vp_gather intrinsics calls to represent
-/// predicated load/gather.
-static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder,
-                                                   VectorType *DataTy,
-                                                   Value *Addr, bool IsGather,
-                                                   Value *Mask, Value *EVL,
-                                                   const Align &Alignment) {
-  CallInst *Call;
-  if (IsGather) {
-    Call =
-        Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
-                                nullptr, "wide.masked.gather");
-  } else {
-    VectorBuilder VBuilder(Builder);
-    VBuilder.setEVL(EVL).setMask(Mask);
-    Call = cast<CallInst>(VBuilder.createVectorInstruction(
-        Instruction::Load, DataTy, Addr, "vp.op.load"));
-  }
-  Call->addParamAttr(
-      0, Attribute::getWithAlignment(Call->getContext(), Alignment));
-  return Call;
-}
-
 void VPWidenLoadRecipe::execute(VPTransformState &State) {
   auto *LI = cast<LoadInst>(&Ingredient);
 
@@ -9383,24 +9337,7 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
         Mask = Builder.CreateVectorReverse(Mask, "reverse");
     }
 
-    // TODO: split this into several classes for better design.
-    if (State.EVL) {
-      assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
-                              "explicit vector length.");
-      assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
-                 VPInstruction::ExplicitVectorLength &&
-             "EVL must be VPInstruction::ExplicitVectorLength.");
-      Value *EVL = State.get(State.EVL, VPIteration(0, 0));
-      // If EVL is not nullptr, then EVL must be a valid value set during plan
-      // creation, possibly default value = whole vector register length. EVL
-      // is created only if TTI prefers predicated vectorization, thus if EVL
-      // is not nullptr it also implies preference for predicated
-      // vectorization.
-      // FIXME: Support reverse loading after vp_reverse is added.
-      NewLI = lowerLoadUsingVectorIntrinsics(
-          Builder, DataTy, State.get(getAddr(), Part, !CreateGather),
-          CreateGather, Mask, EVL, Alignment);
-    } else if (CreateGather) {
+    if (CreateGather) {
       Value *VectorGep = State.get(getAddr(), Part);
       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, Mask,
                                          nullptr, "wide.masked.gather");
@@ -9425,6 +9362,44 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
   }
 }
 
+void VPWidenVPLoadRecipe::execute(VPTransformState &State) {
+  assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
+                          "explicit vector length.");
+  // FIXME: Support reverse loading after vp_reverse is added.
+  assert(!isReverse() && "Reverse loads are not implemented yet.");
+
+  auto *LI = cast<LoadInst>(&Ingredient);
+
+  Type *ScalarDataTy = getLoadStoreType(&Ingredient);
+  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
+  const Align Alignment = getLoadStoreAlignment(&Ingredient);
+  bool CreateGather = !isConsecutive();
+
+  auto &Builder = State.Builder;
+  State.setDebugLocFrom(getDebugLoc());
+  CallInst *NewLI;
+  Value *EVL = State.get(getEVL(), VPIteration(0, 0));
+  Value *Addr = State.get(getAddr(), 0, !CreateGather);
+  Value *Mask =
+      getMask() ? State.get(getMask(), 0)
+                : Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
+  if (CreateGather) {
+    NewLI =
+        Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
+                                nullptr, "wide.masked.gather");
+  } else {
+    VectorBuilder VBuilder(Builder);
+    VBuilder.setEVL(EVL).setMask(Mask);
+    NewLI = cast<CallInst>(VBuilder.createVectorInstruction(
+        Instruction::Load, DataTy, Addr, "vp.op.load"));
+  }
+  NewLI->addParamAttr(
+      0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
+
+  State.addMetadata(NewLI, LI);
+  State.set(this, NewLI, 0);
+}
+
 void VPWidenStoreRecipe::execute(VPTransformState &State) {
   auto *SI = cast<StoreInst>(&Ingredient);
 
@@ -9448,31 +9423,14 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
 
     Value *StoredVal = State.get(StoredVPValue, Part);
     if (isReverse()) {
-      assert(!State.EVL && "reversing not yet implemented with EVL");
       // If we store to reverse consecutive memory locations, then we need
       // to reverse the order of elements in the stored value.
       StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
       // We don't want to update the value in the map as it might be used in
       // another expression. So don't call resetVectorValue(StoredVal).
     }
     // TODO: split this into several classes for better design.
-    if (State.EVL) {
-      assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
-                              "explicit vector length.");
-      assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
-                 VPInstruction::ExplicitVectorLength &&
-             "EVL must be VPInstruction::ExplicitVectorLength.");
-      Value *EVL = State.get(State.EVL, VPIteration(0, 0));
-      // If EVL is not nullptr, then EVL must be a valid value set during plan
-      // creation, possibly default value = whole vector register length. EVL
-      // is created only if TTI prefers predicated vectorization, thus if EVL
-      // is not nullptr it also implies preference for predicated
-      // vectorization.
-      // FIXME: Support reverse store after vp_reverse is added.
-      NewSI = lowerStoreUsingVectorIntrinsics(
-          Builder, State.get(getAddr(), Part, !CreateScatter), StoredVal,
-          CreateScatter, Mask, EVL, Alignment);
-    } else if (CreateScatter) {
+    if (CreateScatter) {
       Value *VectorGep = State.get(getAddr(), Part);
       NewSI =
           Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, Mask);
@@ -9487,6 +9445,45 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
   }
 }
 
+void VPWidenVPStoreRecipe::execute(VPTransformState &State) {
+  assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
+                          "explicit vector length.");
+  // FIXME: Support reverse loading after vp_reverse is added.
+  assert(!isReverse() && "Reverse store are not implemented yet.");
+
+  auto *SI = cast<StoreInst>(&Ingredient);
+
+  VPValue *StoredValue = getStoredValue();
+  bool CreateScatter = !isConsecutive();
+  const Align Alignment = getLoadStoreAlignment(&Ingredient);
+
+  auto &Builder = State.Builder;
+  State.setDebugLocFrom(getDebugLoc());
+
+  CallInst *NewSI = nullptr;
+  Value *StoredVal = State.get(StoredValue, 0);
+  Value *EVL = State.get(getEVL(), VPIteration(0, 0));
+  // FIXME: Support reverse store after vp_reverse is added.
+  Value *Mask =
+      getMask() ? State.get(getMask(), 0)
+                : Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
+  Value *Addr = State.get(getAddr(), 0, !CreateScatter);
+  if (CreateScatter) {
+    NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
+                                    Intrinsic::vp_scatter,
+                                    {StoredVal, Addr, Mask, EVL});
+  } else {
+    VectorBuilder VBuilder(Builder);
+    VBuilder.setEVL(EVL).setMask(Mask);
+    NewSI = cast<CallInst>(VBuilder.createVectorInstruction(
+        Instruction::Store, Type::getVoidTy(EVL->getContext()),
+        {StoredVal, Addr}));
+  }
+  NewSI->addParamAttr(
+      1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
+
+  State.addMetadata(NewSI, SI);
+}
+
 // Determine how to lower the scalar epilogue, which depends on 1) optimising
 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
 // predication, and 4) a TTI hook that analyses whether the loop is suitable
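For reference, the consecutive (non-gather) path above builds its call through VectorBuilder, but the result has the same shape as emitting llvm.vp.load directly. The stand-alone sketch below (hypothetical helper, not part of the patch) makes the intrinsic's operand order and the alignment attribute placement explicit; since vp.store/vp.scatter take the stored value first and the pointer second, the store recipe above attaches the alignment to parameter index 1 instead of 0.

// Illustration only, not from the patch: emit llvm.vp.load via IRBuilder.
#include "llvm/IR/IRBuilder.h"

static llvm::CallInst *emitVPLoadSketch(llvm::IRBuilderBase &Builder,
                                        llvm::VectorType *DataTy,
                                        llvm::Value *Addr, llvm::Value *Mask,
                                        llvm::Value *EVL,
                                        llvm::Align Alignment) {
  using namespace llvm;
  // llvm.vp.load takes the pointer to load from, a per-lane i1 mask and the
  // explicit vector length (an i32 count of active lanes).
  CallInst *Load = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_load,
                                           {Addr, Mask, EVL},
                                           /*FMFSource=*/nullptr, "vp.load");
  // The pointer is parameter 0 of vp.load, so the alignment is attached there.
  Load->addParamAttr(
      0, Attribute::getWithAlignment(Load->getContext(), Alignment));
  return Load;
}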

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 93 additions & 15 deletions
@@ -242,15 +242,6 @@ struct VPTransformState {
   ElementCount VF;
   unsigned UF;
 
-  /// If EVL (Explicit Vector Length) is not nullptr, then EVL must be a valid
-  /// value set during plan transformation, possibly a default value = whole
-  /// vector register length. EVL is created only if TTI prefers predicated
-  /// vectorization, thus if EVL is not nullptr it also implies preference for
-  /// predicated vectorization.
-  /// TODO: this is a temporarily solution, the EVL must be explicitly used by
-  /// the recipes and must be removed here.
-  VPValue *EVL = nullptr;
-
   /// Hold the indices to generate specific scalar instructions. Null indicates
   /// that all instances are to be generated, using either scalar or vector
   /// instructions.
@@ -877,6 +868,8 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
   case VPRecipeBase::VPBranchOnMaskSC:
   case VPRecipeBase::VPWidenLoadSC:
   case VPRecipeBase::VPWidenStoreSC:
+  case VPRecipeBase::VPWidenVPLoadSC:
+  case VPRecipeBase::VPWidenVPStoreSC:
     // TODO: Widened stores don't define a value, but widened loads do. Split
     // the recipes to be able to make widened loads VPSingleDefRecipes.
     return false;
@@ -2318,11 +2311,15 @@ class VPWidenMemoryRecipe : public VPRecipeBase {
   }
 
 public:
-  VPWidenMemoryRecipe *clone() override = 0;
+  VPWidenMemoryRecipe *clone() override {
+    llvm_unreachable("cloning not supported");
+  }
 
   static inline bool classof(const VPRecipeBase *R) {
-    return R->getVPDefID() == VPDef::VPWidenLoadSC ||
-           R->getVPDefID() == VPDef::VPWidenStoreSC;
+    return R->getVPDefID() == VPRecipeBase::VPWidenLoadSC ||
+           R->getVPDefID() == VPRecipeBase::VPWidenStoreSC ||
+           R->getVPDefID() == VPRecipeBase::VPWidenVPLoadSC ||
+           R->getVPDefID() == VPRecipeBase::VPWidenVPStoreSC;
   }
 
   static inline bool classof(const VPUser *U) {
@@ -2390,13 +2387,49 @@ struct VPWidenLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
   bool onlyFirstLaneUsed(const VPValue *Op) const override {
     assert(is_contained(operands(), Op) &&
            "Op must be an operand of the recipe");
-
-    // Widened, consecutive loads operations only demand the first lane of
-    // their address.
+    // Widened, consecutive memory operations only demand the first lane of
+    // their address, unless the same operand is also stored. That latter can
+    // happen with opaque pointers.
     return Op == getAddr() && isConsecutive();
   }
 };
 
+/// A recipe for widening load operations with vector-predication intrinsics,
+/// using the address to load from, the explicit vector length and an optional
+/// mask.
+struct VPWidenVPLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
+  VPWidenVPLoadRecipe(VPWidenLoadRecipe *L, VPValue *EVL, VPValue *Mask)
+      : VPWidenMemoryRecipe(
+            VPDef::VPWidenVPLoadSC, *cast<LoadInst>(&L->getIngredient()),
+            {L->getAddr(), EVL}, L->isConsecutive(), false, L->getDebugLoc()),
+        VPValue(this, &getIngredient()) {
+    setMask(Mask);
+  }
+
+  VP_CLASSOF_IMPL(VPDef::VPWidenVPLoadSC)
+
+  /// Return the EVL operand.
+  VPValue *getEVL() const { return getOperand(1); }
+
+  /// Generate the wide load/store.
+  void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+
+  /// Returns true if the recipe only uses the first lane of operand \p Op.
+  bool onlyFirstLaneUsed(const VPValue *Op) const override {
+    assert(is_contained(operands(), Op) &&
+           "Op must be an operand of the recipe");
+    // Widened loads only demand the first lane of EVL and consecutive loads
+    // only demand the first lane of their address.
+    return Op == getEVL() || (Op == getAddr() && isConsecutive());
+  }
+};
+
 /// A recipe for widening store operations, using the stored value, the address
 /// to store to and an optional mask.
 struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
@@ -2436,6 +2469,51 @@ struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
     return Op == getAddr() && isConsecutive() && Op != getStoredValue();
   }
 };
+
+/// A recipe for widening store operations with vector-predication intrinsics,
+/// using the value to store, the address to store to , the explicit vector
+/// length and an optional mask.
+struct VPWidenVPStoreRecipe final : public VPWidenMemoryRecipe {
+  VPWidenVPStoreRecipe(VPWidenStoreRecipe *S, VPValue *EVL, VPValue *Mask)
+      : VPWidenMemoryRecipe(VPDef::VPWidenVPStoreSC,
+                            *cast<StoreInst>(&S->getIngredient()),
+                            {S->getAddr(), S->getStoredValue(), EVL},
+                            S->isConsecutive(), false, S->getDebugLoc()) {
+    setMask(Mask);
+  }
+
+  VP_CLASSOF_IMPL(VPDef::VPWidenVPStoreSC)
+
+  /// Return the address accessed by this recipe.
+  VPValue *getStoredValue() const { return getOperand(1); }
+
+  /// Return the EVL operand.
+  VPValue *getEVL() const { return getOperand(2); }
+
+  /// Generate the wide load/store.
+  void execute(VPTransformState &State) override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+
+  /// Returns true if the recipe only uses the first lane of operand \p Op.
+  bool onlyFirstLaneUsed(const VPValue *Op) const override {
+    assert(is_contained(operands(), Op) &&
+           "Op must be an operand of the recipe");
+    if (Op == getEVL()) {
+      assert(getStoredValue() != Op && "unexpected store of EVL");
+      return true;
+    }
+    // Widened, consecutive memory operations only demand the first lane of
+    // their address, unless the same operand is also stored. That latter can
+    // happen with opaque pointers.
+    return Op == getAddr() && isConsecutive() && Op != getStoredValue();
+  }
+};
+
 
 /// Recipe to expand a SCEV expression.
 class VPExpandSCEVRecipe : public VPSingleDefRecipe {
   const SCEV *Expr;
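With the classof extension above, generic VPlan code can keep matching all four widened memory recipes through the common base and only branch where the EVL matters. The following is a hypothetical usage sketch (not in the patch) that also documents the operand layout implied by the constructors, i.e. {addr, EVL[, mask]} for the VP load and {addr, stored value, EVL[, mask]} for the VP store.

// Hypothetical consumer of the extended hierarchy; accessor indices follow
// the constructors shown in the diff above.
static void inspectWidenedMemoryRecipe(VPRecipeBase &R) {
  auto *MemR = dyn_cast<VPWidenMemoryRecipe>(&R);
  if (!MemR)
    return; // Not one of the four widened memory recipes.
  if (auto *L = dyn_cast<VPWidenVPLoadRecipe>(MemR)) {
    VPValue *Addr = L->getAddr(); // operand 0
    VPValue *EVL = L->getEVL();   // operand 1; the mask, if set, comes last
    (void)Addr;
    (void)EVL;
  } else if (auto *S = dyn_cast<VPWidenVPStoreRecipe>(MemR)) {
    VPValue *Addr = S->getAddr();             // operand 0
    VPValue *StoredVal = S->getStoredValue(); // operand 1
    VPValue *EVL = S->getEVL();               // operand 2; mask, if set, last
    (void)Addr;
    (void)StoredVal;
    (void)EVL;
  }
  // VPWidenLoadRecipe / VPWidenStoreRecipe take the non-EVL path unchanged.
}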

llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
 }
 
 Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
-  assert(isa<VPWidenLoadRecipe>(R) &&
+  assert((isa<VPWidenLoadRecipe>(R) || isa<VPWidenVPLoadRecipe>(R)) &&
          "Store recipes should not define any values");
   return cast<LoadInst>(&R->getIngredient())->getType();
 }
