[LV] Split checking if tail-folding is possible, collecting masked ops. #77612

Merged: 3 commits, Jul 8, 2024
@@ -276,9 +276,12 @@ class LoopVectorizationLegality
   bool canVectorizeFPMath(bool EnableStrictReductions);
 
   /// Return true if we can vectorize this loop while folding its tail by
-  /// masking, and mark all respective loads/stores for masking.
-  /// This object's state is only modified iff this function returns true.
-  bool prepareToFoldTailByMasking();
+  /// masking.
+  bool canFoldTailByMasking() const;
+
+  /// Mark all respective loads/stores for masking. Must only be called when
+  /// tail-folding is possible.
+  void prepareToFoldTailByMasking();
 
   /// Returns the primary induction variable.
   PHINode *getPrimaryInduction() { return PrimaryInduction; }
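The split separates the old check-and-mutate entry point into a const query and an explicit commit step. A minimal sketch of the intended calling protocol (the caller shown here is hypothetical; the real call sites are in LoopVectorize.cpp below):

  // Hypothetical caller: query first without mutating legality state,
  // then commit only once tail folding is actually chosen.
  if (Legal->canFoldTailByMasking()) {
    // ... cost model decides to fold the tail by masking ...
    Legal->prepareToFoldTailByMasking(); // records the masked loads/stores
  }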
24 changes: 16 additions & 8 deletions llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1543,7 +1543,7 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
   return Result;
 }
 
-bool LoopVectorizationLegality::prepareToFoldTailByMasking() {
+bool LoopVectorizationLegality::canFoldTailByMasking() const {
 
   LLVM_DEBUG(dbgs() << "LV: checking if tail can be folded by masking.\n");
 
@@ -1586,23 +1586,31 @@ bool LoopVectorizationLegality::prepareToFoldTailByMasking() {
   // The list of pointers that we can safely read and write to remains empty.
   SmallPtrSet<Value *, 8> SafePointers;
 
-  // Collect masked ops in temporary set first to avoid partially populating
-  // MaskedOp if a block cannot be predicated.
+  // Check all blocks for predication, including those that ordinarily do not
+  // need predication such as the header block.
   SmallPtrSet<const Instruction *, 8> TmpMaskedOp;
 
-  // Check and mark all blocks for predication, including those that ordinarily
-  // do not need predication such as the header block.
   for (BasicBlock *BB : TheLoop->blocks()) {
     if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp)) {
-      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking as requested.\n");
+      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking.\n");
       return false;
     }
   }
 
   LLVM_DEBUG(dbgs() << "LV: can fold tail by masking.\n");
-
-  MaskedOp.insert(TmpMaskedOp.begin(), TmpMaskedOp.end());
   return true;
 }
 
+void LoopVectorizationLegality::prepareToFoldTailByMasking() {
Collaborator commented:

The comments above are worth updating:

  // Collect masked ops in temporary set first to avoid partially populating
  // MaskedOp if a block cannot be predicated.

This comment needs rephrasing.

  // Check and mark all blocks for predication, including those that ordinarily
  // do not need predication such as the header block.

This should say only "Check", without "mark".

  LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking as requested.\n");

Drop "as requested"?

The author (Contributor) replied:

Updated, thanks!

+  // The list of pointers that we can safely read and write to remains empty.
+  SmallPtrSet<Value *, 8> SafePointers;
+
+  // Mark all blocks for predication, including those that ordinarily do not
+  // need predication such as the header block.
+  for (BasicBlock *BB : TheLoop->blocks()) {
+    [[maybe_unused]] bool R = blockCanBePredicated(BB, SafePointers, MaskedOp);
+    assert(R && "Must be able to predicate block when tail-folding.");
+  }
+}
 
 } // namespace llvm
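The two functions above follow a common check/commit pattern: the const query collects into a throwaway TmpMaskedOp so no state leaks if some block turns out not to be predicable, and the commit step repeats the walk into the real MaskedOp under an assert. A self-contained sketch of that pattern, using hypothetical names rather than the actual LLVM types:

  #include <cassert>
  #include <set>
  #include <utility>
  #include <vector>

  struct Block { int Id; bool Predicable; };

  // Sketch of the check/commit split: canProcess() is a pure query that
  // collects into a temporary set and discards it, while process() redoes
  // the walk into the persistent set and asserts the query already passed.
  class Legality {
    std::vector<Block> Blocks;
    std::set<int> MaskedOp; // persistent state, only touched by process()

  public:
    explicit Legality(std::vector<Block> B) : Blocks(std::move(B)) {}

    bool canProcess() const {
      std::set<int> Tmp; // throwaway: a const query must not mutate *this
      for (const Block &B : Blocks) {
        if (!B.Predicable)
          return false; // fail without having touched MaskedOp
        Tmp.insert(B.Id);
      }
      return true;
    }

    void process() {
      for (const Block &B : Blocks) {
        assert(B.Predicable && "process() requires canProcess() to hold");
        MaskedOp.insert(B.Id);
      }
    }
  };

One cost of the split is that blockCanBePredicated now runs twice when tail folding is chosen; the benefit is that the query is genuinely const and can never leave MaskedOp half-populated.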
5 changes: 4 additions & 1 deletion llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1502,7 +1502,7 @@ class LoopVectorizationCostModel {
   /// \param UserIC User specific interleave count.
   void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
     assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
-    if (!Legal->prepareToFoldTailByMasking()) {
+    if (!Legal->canFoldTailByMasking()) {
       ChosenTailFoldingStyle =
           std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None);
       return;
@@ -7226,6 +7226,9 @@ LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
     CM.invalidateCostModelingDecisions();
   }
 
+  if (CM.foldTailByMasking())
+    Legal->prepareToFoldTailByMasking();
+
   ElementCount MaxUserVF =
       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
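Taken together, the two hunks move the mutation out of the cost model's query path. Condensed from the diff above (surrounding details omitted):

  // In setTailFoldingStyles(): a pure query of legality.
  if (!Legal->canFoldTailByMasking()) {
    ChosenTailFoldingStyle =
        std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None);
    return;
  }

  // Later, in LoopVectorizationPlanner::plan(), once the cost model has
  // settled on folding the tail, the masked ops are actually recorded.
  if (CM.foldTailByMasking())
    Legal->prepareToFoldTailByMasking();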
@@ -96,35 +96,49 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
 ;
 ; NO-VP-LABEL: @interleave(
 ; NO-VP-NEXT: entry:
-; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 16
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
 ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; NO-VP: vector.ph:
-; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
 ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
 ; NO-VP: vector.body:
 ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
-; NO-VP-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 8
-; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], i64 [[TMP10]], i32 0
-; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[TMP1]], i32 0
-; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0
-; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
-; NO-VP-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4
-; NO-VP-NEXT: [[WIDE_VEC1:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4
-; NO-VP-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; NO-VP-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; NO-VP-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-; NO-VP-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-; NO-VP-NEXT: [[TMP6:%.*]] = add nsw <8 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC]]
-; NO-VP-NEXT: [[TMP7:%.*]] = add nsw <8 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC2]]
-; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]]
-; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]]
-; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 0
-; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 8
-; NO-VP-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP12]], align 4
-; NO-VP-NEXT: store <8 x i32> [[TMP7]], ptr [[TMP11]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; NO-VP-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
+; NO-VP-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 0
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 1
+; NO-VP-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], i64 [[TMP6]], i32 0
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[TMP11]], i32 0
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 0
+; NO-VP-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[WIDE_VEC1:%.*]] = load <vscale x 8 x i32>, ptr [[TMP15]], align 4
+; NO-VP-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; NO-VP-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
+; NO-VP-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
+; NO-VP-NEXT: [[STRIDED_VEC2:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC1]])
+; NO-VP-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 0
+; NO-VP-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC2]], 1
+; NO-VP-NEXT: [[TMP20:%.*]] = add nsw <vscale x 4 x i32> [[TMP17]], [[TMP16]]
+; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <vscale x 4 x i32> [[TMP19]], [[TMP18]]
+; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP6]]
+; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i32 0
+; NO-VP-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP26:%.*]] = mul i64 [[TMP25]], 4
+; NO-VP-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP26]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP20]], ptr [[TMP24]], align 4
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP21]], ptr [[TMP27]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
 ; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; NO-VP: middle.block:
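The updated NO-VP CHECK lines show @interleave now vectorizing with scalable <vscale x 4 x i32> vectors and llvm.vector.deinterleave2 instead of the previous fixed VF of 8 with shufflevector de-interleaving. For orientation, the test roughly corresponds to this scalar loop (a hypothetical C++ reconstruction from the IR, not the test's actual source):

  // Sums each interleaved pair b[i][0], b[i][1] into a[i];
  // the IR's noalias corresponds to __restrict here.
  void interleave(int *__restrict a, int (*__restrict b)[2], long N) {
    for (long i = 0; i < N; ++i)
      a[i] = b[i][1] + b[i][0];
  }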