
[InterleavedAccessPass]: Ensure that dead nodes get erased only once #122643


Merged · 1 commit · Jan 14, 2025
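The change is small but pointed: the pass collects instructions to delete while it rewrites interleaved accesses, and it previously collected them in a SmallVector, so an instruction reported dead through two paths could be queued, and later erased, twice. Switching the container to SmallSetVector makes the second insertion a no-op while preserving insertion order. A minimal sketch of that property, with ints standing in for the Instruction pointers the pass actually stores:

// Minimal sketch (not the pass itself): SmallSetVector::insert ignores
// duplicates but keeps insertion order, so each dead instruction can be
// queued -- and therefore erased -- at most once.
#include "llvm/ADT/SmallSetVector.h"
#include <cassert>

int main() {
  llvm::SmallSetVector<int, 32> DeadInsts;
  assert(DeadInsts.insert(1));  // newly inserted
  assert(DeadInsts.insert(2));  // newly inserted
  assert(!DeadInsts.insert(1)); // duplicate: rejected, not stored again
  assert(DeadInsts.size() == 2);
  return 0;
}

SetVector layers a set over a vector, so the duplicate check is constant time on average while iteration order stays deterministic, which keeps the pass's output reproducible.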
41 changes: 21 additions & 20 deletions llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -100,21 +100,21 @@ class InterleavedAccessImpl {

  /// Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(LoadInst *LI,
-                           SmallVectorImpl<Instruction *> &DeadInsts);
+                           SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(StoreInst *SI,
-                            SmallVectorImpl<Instruction *> &DeadInsts);
+                            SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform a load and a deinterleave intrinsic into target specific
  /// instructions.
  bool lowerDeinterleaveIntrinsic(IntrinsicInst *II,
-                                 SmallVectorImpl<Instruction *> &DeadInsts);
+                                 SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Transform an interleave intrinsic and a store into target specific
  /// instructions.
  bool lowerInterleaveIntrinsic(IntrinsicInst *II,
-                               SmallVectorImpl<Instruction *> &DeadInsts);
+                               SmallSetVector<Instruction *, 32> &DeadInsts);

  /// Returns true if the uses of an interleaved load by the
  /// extractelement instructions in \p Extracts can be replaced by uses of the
@@ -249,7 +249,7 @@ static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
}

bool InterleavedAccessImpl::lowerInterleavedLoad(
-    LoadInst *LI, SmallVectorImpl<Instruction *> &DeadInsts) {
+    LoadInst *LI, SmallSetVector<Instruction *, 32> &DeadInsts) {
  if (!LI->isSimple() || isa<ScalableVectorType>(LI->getType()))
    return false;

@@ -348,9 +348,9 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
    return !Extracts.empty() || BinOpShuffleChanged;
  }

-  append_range(DeadInsts, Shuffles);
+  DeadInsts.insert(Shuffles.begin(), Shuffles.end());

-  DeadInsts.push_back(LI);
+  DeadInsts.insert(LI);
  return true;
}

@@ -453,7 +453,7 @@ bool InterleavedAccessImpl::tryReplaceExtracts(
}

bool InterleavedAccessImpl::lowerInterleavedStore(
-    StoreInst *SI, SmallVectorImpl<Instruction *> &DeadInsts) {
+    StoreInst *SI, SmallSetVector<Instruction *, 32> &DeadInsts) {
  if (!SI->isSimple())
    return false;

@@ -473,13 +473,13 @@ bool InterleavedAccessImpl::lowerInterleavedStore(
    return false;

  // Already have a new target specific interleaved store. Erase the old store.
-  DeadInsts.push_back(SI);
-  DeadInsts.push_back(SVI);
+  DeadInsts.insert(SI);
+  DeadInsts.insert(SVI);
  return true;
}

bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
-    IntrinsicInst *DI, SmallVectorImpl<Instruction *> &DeadInsts) {
+    IntrinsicInst *DI, SmallSetVector<Instruction *, 32> &DeadInsts) {
  LoadInst *LI = dyn_cast<LoadInst>(DI->getOperand(0));

  if (!LI || !LI->hasOneUse() || !LI->isSimple())
@@ -488,17 +488,19 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
  LLVM_DEBUG(dbgs() << "IA: Found a deinterleave intrinsic: " << *DI << "\n");

  // Try and match this with target specific intrinsics.
-  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LI, DeadInsts))
+  SmallVector<Instruction *, 4> DeinterleaveDeadInsts;
+  if (!TLI->lowerDeinterleaveIntrinsicToLoad(DI, LI, DeinterleaveDeadInsts))
    return false;

+  DeadInsts.insert(DeinterleaveDeadInsts.begin(), DeinterleaveDeadInsts.end());
  // We now have a target-specific load, so delete the old one.
-  DeadInsts.push_back(DI);
-  DeadInsts.push_back(LI);
+  DeadInsts.insert(DI);
+  DeadInsts.insert(LI);
  return true;
}

bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
-    IntrinsicInst *II, SmallVectorImpl<Instruction *> &DeadInsts) {
+    IntrinsicInst *II, SmallSetVector<Instruction *, 32> &DeadInsts) {
  if (!II->hasOneUse())
    return false;

@@ -515,16 +515,15 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
    return false;

  // We now have a target-specific store, so delete the old one.
-  DeadInsts.push_back(SI);
-  DeadInsts.push_back(II);
-  DeadInsts.insert(DeadInsts.end(), InterleaveDeadInsts.begin(),
-                   InterleaveDeadInsts.end());
+  DeadInsts.insert(SI);
+  DeadInsts.insert(II);
+  DeadInsts.insert(InterleaveDeadInsts.begin(), InterleaveDeadInsts.end());
  return true;
}

bool InterleavedAccessImpl::runOnFunction(Function &F) {
  // Holds dead instructions that will be erased later.
-  SmallVector<Instruction *, 32> DeadInsts;
+  SmallSetVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  for (auto &I : instructions(F)) {
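For context beyond the hunks shown, the collected instructions are erased in a single sweep once the scan of the function finishes. A hedged sketch of that sweep, assuming the loop at the end of runOnFunction keeps its usual shape:

// Sketch of the erase sweep (assumed shape, not part of this diff).
// Because DeadInsts is now a SmallSetVector, each instruction appears
// at most once, so eraseFromParent() never runs twice on the same node.
for (Instruction *I : DeadInsts)
  I->eraseFromParent();

With the old SmallVector, a duplicated entry meant the second eraseFromParent() call operated on an already-freed instruction.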
14 changes: 14 additions & 0 deletions llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleave4.ll
@@ -55,3 +55,17 @@ define void @mix_interleave4_interleave2(ptr %dst1, ptr %dst2, <vscale x 4 x i32
  store <vscale x 8 x i32> %interleaved, ptr %dst2, align 4
  ret void
}
+
+; This case tests that when the interleave uses the same parameter twice,
+; the dead parameter does not get deleted twice.
+define void @duplicate_by_interleave(<vscale x 4 x i32> %A, <vscale x 4 x i32> %B, ptr writeonly %AB_duplicate) {
+; CHECK-LABEL: define void @duplicate_by_interleave
+; CHECK-SAME: (<vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i32> [[B:%.*]], ptr writeonly [[AB_DUPLICATE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[B]], <vscale x 4 x i1> splat (i1 true), ptr [[AB_DUPLICATE]])
+; CHECK-NEXT:    ret void
+;
+  %interleave = tail call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %A, <vscale x 4 x i32> %B)
+  %duplicate_by_interleave = tail call <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32> %interleave, <vscale x 8 x i32> %interleave)
+  store <vscale x 16 x i32> %duplicate_by_interleave, ptr %AB_duplicate, align 4
+  ret void
+}
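The test is a minimal reproducer: both operands of the outer @llvm.vector.interleave2 are the same %interleave value, so the inner interleave is reported dead once per use. Reduced to the container behaviour, with a hypothetical Node type standing in for llvm::Instruction:

// Hypothetical stand-in for an IR instruction; what matters to the
// container is pointer identity, not the payload.
#include "llvm/ADT/SmallSetVector.h"
#include <cassert>

struct Node { int Id; };

int main() {
  Node Inner{0}; // the inner interleave2, used as both operands
  Node *Operands[2] = {&Inner, &Inner};

  llvm::SmallSetVector<Node *, 32> DeadInsts;
  for (Node *Op : Operands)
    DeadInsts.insert(Op); // second insert of the same pointer is a no-op

  assert(DeadInsts.size() == 1); // Inner will be erased exactly once
  return 0;
}

Before this patch, the same pointer would have been pushed twice and erased twice; the set-based container absorbs the duplicate.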