AMDGPU: Expand flat atomics that may access private memory #109407

Merged · 1 commit · Oct 31, 2024
145 changes: 112 additions & 33 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -39,6 +39,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ModRef.h"
@@ -16308,12 +16309,45 @@ atomicSupportedIfLegalIntType(const AtomicRMWInst *RMW) {
: TargetLowering::AtomicExpansionKind::CmpXChg;
}

/// \return true if a flat address space atomicrmw can access private memory.
static bool flatInstrMayAccessPrivate(const Instruction *I) {
const MDNode *NoaliasAddrSpaceMD =
I->getMetadata(LLVMContext::MD_noalias_addrspace);
if (!NoaliasAddrSpaceMD)
return true;

for (unsigned I = 0, E = NoaliasAddrSpaceMD->getNumOperands() / 2; I != E;
++I) {
auto *Low = mdconst::extract<ConstantInt>(
NoaliasAddrSpaceMD->getOperand(2 * I + 0));
auto *High = mdconst::extract<ConstantInt>(
NoaliasAddrSpaceMD->getOperand(2 * I + 1));

// If this excluded range covers the private address space, the atomic
// cannot access private memory.
if (Low->getValue().ule(AMDGPUAS::PRIVATE_ADDRESS) &&
High->getValue().ugt(AMDGPUAS::PRIVATE_ADDRESS))
return false;
}

return true;
}

TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
unsigned AS = RMW->getPointerAddressSpace();
if (AS == AMDGPUAS::PRIVATE_ADDRESS)
return AtomicExpansionKind::NotAtomic;

// 64-bit flat atomics that dynamically reside in private memory will silently
// be dropped.
//
// Note that we will emit a new copy of the original atomic in the expansion,
// which will be incrementally relegalized.
const DataLayout &DL = RMW->getFunction()->getDataLayout();
if (AS == AMDGPUAS::FLAT_ADDRESS &&
DL.getTypeSizeInBits(RMW->getType()) == 64 &&
flatInstrMayAccessPrivate(RMW))
return AtomicExpansionKind::Expand;

auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) {
OptimizationRemarkEmitter ORE(RMW->getFunction());
ORE.emit([=]() {
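
For illustration only (not part of the patch, assuming an amdgcn target where address space 0 is flat and 5 is private): the effect of the new check on a 64-bit flat atomic, with hypothetical function names. Without !noalias.addrspace metadata the pointer may dynamically point at private (scratch) memory, so shouldExpandAtomicRMWInIR now returns AtomicExpansionKind::Expand; with a range covering exactly address space 5, the single flat instruction is kept.

; Expanded: may dynamically address private memory.
define i64 @flat_atomic_may_be_private(ptr %p, i64 %v) {
  %r = atomicrmw add ptr %p, i64 %v seq_cst
  ret i64 %r
}

; Not expanded: the metadata promises the access never aliases
; address space 5 (private), so flatInstrMayAccessPrivate is false.
define i64 @flat_atomic_never_private(ptr %p, i64 %v) {
  %r = atomicrmw add ptr %p, i64 %v seq_cst, !noalias.addrspace !0
  ret i64 %r
}

!0 = !{i32 5, i32 6} ; half-open range [5, 6): exactly the private address space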
@@ -16714,20 +16748,34 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {

if (Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Or ||
Op == AtomicRMWInst::Xor) {
// atomicrmw or %ptr, 0 -> atomicrmw add %ptr, 0
assert(cast<Constant>(AI->getValOperand())->isNullValue() &&
"this cannot be replaced with add");
AI->setOperation(AtomicRMWInst::Add);
return;
if (auto *ConstVal = dyn_cast<Constant>(AI->getValOperand());
ConstVal && ConstVal->isNullValue()) {
// atomicrmw or %ptr, 0 -> atomicrmw add %ptr, 0
AI->setOperation(AtomicRMWInst::Add);

// TODO: Turn the below private handling into a no-op for idempotent
// cases.
}
}

assert(Subtarget->hasAtomicFaddInsts() &&
"target should have atomic fadd instructions");
assert(AI->getType()->isFloatTy() &&
AI->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS &&
"generic atomicrmw expansion only supports FP32 operand in flat "
"address space");
assert(Op == AtomicRMWInst::FAdd && "only fadd is supported for now");
// The non-flat expansions should only perform the de-canonicalization of
// identity values.
if (AI->getPointerAddressSpace() != AMDGPUAS::FLAT_ADDRESS)
return;

// FullFlatEmulation is true if we need to issue the private, shared, and
// global cases.
//
// If this is false, we are only dealing with the flat-targeting-private case,
// where we only insert a check for private and still use the flat instruction
// for global and shared.

// TODO: Avoid the private check for the fadd case depending on
// noalias.addrspace.

bool FullFlatEmulation = Op == AtomicRMWInst::FAdd &&
Subtarget->hasAtomicFaddInsts() &&
AI->getType()->isFloatTy();

// Given: atomicrmw fadd ptr %addr, float %val ordering
//
@@ -16767,6 +16815,10 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
//
// atomicrmw.end:
// [...]
//
//
// For 64-bit atomics which may reside in private memory, we perform a simpler
// version that only inserts the private check, and uses the flat operation.

IRBuilder<> Builder(AI);
LLVMContext &Ctx = Builder.getContext();
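
As a sketch of the simpler 64-bit form described in the comment above (block and value names are illustrative; the operation and ordering are arbitrary), the expansion keeps the flat instruction on the non-private path and re-annotates it so a later legalization round does not expand it again:

define i64 @sketch_expanded_i64(ptr %addr, i64 %val) {
entry:
  %is.private = call i1 @llvm.amdgcn.is.private(ptr %addr)
  br i1 %is.private, label %atomicrmw.private, label %atomicrmw.global

atomicrmw.private:
  ; Private memory is per-thread, so a plain load/op/store is sufficient.
  %cast.private = addrspacecast ptr %addr to ptr addrspace(5)
  %loaded.private = load i64, ptr addrspace(5) %cast.private
  %new = add i64 %loaded.private, %val
  store i64 %new, ptr addrspace(5) %cast.private
  br label %atomicrmw.phi

atomicrmw.global:
  ; The original flat atomic is reused, now marked as never touching private.
  %loaded.global = atomicrmw add ptr %addr, i64 %val seq_cst, !noalias.addrspace !0
  br label %atomicrmw.phi

atomicrmw.phi:
  %loaded = phi i64 [ %loaded.private, %atomicrmw.private ], [ %loaded.global, %atomicrmw.global ]
  ret i64 %loaded
}

declare i1 @llvm.amdgcn.is.private(ptr)

!0 = !{i32 5, i32 6}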
@@ -16778,9 +16830,15 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
Function *F = BB->getParent();
BasicBlock *ExitBB =
BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
BasicBlock *SharedBB = BasicBlock::Create(Ctx, "atomicrmw.shared", F, ExitBB);
BasicBlock *CheckPrivateBB =
BasicBlock::Create(Ctx, "atomicrmw.check.private", F, ExitBB);
BasicBlock *SharedBB = nullptr;

BasicBlock *CheckPrivateBB = BB;
if (FullFlatEmulation) {
SharedBB = BasicBlock::Create(Ctx, "atomicrmw.shared", F, ExitBB);
CheckPrivateBB =
BasicBlock::Create(Ctx, "atomicrmw.check.private", F, ExitBB);
}

BasicBlock *PrivateBB =
BasicBlock::Create(Ctx, "atomicrmw.private", F, ExitBB);
BasicBlock *GlobalBB = BasicBlock::Create(Ctx, "atomicrmw.global", F, ExitBB);
@@ -16793,23 +16851,26 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {

std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
CallInst *IsShared = Builder.CreateIntrinsic(Intrinsic::amdgcn_is_shared, {},
{Addr}, nullptr, "is.shared");
Builder.CreateCondBr(IsShared, SharedBB, CheckPrivateBB);

Builder.SetInsertPoint(SharedBB);
Value *CastToLocal = Builder.CreateAddrSpaceCast(
Addr, PointerType::get(Ctx, AMDGPUAS::LOCAL_ADDRESS));
Value *LoadedShared = nullptr;
if (FullFlatEmulation) {
CallInst *IsShared = Builder.CreateIntrinsic(
Intrinsic::amdgcn_is_shared, {}, {Addr}, nullptr, "is.shared");
Builder.CreateCondBr(IsShared, SharedBB, CheckPrivateBB);
Builder.SetInsertPoint(SharedBB);
Value *CastToLocal = Builder.CreateAddrSpaceCast(
Addr, PointerType::get(Ctx, AMDGPUAS::LOCAL_ADDRESS));

Instruction *Clone = AI->clone();
Clone->insertInto(SharedBB, SharedBB->end());
Clone->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
.set(CastToLocal);
Instruction *LoadedShared = Clone;
Instruction *Clone = AI->clone();
Clone->insertInto(SharedBB, SharedBB->end());
Clone->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
.set(CastToLocal);
LoadedShared = Clone;

Builder.CreateBr(PhiBB);
Builder.CreateBr(PhiBB);
Builder.SetInsertPoint(CheckPrivateBB);
}

Builder.SetInsertPoint(CheckPrivateBB);
CallInst *IsPrivate = Builder.CreateIntrinsic(
Intrinsic::amdgcn_is_private, {}, {Addr}, nullptr, "is.private");
Builder.CreateCondBr(IsPrivate, PrivateBB, GlobalBB);
@@ -16826,23 +16887,41 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
Builder.CreateBr(PhiBB);

Builder.SetInsertPoint(GlobalBB);
Value *CastToGlobal = Builder.CreateAddrSpaceCast(
Addr, PointerType::get(Ctx, AMDGPUAS::GLOBAL_ADDRESS));
Value *LoadedGlobal = AI;

AI->getOperandUse(AtomicRMWInst::getPointerOperandIndex()).set(CastToGlobal);
// Continue using a flat instruction if we only emitted the check for private.
Instruction *LoadedGlobal = AI;
if (FullFlatEmulation) {
Value *CastToGlobal = Builder.CreateAddrSpaceCast(
Addr, PointerType::get(Ctx, AMDGPUAS::GLOBAL_ADDRESS));
AI->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
.set(CastToGlobal);
}

AI->removeFromParent();
AI->insertInto(GlobalBB, GlobalBB->end());

// The new atomicrmw may go through another round of legalization later.
if (!FullFlatEmulation) {
// We inserted the runtime check already, make sure we do not try to
// re-expand this.
// TODO: Should union with any existing metadata.
MDBuilder MDB(F->getContext());
MDNode *RangeNotPrivate =
MDB.createRange(APInt(32, AMDGPUAS::PRIVATE_ADDRESS),
APInt(32, AMDGPUAS::PRIVATE_ADDRESS + 1));
LoadedGlobal->setMetadata(LLVMContext::MD_noalias_addrspace,
RangeNotPrivate);
}

Builder.CreateBr(PhiBB);

Builder.SetInsertPoint(PhiBB);

if (ReturnValueIsUsed) {
PHINode *Loaded = Builder.CreatePHI(ValTy, 3);
AI->replaceAllUsesWith(Loaded);
Loaded->addIncoming(LoadedShared, SharedBB);
if (FullFlatEmulation)
Loaded->addIncoming(LoadedShared, SharedBB);
Loaded->addIncoming(LoadedPrivate, PrivateBB);
Loaded->addIncoming(LoadedGlobal, GlobalBB);
Loaded->takeName(AI);
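One more detail from the SIISelLowering.cpp changes above: the sub/or/xor path no longer asserts; it only canonicalizes the operation to add when the value operand is a constant zero. A hypothetical input illustrating the rewrite:

; Idempotent flat atomic used only for its loaded result.
define i32 @or_zero(ptr %p) {
  ; emitExpandAtomicRMW rewrites this to: atomicrmw add ptr %p, i32 0 seq_cst
  ; (equivalent, since neither modifies memory) before the private handling runs.
  %r = atomicrmw or ptr %p, i32 0 seq_cst
  ret i32 %r
}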
5 changes: 3 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
@@ -1332,7 +1332,7 @@ define double @flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
%result = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%result = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %result
}

@@ -1482,7 +1482,7 @@ define void @flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
%unused = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%unused = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -2215,3 +2215,4 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_
}

!0 = !{}
!1 = !{i32 5, i32 6}
5 changes: 3 additions & 2 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
@@ -1332,7 +1332,7 @@ define double @flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
%result = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%result = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %result
}

@@ -1482,7 +1482,7 @@ define void @flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
%unused = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%unused = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -2215,3 +2215,4 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_
}

!0 = !{}
!1 = !{i32 5, i32 6}
19 changes: 11 additions & 8 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
@@ -1645,7 +1645,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}
@@ -1747,7 +1747,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}
@@ -1820,7 +1820,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -1899,7 +1899,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -1978,7 +1978,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
%result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8
%result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -2106,7 +2106,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%out.gep = getelementptr i64, ptr %out, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out.gep, align 4
ret void
}
@@ -2205,7 +2205,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3312,7 +3312,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out,
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #2
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0
%result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8
%result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i32 %idx.0, ptr addrspace(1) %add_use, align 4
store i64 %result, ptr addrspace(1) %out, align 4
ret void
@@ -3321,5 +3321,8 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out,
attributes #0 = { nounwind speculatable willreturn memory(none) }
attributes #1 = { nounwind }
attributes #2 = { nounwind memory(none) }

!0 = !{i32 5, i32 6}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}