-
Notifications
You must be signed in to change notification settings - Fork 14.3k
AMDGPU: Expand remaining system atomic operations #80798
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
@llvm/pr-subscribers-llvm-transforms @llvm/pr-subscribers-backend-amdgpu Author: Matt Arsenault (arsenm) Changes: System scope atomics need to use cmpxchg loops. Don't expand xchg and add, those theoretically should work over PCIe. Patch is 913.37 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/80798.diff 9 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3d4adb16a27162..fad3cf06b9a263 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -15796,7 +15796,8 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
SSID == SyncScope::System ||
SSID == RMW->getContext().getOrInsertSyncScopeID("one-as");
- switch (RMW->getOperation()) {
+ auto Op = RMW->getOperation();
+ switch (Op) {
case AtomicRMWInst::FAdd: {
Type *Ty = RMW->getType();
@@ -15871,18 +15872,28 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
return AtomicExpansionKind::CmpXChg;
}
- case AtomicRMWInst::FMin:
- case AtomicRMWInst::FMax:
- case AtomicRMWInst::Min:
+ case AtomicRMWInst::Xchg:
+ case AtomicRMWInst::Add:
+ // PCIe supports add and xchg for system atomics.
+ break;
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::And:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
case AtomicRMWInst::Max:
+ case AtomicRMWInst::Min:
+ case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
- case AtomicRMWInst::UMax: {
+ case AtomicRMWInst::FMin:
+ case AtomicRMWInst::FMax:
+ case AtomicRMWInst::UIncWrap:
+ case AtomicRMWInst::UDecWrap: {
if (AMDGPU::isFlatGlobalAddrSpace(AS)) {
- if (RMW->getType()->isFloatTy() &&
+ if (AtomicRMWInst::isFPOperation(Op) &&
unsafeFPAtomicsDisabled(RMW->getFunction()))
return AtomicExpansionKind::CmpXChg;
- // Always expand system scope min/max atomics.
+ // Always expand system scope atomics.
if (HasSystemScope)
return AtomicExpansionKind::CmpXChg;
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
index 25cee87244975e..cd13b15e263e9e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
@@ -443,73 +443,165 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(ptr addrspace(1) %ou
define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_system(ptr addrspace(1) %out, ptr addrspace(1) %ptr) #1 {
; CI-LABEL: global_atomic_dec_ret_i32_offset_system:
; CI: ; %bb.0:
-; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT: v_mov_b32_e32 v2, 42
+; CI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[2:3], 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s2, s2, 16
-; CI-NEXT: s_addc_u32 s3, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: s_add_u32 s0, s6, 16
+; CI-NEXT: s_load_dword s6, s[6:7], 0x4
+; CI-NEXT: s_addc_u32 s1, s7, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: .LBB6_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_mov_b32_e32 v3, v2
+; CI-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; CI-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v3
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_cndmask_b32_e64 v2, v2, 42, s[0:1]
+; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; CI-NEXT: s_cbranch_execnz .LBB6_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_mov_b32_e32 v1, s5
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i32_offset_system:
; VI: ; %bb.0:
-; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT: v_mov_b32_e32 v2, 42
+; VI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[2:3], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s2, s2, 16
-; VI-NEXT: s_addc_u32 s3, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: s_add_u32 s0, s6, 16
+; VI-NEXT: s_load_dword s6, s[6:7], 0x10
+; VI-NEXT: s_addc_u32 s1, s7, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: .LBB6_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_mov_b32_e32 v3, v2
+; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; VI-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v3
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_cndmask_b32_e64 v2, v2, 42, s[0:1]
+; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; VI-NEXT: s_cbranch_execnz .LBB6_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX9-NEXT: v_mov_b32_e32 v0, 42
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
+; GFX9-NEXT: s_load_dword s0, s[6:7], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v2
+; GFX9-NEXT: v_subrev_u32_e32 v1, 1, v2
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 42, s[0:1]
+; GFX9-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[6:7] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: global_store_dword v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX10: ; %bb.0:
-; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v0, 42
-; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
+; GFX10-NEXT: s_load_dword s0, s[6:7], 0x10
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v1, s0
+; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT: v_cmp_lt_u32_e64 s0, 42, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v1, 1, v2
+; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 42, s0
+; GFX10-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[6:7] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
-; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX10-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
+; GFX10-NEXT: s_cbranch_execnz .LBB6_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
-; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
+; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x0
+; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_atomic_dec_u32 v0, v1, v0, s[2:3] offset:16 glc
+; GFX11-NEXT: s_load_b32 s0, s[6:7], 0x10
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
+; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-NEXT: v_cmp_lt_u32_e64 s0, 42, v2
+; GFX11-NEXT: v_subrev_nc_u32_e32 v1, 1, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 42, s0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[6:7] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
-; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
+; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: global_store_b32 v0, v1, s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
@@ -652,63 +744,146 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_system(ptr addrspa
; CI-LABEL: global_atomic_dec_noret_i32_offset_system:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; CI-NEXT: v_mov_b32_e32 v2, 42
+; CI-NEXT: s_mov_b64 s[2:3], 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s0, s0, 16
-; CI-NEXT: s_addc_u32 s1, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_atomic_dec v[0:1], v2
+; CI-NEXT: s_add_u32 s4, s0, 16
+; CI-NEXT: s_load_dword s0, s[0:1], 0x4
+; CI-NEXT: s_addc_u32 s5, s1, 0
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_mov_b32_e32 v1, s5
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v3, s0
+; CI-NEXT: .LBB9_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; CI-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v3
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_cndmask_b32_e64 v2, v2, 42, s[0:1]
+; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; CI-NEXT: v_mov_b32_e32 v3, v2
+; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; CI-NEXT: s_cbranch_execnz .LBB9_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i32_offset_system:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; VI-NEXT: v_mov_b32_e32 v2, 42
+; VI-NEXT: s_mov_b64 s[2:3], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s0, s0, 16
-; VI-NEXT: s_addc_u32 s1, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_atomic_dec v[0:1], v2
+; VI-NEXT: s_add_u32 s4, s0, 16
+; VI-NEXT: s_load_dword s0, s[0:1], 0x10
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mov_b32_e32 v1, s5
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s0
+; VI-NEXT: .LBB9_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; VI-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v3
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_cndmask_b32_e64 v2, v2, 42, s[0:1]
+; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; VI-NEXT: v_mov_b32_e32 v3, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; VI-NEXT: s_cbranch_execnz .LBB9_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX9-NEXT: v_mov_b32_e32 v0, 42
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
+; GFX9-NEXT: s_load_dword s0, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v1
+; GFX9-NEXT: v_subrev_u32_e32 v0, 1, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 42, s[0:1]
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX10: ; %bb.0:
-; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v0, 42
-; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
-; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_load_dword s0, s[2:3], 0x10
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v1, s0
+; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX10-NEXT: v_cmp_lt_u32_e64 s0, 42, v1
+; GFX10-NEXT: v_subrev_nc_u32_e32 v0, 1, v1
+; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 42, s0
+; GFX10-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: buffer_gl1_inv
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX10-NEXT: v_mov_b32_e32 v1, v0
+; GFX10-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
+; GFX10-NEXT: s_cbranch_execnz .LBB9_1
+; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX11: ; %bb.0:
-; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
+; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_atomic_dec_u32 v1, v0, s[0:1] offset:16
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_load_b32 s0, s[2:3], 0x10
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s0
+; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cmp_lt_u32_e64 s0, 42, v1
+; GFX11-NEXT: v_subrev_nc_u32_e32 v0, 1, v1
+; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 42, s0
+; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: buffer_gl1_inv
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
+; GFX11-NEXT: v_mov_b32_e32 v1, v0
+; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
@@ -1052,82 +1227,165 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(ptr %out, ptr %ptr) #1
define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %ptr) #1 {
; CI-LABEL: flat_atomic_dec_ret_i32_offset_system:
; CI: ; %bb.0:
-; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; CI-NEXT: v_mov_b32_e32 v2, 42
+; CI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[2:3], 0
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s2, s2, 16
-; CI-NEXT: s_addc_u32 s3, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: s_add_u32 s0, s6, 16
+; CI-NEXT: s_addc_u32 s1, s7, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: flat_load_dword v2, v[0:1]
+; CI-NEXT: .LBB14_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v3, v2
+; CI-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; CI-NEXT: v_cmp_lt_u32_e64 s[0:1], 42, v3
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_cndmask_b32_e64 v2, v2, 42, s[0:1]
+; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
+; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; CI-NEXT: s_cbranch_execnz .LBB14_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_mov_b32_e32 v1, s5
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i32_offset_system:
; VI: ; %bb.0:
-; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
-; VI-NEXT: v_mov_b32_e32 v2, 42
+; VI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[2:3], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s2, s2, 16
-; VI-NEXT: s_addc_u32 s3, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_wbinv...
[truncated]
|
System scope atomics need to use cmpxchg loops. aea5980 started this, this expands the set to cover the remaining integer operations. Don't expand xchg and add, those theoretically should work over PCIe.
Reposted as #122137 |
System scope atomics need to use cmpxchg
loops. aea5980 started this, this expands the set to cover the remaining integer operations.
Don't expand xchg and add, those theoretically should work over PCIe.