[AMDGPU] Remove AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough #71407

Merged: 1 commit, Nov 6, 2023

14 changes: 0 additions & 14 deletions llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -148,20 +148,6 @@ void AMDGPUAsmPrinter::emitEndOfAsmFile(Module &M) {
}
}

-bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
-    const MachineBasicBlock *MBB) const {
-  if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
-    return false;
-
-  if (MBB->empty())
-    return true;
-
-  // If this is a block implementing a long branch, an expression relative to
-  // the start of the block is needed.
-  // XXX - Is there a smarter way to check this?
-  return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64);
-}
-
void AMDGPUAsmPrinter::emitFunctionBodyStart() {
const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
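
With the override gone, label emission for these blocks falls back to the generic AsmPrinter::isBlockOnlyReachableByFallthrough, so the long-branch expansion blocks in the tests below lose their dedicated .LBB labels and keep only the ; %bb.N block comment. The updated checks show that the expansion computes its target through the .Lpost_getpc labels, so nothing still references the block label itself. As context, the following is a minimal sketch of roughly what the generic check does; it is a paraphrase under the assumption that the base-class logic keys on the layout predecessor and its terminators, not the verbatim upstream implementation, and the free function is hypothetical.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"

using namespace llvm;

// Sketch: a block's label can be omitted when its sole predecessor is the
// block laid out immediately before it and no terminator of that predecessor
// names this block as an explicit branch target.
static bool blockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) {
  // EH landing pads, blocks with no predecessors, and blocks with multiple
  // predecessors are not reachable only by fallthrough.
  if (MBB->isEHPad() || MBB->pred_empty() || MBB->pred_size() > 1)
    return false;

  // The single predecessor must be the layout predecessor.
  const MachineBasicBlock *Pred = *MBB->pred_begin();
  if (!Pred->isLayoutSuccessor(MBB))
    return false;

  for (const MachineInstr &MI : Pred->terminators()) {
    // Indirect or table branches may still reference the label.
    if (!MI.isBranch() || MI.isIndirectBranch())
      return false;
    // If a terminator explicitly branches to MBB, the label is required.
    for (const MachineOperand &MO : MI.operands())
      if (MO.isMBB() && MO.getMBB() == MBB)
        return false;
  }
  return true;
}
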
3 changes: 0 additions & 3 deletions llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -128,9 +128,6 @@ class AMDGPUAsmPrinter final : public AsmPrinter {

void emitEndOfAsmFile(Module &M) override;

-  bool isBlockOnlyReachableByFallthrough(
-      const MachineBasicBlock *MBB) const override;
-
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
const char *ExtraCode, raw_ostream &O) override;

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
@@ -324,7 +324,7 @@ define amdgpu_kernel void @spill(ptr addrspace(1) %arg, i32 %cnd) #0 {
; CHECK-NEXT: s_mov_b32 vcc_hi, 0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cbranch_scc0 .LBB0_1
-; CHECK-NEXT: .LBB0_3: ; %entry
+; CHECK-NEXT: ; %bb.3: ; %entry
; CHECK-NEXT: s_not_b64 exec, exec
; CHECK-NEXT: buffer_store_dword v0, off, s[96:99], 0
; CHECK-NEXT: v_writelane_b32 v0, s0, 0
@@ -1290,7 +1290,7 @@ define void @spill_func(ptr addrspace(1) %arg) #0 {
; CHECK-NEXT: s_mov_b32 vcc_hi, 0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cbranch_scc0 .LBB1_1
-; CHECK-NEXT: .LBB1_3: ; %entry
+; CHECK-NEXT: ; %bb.3: ; %entry
; CHECK-NEXT: s_not_b64 exec, exec
; CHECK-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:8
; CHECK-NEXT: v_writelane_b32 v2, s0, 0
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir
@@ -4,7 +4,7 @@
# block as the branch expansion.

# GCN-LABEL: long_branch_dbg_value:
-# GCN: .LBB0_5: ; %bb
+# GCN: ; %bb.5: ; %bb
# GCN-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- [DW_OP_plus_uconst 12, DW_OP_stack_value]
# GCN-NEXT: .loc 1 0 42 is_stmt 0 ; /tmp/test_debug_value.cl:0:42
# GCN-NEXT: s_getpc_b64 s[[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]]
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -67,7 +67,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s2, 0
; GCN-NEXT: s_cbranch_scc0 .LBB1_1
-; GCN-NEXT: .LBB1_3: ; %bb0
+; GCN-NEXT: ; %bb.3: ; %bb0
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc0:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB1_2-.Lpost_getpc0)&4294967295
@@ -115,7 +115,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr
; GCN-NEXT: v_cmp_eq_f32_e64 s[4:5], s2, 0
; GCN-NEXT: s_and_b64 vcc, exec, s[4:5]
; GCN-NEXT: s_cbranch_vccz .LBB2_1
-; GCN-NEXT: .LBB2_3: ; %bb0
+; GCN-NEXT: ; %bb.3: ; %bb0
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc1:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB2_2-.Lpost_getpc1)&4294967295
@@ -172,7 +172,7 @@ define amdgpu_kernel void @min_long_forward_vbranch(ptr addrspace(1) %arg) #0 {
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GCN-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GCN-NEXT: s_cbranch_execnz .LBB3_1
-; GCN-NEXT: .LBB3_3: ; %bb
+; GCN-NEXT: ; %bb.3: ; %bb
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc2:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB3_2-.Lpost_getpc2)&4294967295
@@ -228,7 +228,7 @@ define amdgpu_kernel void @long_backward_sbranch(ptr addrspace(1) %arg) #0 {
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_cbranch_scc0 .LBB4_2
-; GCN-NEXT: .LBB4_3: ; %bb2
+; GCN-NEXT: ; %bb.3: ; %bb2
; GCN-NEXT: ; in Loop: Header=BB4_1 Depth=1
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: .Lpost_getpc3:
@@ -266,7 +266,7 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: s_cmp_eq_u32 s2, 0
; GCN-NEXT: s_mov_b64 s[2:3], -1
; GCN-NEXT: s_cbranch_scc0 .LBB5_1
-; GCN-NEXT: .LBB5_7: ; %bb0
+; GCN-NEXT: ; %bb.7: ; %bb0
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: .Lpost_getpc5:
; GCN-NEXT: s_add_u32 s2, s2, (.LBB5_4-.Lpost_getpc5)&4294967295
@@ -299,7 +299,7 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_cbranch_execnz .LBB5_5
-; GCN-NEXT: .LBB5_9: ; %bb3
+; GCN-NEXT: ; %bb.9: ; %bb3
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: .Lpost_getpc6:
; GCN-NEXT: s_add_u32 s2, s2, (.LBB5_2-.Lpost_getpc6)&4294967295
@@ -347,7 +347,7 @@ define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(ptr ad
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_mov_b64 vcc, vcc
; GCN-NEXT: s_cbranch_vccz .LBB6_2
-; GCN-NEXT: .LBB6_3: ; %loop
+; GCN-NEXT: ; %bb.3: ; %loop
; GCN-NEXT: ; in Loop: Header=BB6_1 Depth=1
; GCN-NEXT: s_getpc_b64 s[0:1]
; GCN-NEXT: .Lpost_getpc7:
@@ -389,7 +389,7 @@ define amdgpu_kernel void @expand_requires_expand(i32 %cond0) #0 {
; GCN-NEXT: .LBB7_2: ; %Flow
; GCN-NEXT: s_andn2_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_cbranch_vccz .LBB7_3
-; GCN-NEXT: .LBB7_5: ; %Flow
+; GCN-NEXT: ; %bb.5: ; %Flow
; GCN-NEXT: s_getpc_b64 s[0:1]
; GCN-NEXT: .Lpost_getpc8:
; GCN-NEXT: s_add_u32 s0, s0, (.LBB7_4-.Lpost_getpc8)&4294967295
@@ -446,7 +446,7 @@ define amdgpu_kernel void @uniform_inside_divergent(ptr addrspace(1) %out, i32 %
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, 16, v0
; GCN-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT: s_cbranch_execnz .LBB8_1
-; GCN-NEXT: .LBB8_4: ; %entry
+; GCN-NEXT: ; %bb.4: ; %entry
; GCN-NEXT: s_getpc_b64 s[0:1]
; GCN-NEXT: .Lpost_getpc9:
; GCN-NEXT: s_add_u32 s0, s0, (.LBB8_3-.Lpost_getpc9)&4294967295
@@ -512,7 +512,7 @@ define amdgpu_kernel void @analyze_mask_branch() #0 {
; GCN-NEXT: .LBB9_2: ; %Flow1
; GCN-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GCN-NEXT: s_cbranch_execnz .LBB9_3
-; GCN-NEXT: .LBB9_6: ; %Flow1
+; GCN-NEXT: ; %bb.6: ; %Flow1
; GCN-NEXT: s_getpc_b64 s[0:1]
; GCN-NEXT: .Lpost_getpc10:
; GCN-NEXT: s_add_u32 s0, s0, (.LBB9_5-.Lpost_getpc10)&4294967295
@@ -534,7 +534,7 @@ define amdgpu_kernel void @analyze_mask_branch() #0 {
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_mov_b64 vcc, vcc
; GCN-NEXT: s_cbranch_vccnz .LBB9_5
-; GCN-NEXT: .LBB9_8: ; %loop
+; GCN-NEXT: ; %bb.8: ; %loop
; GCN-NEXT: ; in Loop: Header=BB9_4 Depth=1
; GCN-NEXT: s_getpc_b64 s[0:1]
; GCN-NEXT: .Lpost_getpc11:
@@ -580,7 +580,7 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32
; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_cmp_lt_i32 s7, 6
; GCN-NEXT: s_cbranch_scc1 .LBB10_1
-; GCN-NEXT: .LBB10_8: ; %bb
+; GCN-NEXT: ; %bb.8: ; %bb
; GCN-NEXT: s_getpc_b64 s[8:9]
; GCN-NEXT: .Lpost_getpc12:
; GCN-NEXT: s_add_u32 s8, s8, (.LBB10_2-.Lpost_getpc12)&4294967295
@@ -606,7 +606,7 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32
; GCN-NEXT: .LBB10_4: ; %Flow5
; GCN-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-NEXT: s_cbranch_vccz .LBB10_5
-; GCN-NEXT: .LBB10_10: ; %Flow5
+; GCN-NEXT: ; %bb.10: ; %Flow5
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: .Lpost_getpc13:
; GCN-NEXT: s_add_u32 s2, s2, (.LBB10_6-.Lpost_getpc13)&4294967295
@@ -11,7 +11,7 @@ declare void @llvm.amdgcn.s.sleep(i32) #0
; GCN-LABEL: {{^}}branch_offset_test:
; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0
; GCN-NEXT: s_cbranch_scc0 [[BB2:.LBB[0-9]+_[0-9]+]]
-; GCN-NEXT: .LBB{{[0-9]+}}_{{[0-9]+}}: ; %bb
+; GCN-NEXT: ; %bb.3: ; %bb
; GCN-NEXT: s_getpc_b64 s[[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]]
; GCN-NEXT: [[POST_GETPC:.Lpost_getpc[0-9]+]]:{{$}}
; GCN-NEXT: s_add_u32 s[[PC_LO]], s[[PC_LO]], ([[BB3:.LBB[0-9]+_[0-9]+]]-[[POST_GETPC]])&4294967295
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AMDGPU/long-branch-reserve-register.ll
@@ -59,7 +59,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s2, 0
; GCN-NEXT: s_cbranch_scc0 .LBB1_1
-; GCN-NEXT: .LBB1_3: ; %bb0
+; GCN-NEXT: ; %bb.3: ; %bb0
; GCN-NEXT: s_getpc_b64 s[8:9]
; GCN-NEXT: .Lpost_getpc0:
; GCN-NEXT: s_add_u32 s8, s8, (.LBB1_2-.Lpost_getpc0)&4294967295
@@ -107,7 +107,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr
; GCN-NEXT: v_cmp_eq_f32_e64 s[4:5], s2, 0
; GCN-NEXT: s_and_b64 vcc, exec, s[4:5]
; GCN-NEXT: s_cbranch_vccz .LBB2_1
-; GCN-NEXT: .LBB2_3: ; %bb0
+; GCN-NEXT: ; %bb.3: ; %bb0
; GCN-NEXT: s_getpc_b64 s[8:9]
; GCN-NEXT: .Lpost_getpc1:
; GCN-NEXT: s_add_u32 s8, s8, (.LBB2_2-.Lpost_getpc1)&4294967295
@@ -164,7 +164,7 @@ define amdgpu_kernel void @min_long_forward_vbranch(ptr addrspace(1) %arg) #0 {
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GCN-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GCN-NEXT: s_cbranch_execnz .LBB3_1
-; GCN-NEXT: .LBB3_3: ; %bb
+; GCN-NEXT: ; %bb.3: ; %bb
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc2:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB3_2-.Lpost_getpc2)&4294967295
@@ -220,7 +220,7 @@ define amdgpu_kernel void @long_backward_sbranch(ptr addrspace(1) %arg) #0 {
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_cbranch_scc0 .LBB4_2
-; GCN-NEXT: .LBB4_3: ; %bb2
+; GCN-NEXT: ; %bb.3: ; %bb2
; GCN-NEXT: ; in Loop: Header=BB4_1 Depth=1
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: .Lpost_getpc3:
@@ -259,7 +259,7 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: s_cmp_eq_u32 s2, 0
; GCN-NEXT: s_mov_b64 s[2:3], -1
; GCN-NEXT: s_cbranch_scc0 .LBB5_1
-; GCN-NEXT: .LBB5_7: ; %bb0
+; GCN-NEXT: ; %bb.7: ; %bb0
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc5:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB5_4-.Lpost_getpc5)&4294967295
@@ -293,7 +293,7 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: ;;#ASMEND
; GCN-NEXT: s_mov_b64 vcc, exec
; GCN-NEXT: s_cbranch_execnz .LBB5_5
-; GCN-NEXT: .LBB5_9: ; %bb3
+; GCN-NEXT: ; %bb.9: ; %bb3
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: .Lpost_getpc6:
; GCN-NEXT: s_add_u32 s4, s4, (.LBB5_2-.Lpost_getpc6)&4294967295