Skip to content

[AMDGPU] Fix undefined scc register in successor block of SI_KILL terminators #134718

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions llvm/lib/CodeGen/FinalizeISel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ static std::pair<bool, bool> runImpl(MachineFunction &MF) {
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();

TLI->finalizeLowering(MF);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you really need this reordering? I'd expect this to be called after the custom insertions

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we need this reordering: without it, llvm::addLiveIns() invoked from MachineBasicBlock::splitAt() triggers an assertion failure on the reservedRegsFrozen() check, because reserved registers have not yet been finalized at that point.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This API is bad. We probably should split these into separate steps, but given that no tests apparently rely on this order, I guess this is fine for now.


// Iterate through each instruction in the function, looking for pseudos.
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
Expand Down Expand Up @@ -74,9 +76,6 @@ static std::pair<bool, bool> runImpl(MachineFunction &MF) {
}
}
}

TLI->finalizeLowering(MF);

return {Changed, PreserveCFG};
}

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/SIISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4513,7 +4513,7 @@ Register SITargetLowering::getRegisterByName(const char *RegName, LLT VT,
MachineBasicBlock *
SITargetLowering::splitKillBlock(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/);
MachineBasicBlock *SplitBB = BB->splitAt(MI, /*UpdateLiveIns=*/true);
const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode()));
return SplitBB;
Expand Down
73 changes: 73 additions & 0 deletions llvm/test/CodeGen/AMDGPU/finalize-isel-kill-scc-vcc.mir
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -run-pass finalize-isel -mcpu=gfx1010 -verify-machineinstrs %s -o - | FileCheck %s
---
# Checks that when finalize-isel's custom inserter lowers
# SI_KILL_F32_COND_IMM_PSEUDO -- rewriting it to the terminator form and
# splitting the block after it -- the physical registers still read in the
# split-off block, $scc (defined by S_CMP_LG_U32) and $vcc_lo (defined by the
# kill terminator), are recorded as live-ins of the new block (bb.3 below).
name: phi_use_def_before_kill
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: phi_use_def_before_kill
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: liveins: $sgpr0, $sgpr1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, killed [[S_MOV_B32_]], 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
; CHECK-NEXT: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = nofpexcept V_CMP_GT_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[S_MOV_B32_1]], 0, implicit $mode, implicit $exec
; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 -1082130432
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_2]]
; CHECK-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[S_MOV_B32_1]], 0, [[COPY2]], killed [[V_CMP_GT_F32_e64_]], implicit $exec
; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY [[V_CNDMASK_B32_e64_]]
; CHECK-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; CHECK-NEXT: S_CMP_LG_U32 [[COPY]], killed [[S_MOV_B32_3]], implicit-def $scc
; CHECK-NEXT: SI_KILL_F32_COND_IMM_TERMINATOR [[V_ADD_F32_e64_]], 0, 2, implicit-def $vcc_lo, implicit $exec
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $vcc_lo, $scc
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: S_CBRANCH_SCC1 %bb.1, implicit $scc
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc_lo
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
; CHECK-NEXT: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32 = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, killed [[S_MOV_B32_4]], 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_ENDPGM 0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
; CHECK-NEXT: [[V_CMP_EQ_F32_e64_1:%[0-9]+]]:sreg_32 = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, killed [[S_MOV_B32_5]], 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_ENDPGM 0

bb.0:
liveins: $sgpr0, $sgpr1
%3:sgpr_32 = COPY $sgpr1
%2:sgpr_32 = COPY $sgpr0
%5:sgpr_32 = S_MOV_B32 1065353216
%6:vgpr_32 = nofpexcept V_ADD_F32_e64 0, %2:sgpr_32, 0, killed %5:sgpr_32, 0, 0, implicit $mode, implicit $exec
%7:sgpr_32 = S_MOV_B32 0
%8:sreg_32_xm0_xexec = nofpexcept V_CMP_GT_F32_e64 0, %6:vgpr_32, 0, %7:sgpr_32, 0, implicit $mode, implicit $exec
%9:sgpr_32 = S_MOV_B32 -1082130432
%11:vgpr_32 = COPY killed %9:sgpr_32
%10:vgpr_32 = V_CNDMASK_B32_e64 0, %7:sgpr_32, 0, %11:vgpr_32, killed %8:sreg_32_xm0_xexec, implicit $exec
%0:sgpr_32 = COPY %10:vgpr_32
%12:sreg_32 = S_MOV_B32 0
; $scc is defined here and is only read after the kill pseudo below, i.e. in
; the block created by the split -- exactly the liveness this test pins down.
S_CMP_LG_U32 %3:sgpr_32, killed %12:sreg_32, implicit-def $scc
SI_KILL_F32_COND_IMM_PSEUDO %6:vgpr_32, 0, 2, implicit-def $vcc, implicit $exec
S_CBRANCH_SCC1 %bb.1, implicit $scc
S_CBRANCH_VCCNZ %bb.2, implicit $vcc
S_BRANCH %bb.2

bb.1:
%13:sgpr_32 = S_MOV_B32 0
%14:sreg_32 = nofpexcept V_CMP_EQ_F32_e64 0, %3:sgpr_32, 0, killed %13:sgpr_32, 0, implicit $mode, implicit $exec
S_ENDPGM 0

bb.2:
%15:sgpr_32 = S_MOV_B32 0
%16:sreg_32 = nofpexcept V_CMP_EQ_F32_e64 0, %3:sgpr_32, 0, killed %15:sgpr_32, 0, implicit $mode, implicit $exec
S_ENDPGM 0

...
159 changes: 159 additions & 0 deletions llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1956,6 +1956,165 @@ bb.1:
ret void
}

define amdgpu_ps void @scc_use_after_kill_inst(float inreg %x, i32 inreg %y) #0 {
; The scc result of the uniform compare (%uniform.cond -> s_cmp_lg_u32) is
; consumed by a branch emitted after the llvm.amdgcn.kill; this exercises
; scc remaining usable across the block split introduced at the kill
; instruction (note the s_cbranch_scc0 following the kill's own branch in
; each of the check prefixes below).
; SI-LABEL: scc_use_after_kill_inst:
; SI: ; %bb.0: ; %bb
; SI-NEXT: v_add_f32_e64 v1, s0, 1.0
; SI-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
; SI-NEXT: s_mov_b64 s[2:3], exec
; SI-NEXT: s_cmp_lg_u32 s1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v1
; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], vcc
; SI-NEXT: s_cbranch_scc0 .LBB17_6
; SI-NEXT: ; %bb.1: ; %bb
; SI-NEXT: s_andn2_b64 exec, exec, vcc
; SI-NEXT: s_cbranch_scc0 .LBB17_3
; SI-NEXT: ; %bb.2: ; %bb8
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, 4.0
; SI-NEXT: .LBB17_3: ; %phibb
; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_vccz .LBB17_5
; SI-NEXT: ; %bb.4: ; %bb10
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: .LBB17_5: ; %end
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB17_6:
; SI-NEXT: s_mov_b64 exec, 0
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-WAVE64-LABEL: scc_use_after_kill_inst:
; GFX10-WAVE64: ; %bb.0: ; %bb
; GFX10-WAVE64-NEXT: v_add_f32_e64 v1, s0, 1.0
; GFX10-WAVE64-NEXT: s_mov_b64 s[2:3], exec
; GFX10-WAVE64-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-WAVE64-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
; GFX10-WAVE64-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
; GFX10-WAVE64-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v1
; GFX10-WAVE64-NEXT: s_andn2_b64 s[2:3], s[2:3], vcc
; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB17_6
; GFX10-WAVE64-NEXT: ; %bb.1: ; %bb
; GFX10-WAVE64-NEXT: s_andn2_b64 exec, exec, vcc
; GFX10-WAVE64-NEXT: s_cbranch_scc0 .LBB17_3
; GFX10-WAVE64-NEXT: ; %bb.2: ; %bb8
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v1, 8
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v0, 4.0
; GFX10-WAVE64-NEXT: global_store_dword v[0:1], v1, off
; GFX10-WAVE64-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-WAVE64-NEXT: .LBB17_3: ; %phibb
; GFX10-WAVE64-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; GFX10-WAVE64-NEXT: s_cbranch_vccz .LBB17_5
; GFX10-WAVE64-NEXT: ; %bb.4: ; %bb10
; GFX10-WAVE64-NEXT: v_mov_b32_e32 v0, 9
; GFX10-WAVE64-NEXT: global_store_dword v[0:1], v0, off
; GFX10-WAVE64-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-WAVE64-NEXT: .LBB17_5: ; %end
; GFX10-WAVE64-NEXT: s_endpgm
; GFX10-WAVE64-NEXT: .LBB17_6:
; GFX10-WAVE64-NEXT: s_mov_b64 exec, 0
; GFX10-WAVE64-NEXT: exp null off, off, off, off done vm
; GFX10-WAVE64-NEXT: s_endpgm
;
; GFX10-WAVE32-LABEL: scc_use_after_kill_inst:
; GFX10-WAVE32: ; %bb.0: ; %bb
; GFX10-WAVE32-NEXT: v_add_f32_e64 v1, s0, 1.0
; GFX10-WAVE32-NEXT: s_mov_b32 s2, exec_lo
; GFX10-WAVE32-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-WAVE32-NEXT: v_cmp_lt_f32_e32 vcc_lo, 0, v1
; GFX10-WAVE32-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc_lo
; GFX10-WAVE32-NEXT: v_cmp_nlt_f32_e32 vcc_lo, 0, v1
; GFX10-WAVE32-NEXT: s_andn2_b32 s2, s2, vcc_lo
; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB17_6
; GFX10-WAVE32-NEXT: ; %bb.1: ; %bb
; GFX10-WAVE32-NEXT: s_andn2_b32 exec_lo, exec_lo, vcc_lo
; GFX10-WAVE32-NEXT: s_cbranch_scc0 .LBB17_3
; GFX10-WAVE32-NEXT: ; %bb.2: ; %bb8
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v1, 8
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v0, 4.0
; GFX10-WAVE32-NEXT: global_store_dword v[0:1], v1, off
; GFX10-WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-WAVE32-NEXT: .LBB17_3: ; %phibb
; GFX10-WAVE32-NEXT: v_cmp_eq_f32_e32 vcc_lo, 0, v0
; GFX10-WAVE32-NEXT: s_cbranch_vccz .LBB17_5
; GFX10-WAVE32-NEXT: ; %bb.4: ; %bb10
; GFX10-WAVE32-NEXT: v_mov_b32_e32 v0, 9
; GFX10-WAVE32-NEXT: global_store_dword v[0:1], v0, off
; GFX10-WAVE32-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-WAVE32-NEXT: .LBB17_5: ; %end
; GFX10-WAVE32-NEXT: s_endpgm
; GFX10-WAVE32-NEXT: .LBB17_6:
; GFX10-WAVE32-NEXT: s_mov_b32 exec_lo, 0
; GFX10-WAVE32-NEXT: exp null off, off, off, off done vm
; GFX10-WAVE32-NEXT: s_endpgm
;
; GFX11-LABEL: scc_use_after_kill_inst:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: v_add_f32_e64 v1, s0, 1.0
; GFX11-NEXT: s_mov_b64 s[2:3], exec
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v1
; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], vcc
; GFX11-NEXT: s_cbranch_scc0 .LBB17_6
; GFX11-NEXT: ; %bb.1: ; %bb
; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
; GFX11-NEXT: ; %bb.2: ; %bb8
; GFX11-NEXT: v_mov_b32_e32 v1, 8
; GFX11-NEXT: v_mov_b32_e32 v0, 4.0
; GFX11-NEXT: global_store_b32 v[0:1], v1, off dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: .LBB17_3: ; %phibb
; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; GFX11-NEXT: s_cbranch_vccz .LBB17_5
; GFX11-NEXT: ; %bb.4: ; %bb10
; GFX11-NEXT: v_mov_b32_e32 v0, 9
; GFX11-NEXT: global_store_b32 v[0:1], v0, off dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: .LBB17_5: ; %end
; GFX11-NEXT: s_endpgm
; GFX11-NEXT: .LBB17_6:
; GFX11-NEXT: s_mov_b64 exec, 0
; GFX11-NEXT: exp mrt0 off, off, off, off done
; GFX11-NEXT: s_endpgm
bb:
%tmp = fadd float %x, 1.000000e+00
%tmp1 = fcmp olt float 0.000000e+00, %tmp
%tmp2 = select i1 %tmp1, float -1.000000e+00, float 0.000000e+00
%cmp.tmp2 = fcmp olt float %tmp2, 0.000000e+00
; Uniform (scalar) condition: its compare lowers to s_cmp_lg_u32, defining
; scc, and the kill call sits between that def and the branch that uses it.
%uniform.cond = icmp eq i32 %y, 0
call void @llvm.amdgcn.kill(i1 %cmp.tmp2)
br i1 %uniform.cond, label %phibb, label %bb8

phibb: ; preds = %bb8, %bb
%tmp5 = phi float [ %tmp2, %bb ], [ 4.000000e+00, %bb8 ]
%tmp6 = fcmp oeq float %tmp5, 0.000000e+00
br i1 %tmp6, label %bb10, label %end

bb8: ; preds = %bb
store volatile i32 8, ptr addrspace(1) poison, align 4
br label %phibb

bb10: ; preds = %phibb
store volatile i32 9, ptr addrspace(1) poison, align 4
br label %end

end: ; preds = %bb10, %phibb
ret void
}

declare void @llvm.amdgcn.exp.f32(i32 immarg, i32 immarg, float, float, float, float, i1 immarg, i1 immarg) #3
declare float @llvm.amdgcn.image.sample.l.2darray.f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
Expand Down
Loading