Skip to content

AMDGPU: Default to selecting frame indexes to SGPRs #115060

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged

Conversation

arsenm
Copy link
Contributor

@arsenm arsenm commented Nov 5, 2024

Only select to a VGPR if it's trivially used in VGPR only contexts.
This fixes mishandling frame indexes used in SGPR only contexts,
like inline assembly constraints.

This is suboptimal in the common case where the frame index
is transitively used by only VALU ops. We make up for this by later
folding the copy to VALU plus scalar op in SIFoldOperands.

Copy link
Contributor Author

arsenm commented Nov 5, 2024

@llvmbot
Copy link
Member

llvmbot commented Nov 5, 2024

@llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes

Only select to a VGPR if it's trivially used in VGPR only contexts.
This fixes mishandling frame indexes used in SGPR only contexts,
like inline assembly constraints.

This is suboptimal in the common case where the frame index
is transitively used by only VALU ops. We make up for this by later
folding the copy to VALU plus scalar op in SIFoldOperands.


Patch is 147.55 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/115060.diff

19 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/SIInstructions.td (+4-1)
  • (modified) llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll (+3-2)
  • (modified) llvm/test/CodeGen/AMDGPU/captured-frame-index.ll (+31-41)
  • (modified) llvm/test/CodeGen/AMDGPU/commute-compares.ll (+2-2)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll (+55-67)
  • (modified) llvm/test/CodeGen/AMDGPU/flat-scratch.ll (+95-60)
  • (modified) llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll (+21-26)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll (+36-36)
  • (modified) llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll (+36-36)
  • (modified) llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll (+12-12)
  • (modified) llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll (+6-8)
  • (modified) llvm/test/CodeGen/AMDGPU/large-alloca-compute.ll (+1-1)
  • (modified) llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll (+50-40)
  • (modified) llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll (+111-133)
  • (modified) llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll (+390-501)
  • (modified) llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll (+4-4)
  • (modified) llvm/test/CodeGen/AMDGPU/required-export-priority.ll (+2-2)
  • (modified) llvm/test/CodeGen/AMDGPU/scratch-buffer.ll (+9-5)
  • (modified) llvm/test/CodeGen/AMDGPU/scratch-simple.ll (+16-11)
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index c8a46217190a1d..423d63931a4755 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2175,8 +2175,11 @@ foreach vt = [i32, p3, p5, p6, p2] in {
   >;
 }
 
+// FIXME: The register bank of the frame index should depend on the
+// users, and transitive users of the add. We may require an
+// unnecessary copy from SGPR to VGPR.
 def : GCNPat <
-  (p5 frameindex:$fi),
+  (VGPRImm<(p5 frameindex)>:$fi),
   (V_MOV_B32_e32 (p5 (frameindex_to_targetframeindex $fi)))
 >;
 
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll b/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
index 5889af70a8f092..c1a957dec3e867 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
@@ -364,9 +364,10 @@ entry:
 
 ; FUNC-LABEL: ptrtoint:
 ; SI-NOT: ds_write
+; SI: s_add_i32 [[S_ADD_OFFSET:s[0-9]+]], s{{[0-9]+}}, 5
 ; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
-; SI: v_add_{{[iu]}}32_e32 [[ADD_OFFSET:v[0-9]+]], vcc, 5,
-; SI: buffer_load_dword v{{[0-9]+}}, [[ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offen ;
+; SI: v_mov_b32_e32 [[V_ADD_OFFSET:v[0-9]+]], [[S_ADD_OFFSET]]
+; SI: buffer_load_dword v{{[0-9]+}}, [[V_ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offen ;
 define amdgpu_kernel void @ptrtoint(ptr addrspace(1) %out, i32 %a, i32 %b) #0 {
   %alloca = alloca [16 x i32], addrspace(5)
   %tmp0 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %a
diff --git a/llvm/test/CodeGen/AMDGPU/captured-frame-index.ll b/llvm/test/CodeGen/AMDGPU/captured-frame-index.ll
index ca0c669056ee33..2ec4c074a892dc 100644
--- a/llvm/test/CodeGen/AMDGPU/captured-frame-index.ll
+++ b/llvm/test/CodeGen/AMDGPU/captured-frame-index.ll
@@ -147,19 +147,14 @@ define amdgpu_kernel void @stored_fi_to_global_2_small_objects(ptr addrspace(1)
 
 ; GCN-LABEL: {{^}}kernel_stored_fi_to_global_huge_frame_offset:
 ; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
-; GCN: buffer_store_dword [[BASE_0]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4{{$}}
 
-; FIXME: Re-initialize
-; GCN: v_mov_b32_e32 [[BASE_0_1:v[0-9]+]], 4{{$}}
+; GCN: buffer_store_dword [[BASE_0]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4{{$}}
 
 ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
-; GCN-DAG: v_add_i32_e32 [[BASE_1_OFF_1:v[0-9]+]], vcc, 0x3ffc, [[BASE_0_1]]
-
+; GCN-DAG: v_mov_b32_e32 [[V_BASE_1_OFF:v[0-9]+]], 0x4000{{$}}
+; GCN: buffer_store_dword [[K]], [[V_BASE_1_OFF]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
 
-; GCN: v_add_i32_e32 [[BASE_1_OFF_2:v[0-9]+]], vcc, 56, [[BASE_0_1]]
-; GCN: buffer_store_dword [[K]], [[BASE_1_OFF_1]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
-
-; GCN: buffer_store_dword [[BASE_1_OFF_2]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; GCN: buffer_store_dword [[V_BASE_1_OFF]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 define amdgpu_kernel void @kernel_stored_fi_to_global_huge_frame_offset(ptr addrspace(1) %ptr) #0 {
   %tmp0 = alloca [4096 x i32], addrspace(5)
   %tmp1 = alloca [4096 x i32], addrspace(5)
@@ -171,20 +166,20 @@ define amdgpu_kernel void @kernel_stored_fi_to_global_huge_frame_offset(ptr addr
   ret void
 }
 
+; FIXME: Shift of SP repeated twice
 ; GCN-LABEL: {{^}}func_stored_fi_to_global_huge_frame_offset:
-; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
+; GCN-DAG: v_lshr_b32_e64 [[FI_TMP_0:v[0-9]+]], s32, 6
+; GCN-DAG: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
 ; GCN: buffer_store_dword [[BASE_0]], off, s{{\[[0-9]+:[0-9]+\]}}, s32 offset:4{{$}}
 
-; GCN: v_lshr_b32_e64 [[FI_TMP:v[0-9]+]], s32, 6
-; GCN: v_add_i32_e32 [[BASE_0_1:v[0-9]+]], vcc, 4, [[FI_TMP]]{{$}}
 
+; GCN-DAG: v_add_i32_e32 [[FI_0:v[0-9]+]], vcc, 0x4000, [[FI_TMP_0]]{{$}}
 ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
-; GCN-DAG: v_add_i32_e32 [[BASE_1_OFF_1:v[0-9]+]], vcc, 0x3ffc, [[BASE_0_1]]
 
-; GCN: v_add_i32_e32 [[BASE_1_OFF_2:v[0-9]+]], vcc, 56, [[BASE_0_1]]
-; GCN: buffer_store_dword [[K]], [[BASE_1_OFF_1]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
-
-; GCN: buffer_store_dword [[BASE_1_OFF_2]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
+; GCN: buffer_store_dword [[K]], [[FI_0]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
+; GCN: v_lshr_b32_e64 [[FI_TMP_1:v[0-9]+]], s32, 6
+; GCN: v_add_i32_e32 [[BASE_0_1:v[0-9]+]], vcc, 60, [[FI_TMP_1]]{{$}}
+; GCN: buffer_store_dword [[BASE_0_1]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
 define void @func_stored_fi_to_global_huge_frame_offset(ptr addrspace(1) %ptr) #0 {
   %tmp0 = alloca [4096 x i32], addrspace(5)
   %tmp1 = alloca [4096 x i32], addrspace(5)
@@ -217,9 +212,9 @@ entry:
   ret void
 }
 
-; FIXME: This is broken, and the sgpr input just gets replaced with a VGPR
 ; GCN-LABEL: {{^}}func_alloca_offset0__use_asm_sgpr:
-; GCN: v_lshr_b32_e64 [[FI:v[0-9]+]], s32, 6
+; GCN: s_lshr_b32 [[FI:s[0-9]+]], s32, 6
+; GCN-NOT: [[FI]]
 ; GCN: ; use [[FI]]
 define void @func_alloca_offset0__use_asm_sgpr() {
   %alloca = alloca i32, addrspace(5)
@@ -238,9 +233,9 @@ define void @func_alloca_offset0__use_asm_vgpr() {
 }
 
 ; GCN-LABEL: {{^}}func_alloca_offset0__use_asm_phys_sgpr:
-; GCN: s_lshr_b32 s8, s32, 6
+; GCN: s_lshr_b32 [[FI:s[0-9]+]], s32, 6
 ; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: ; use s8
+; GCN-NEXT: ; use [[FI]]
 define void @func_alloca_offset0__use_asm_phys_sgpr() {
   %alloca = alloca i32, addrspace(5)
   call void asm sideeffect "; use $0", "{s8}"(ptr addrspace(5) %alloca)
@@ -258,12 +253,11 @@ define void @func_alloca_offset0__use_asm_phys_vgpr() {
 }
 
 ; GCN-LABEL: {{^}}func_alloca_offset_use_asm_sgpr:
-; GCN: v_lshr_b32_e64 [[FI0_TMP0:v[0-9]+]], s32, 6
-; GCN-NEXT: v_add_i32_e32 [[FI0:v[0-9]+]], vcc, 16, [[FI0_TMP0]]
+; GCN: s_lshr_b32 [[FI0_TMP0:s[0-9]+]], s32, 6
+; GCN-NEXT: s_add_i32 [[FI0:s[0-9]+]], [[FI0_TMP0]], 16
 
-; GCN: v_lshr_b32_e64 [[TMP:v[0-9]+]], s32, 6
-; GCN-NEXT: s_movk_i32 vcc_lo, 0x4010
-; GCN-NEXT: v_add_i32_e32 [[TMP]], vcc, vcc_lo, [[TMP]]
+; GCN: s_lshr_b32 [[TMP:s[0-9]+]], s32, 6
+; GCN-NEXT: s_addk_i32 [[TMP]], 0x4010
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN: ; use [[TMP]]
 define void @func_alloca_offset_use_asm_sgpr() {
@@ -274,19 +268,17 @@ define void @func_alloca_offset_use_asm_sgpr() {
   ret void
 }
 
-; FIXME: Shouldn't need to materialize constant
 ; GCN-LABEL: {{^}}func_alloca_offset_use_asm_vgpr:
-; GCN: v_lshr_b32_e64 [[FI0_TMP:v[0-9]+]], s32, 6
-; GCN-NEXT: v_add_i32_e32 [[FI0:v[0-9]+]], vcc, 16, [[FI0_TMP]]
+; GCN: s_lshr_b32 [[S_FI:s[0-9]+]], s32, 6
+; GCN: v_lshr_b32_e64 [[V_FI:v[0-9]+]], s32, 6
+; GCN: s_movk_i32 vcc_lo, 0x4010
+; GCN: s_add_i32 [[S_FI]], [[S_FI]], 16
 ; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: ; use [[FI0]]
+; GCN-NEXT: ; use [[S_FI]]
 ; GCN-NEXT: ;;#ASMEND
-
-; GCN: v_lshr_b32_e64 [[FI1_TMP:v[0-9]+]], s32, 6
-; GCN-NEXT: s_movk_i32 vcc_lo, 0x4010
-; GCN-NEXT: v_add_i32_e32 [[FI1:v[0-9]+]], vcc, vcc_lo, [[FI1_TMP]]
+; GCN-NEXT: v_add_i32_e32 [[V_FI:v[0-9]+]], vcc, vcc_lo, [[V_FI]]
 ; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: ; use [[FI1]]
+; GCN-NEXT: ; use [[V_FI]]
 ; GCN-NEXT: ;;#ASMEND
 define void @func_alloca_offset_use_asm_vgpr() {
   %alloca0 = alloca [4096 x i32], align 16, addrspace(5)
@@ -296,17 +288,15 @@ define void @func_alloca_offset_use_asm_vgpr() {
   ret void
 }
 
-; FIXME: Using VGPR for SGPR input
 ; GCN-LABEL: {{^}}kernel_alloca_offset_use_asm_sgpr:
-; GCN: v_mov_b32_e32 v0, 16
+; GCN: s_mov_b32 [[FI0:s[0-9]+]], 16
 ; GCN-NOT: v0
 ; GCN: ;;#ASMSTART
-; GCN-NEXT: ; use v0
+; GCN-NEXT: ; use [[FI0]]
 ; GCN-NEXT: ;;#ASMEND
-
-; GCN: v_mov_b32_e32 v0, 0x4010
+; GCN: s_movk_i32 [[FI1:s[0-9]+]], 0x4010
 ; GCN-NEXT: ;;#ASMSTART
-; GCN-NEXT: ; use v0
+; GCN-NEXT: ; use [[FI1]]
 ; GCN-NEXT: ;;#ASMEND
 define amdgpu_kernel void @kernel_alloca_offset_use_asm_sgpr() {
   %alloca0 = alloca [4096 x i32], align 16, addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/commute-compares.ll b/llvm/test/CodeGen/AMDGPU/commute-compares.ll
index d94e75c8c8e223..d36dcc247331c7 100644
--- a/llvm/test/CodeGen/AMDGPU/commute-compares.ll
+++ b/llvm/test/CodeGen/AMDGPU/commute-compares.ll
@@ -699,8 +699,8 @@ define amdgpu_kernel void @commute_uno_2.0_f64(ptr addrspace(1) %out, ptr addrsp
 ; GCN-LABEL: {{^}}commute_frameindex:
 ; XGCN: v_cmp_eq_u32_e32 vcc, 0, v{{[0-9]+}}
 
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0{{$}}
-; GCN: v_cmp_eq_u32_e32 vcc, v{{[0-9]+}}, [[FI]]
+; GCN: s_mov_b32 [[FI:s[0-9]+]], 0{{$}}
+; GCN: v_cmp_eq_u32_e32 vcc,  [[FI]], v{{[0-9]+}}
 define amdgpu_kernel void @commute_frameindex(ptr addrspace(1) nocapture %out) #0 {
 entry:
   %stack0 = alloca i32, addrspace(5)
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
index 9d9d5b239a12c8..c145ce06f9dd21 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-svs.ll
@@ -15,18 +15,16 @@ define amdgpu_kernel void @soff1_voff1(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff1_voff1:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, v1, v0
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 1, v0
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, s0, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v3, 2, v0
+; GFX940-SDAG-NEXT:    scratch_store_byte v2, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
+; GFX940-SDAG-NEXT:    scratch_store_byte v3, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
@@ -145,18 +143,17 @@ define amdgpu_kernel void @soff1_voff2(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff1_voff2:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 1, v0
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v2
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v3, 2, v0
+; GFX940-SDAG-NEXT:    scratch_store_byte v2, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
+; GFX940-SDAG-NEXT:    scratch_store_byte v3, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
@@ -282,18 +279,17 @@ define amdgpu_kernel void @soff1_voff4(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff1_voff4:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 1, v0
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v2
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v3, 2, v0
+; GFX940-SDAG-NEXT:    scratch_store_byte v2, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
+; GFX940-SDAG-NEXT:    scratch_store_byte v3, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
@@ -419,19 +415,17 @@ define amdgpu_kernel void @soff2_voff1(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff2_voff1:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, v1, v0
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 1, v0
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, s0, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v3, 2, v0
+; GFX940-SDAG-NEXT:    scratch_store_byte v2, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
+; GFX940-SDAG-NEXT:    scratch_store_byte v3, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
@@ -556,14 +550,13 @@ define amdgpu_kernel void @soff2_voff2(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff2_voff2:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v1
-; GFX940-SDAG-NEXT:    scratch_store_byte v0, v2, off offset:1 sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v2
+; GFX940-SDAG-NEXT:    scratch_store_byte v0, v1, off offset:1 sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
@@ -698,14 +691,13 @@ define amdgpu_kernel void @soff2_voff4(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff2_voff4:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v1
-; GFX940-SDAG-NEXT:    scratch_store_byte v0, v2, off offset:1 sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v2
+; GFX940-SDAG-NEXT:    scratch_store_byte v0, v1, off offset:1 sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
@@ -840,19 +832,17 @@ define amdgpu_kernel void @soff4_voff1(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff4_voff1:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 2
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, v1, v0
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 1, v0
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, s0, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX940-SDAG-NEXT:    v_add_u32_e32 v3, 2, v0
+; GFX940-SDAG-NEXT:    scratch_store_byte v2, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v1, v2, off sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
+; GFX940-SDAG-NEXT:    scratch_store_byte v3, v1, off sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
@@ -977,14 +967,13 @@ define amdgpu_kernel void @soff4_voff2(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff4_voff2:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 2
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v1
-; GFX940-SDAG-NEXT:    scratch_store_byte v0, v2, off offset:1 sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 1, v2
+; GFX940-SDAG-NEXT:    scratch_store_byte v0, v1, off offset:1 sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, 2, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
@@ -1119,17 +1108,16 @@ define amdgpu_kernel void @soff4_voff4(i32 %soff) {
 ; GFX940-SDAG-LABEL: soff4_voff4:
 ; GFX940-SDAG:       ; %bb.0: ; %bb
 ; GFX940-SDAG-NEXT:    s_load_dword s0, s[2:3], 0x24
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX940-SDAG-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v2, 2
 ; GFX940-SDAG-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX940-SDAG-NEXT:    s_lshl_b32 s0, s0, 2
-; GFX940-SDAG-NEXT:    v_add_u32_e32 v1, s0, v1
-; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v1
-; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 2
-; GFX940-SDAG-NEXT:    scratch_store_byte v0, v2, off offset:1 sc0 sc1
+; GFX940-SDAG-NEXT:    v_mov_b32_e32 v3, s0
+; GFX940-SDAG-NEXT:    v_lshl_add_u32 v0, v0, 2, v3
+; GFX940-SDAG-NEXT:    scratch_store_byte v0, v1, off offset:1 sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
-; GFX940-SDAG-NEXT:    scratch_store_byte v0, v1, off offset:2 sc0 sc1
+; GFX940-SDAG-NEXT:    scratch_store_byte v0, v2, off offset:2 sc0 sc1
 ; GFX940-SDAG-NEXT:    s_waitcnt vmcnt(0)
 ; GFX940-SDAG-NEXT:    v_add_u32_e32 v0, 4, v0
 ; GFX940-SDAG-NEXT:    v_mov_b32_e32 v1, 4
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index 105174d7c9b3b7..0c1d3b25463038 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -4688,13 +4688,13 @@ define amdgpu_ps void @large_offset() {
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    scratch_load_dwordx4 v[0:3], off, s0 offset:3024 glc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, 16
+; GFX9-NEXT:    s_mov_b32 s0, 16
 ; GFX9-NEXT:    ;;#ASMSTART
-; GFX9-NEXT:    ; use v0
+; GFX9-NEXT:    ; use s0
 ; GFX9-NEXT:    ;;#ASMEND
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x810
+; GFX9-NEXT:    s_movk_i32 s0, 0x810
 ; GFX9-NEXT:    ;;#ASMSTART
-; GFX9-NEXT:    ; use v0
+; GFX9-NEXT:    ; use s0
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_endpgm
 ;
@@ -4705,27 +4705,29 @@ define amdgpu_ps vo...
[truncated]

@arsenm arsenm marked this pull request as ready for review November 5, 2024 20:41
@arsenm arsenm force-pushed the users/arsenm/amdgpu-si-fold-operands-copy-more-scalar-ops-to-vector branch from 493a45c to 222beef Compare November 6, 2024 17:12
@arsenm arsenm force-pushed the users/arsenm/amdgpu-add-vgprimm-predicate-frameindex-pattern branch from a5cd268 to bbf5b14 Compare November 6, 2024 17:13
Base automatically changed from users/arsenm/amdgpu-si-fold-operands-copy-more-scalar-ops-to-vector to main November 8, 2024 03:02
@arsenm arsenm force-pushed the users/arsenm/amdgpu-add-vgprimm-predicate-frameindex-pattern branch from bbf5b14 to f0c57c9 Compare November 8, 2024 03:04
Only select to a VGPR if it's trivially used in VGPR only contexts.
This fixes mishandling frame indexes used in SGPR only contexts,
like inline assembly constraints.

This is suboptimal in the common case where the frame index
is transitively used by only VALU ops. We make up for this by later
folding the copy to VALU plus scalar op in SIFoldOperands.
@arsenm arsenm force-pushed the users/arsenm/amdgpu-add-vgprimm-predicate-frameindex-pattern branch from f0c57c9 to a59b6b5 Compare November 8, 2024 18:24
Copy link
Contributor Author

arsenm commented Nov 9, 2024

Merge activity

  • Nov 8, 9:59 PM EST: A user started a stack merge that includes this pull request via Graphite.
  • Nov 8, 10:02 PM EST: A user merged this pull request with Graphite.

@arsenm arsenm merged commit 1bf385f into main Nov 9, 2024
6 of 8 checks passed
@arsenm arsenm deleted the users/arsenm/amdgpu-add-vgprimm-predicate-frameindex-pattern branch November 9, 2024 03:02
@llvm-ci
Copy link
Collaborator

llvm-ci commented Nov 9, 2024

LLVM Buildbot has detected a new failure on builder openmp-offload-libc-amdgpu-runtime running on omp-vega20-1 while building llvm at step 11 "Add check check-libc-amdgcn-amd-amdhsa".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/73/builds/8292

Here is the relevant piece of the build log for the reference
Step 11 (Add check check-libc-amdgcn-amd-amdhsa) failure: test (failure)
...
[1693/2683] Linking CXX executable libc/test/src/ctype/libc.test.src.ctype.isspace_test.__hermetic__.__build__
[1694/2683] Running hermetic test libc.test.src.ctype.isprint_test.__hermetic__
[==========] Running 1 test from 1 test suite.
[ RUN      ] LlvmLibcIsPrint.DefaultLocale
[       OK ] LlvmLibcIsPrint.DefaultLocale (81 us)
Ran 1 tests.  PASS: 1  FAIL: 0
[1695/2683] Linking CXX executable libc/test/src/ctype/libc.test.src.ctype.isupper_test.__hermetic__.__build__
[1696/2683] Linking CXX executable libc/test/src/stdbit/libc.test.src.stdbit.stdc_leading_zeros_uc_test.__hermetic__.__build__
[1697/2683] Linking CXX executable libc/test/src/ctype/libc.test.src.ctype.islower_test.__hermetic__.__build__
[1698/2683] Running hermetic test libc.test.src.__support.block_test.__hermetic__
FAILED: libc/test/src/__support/libc.test.src.__support.block_test.__hermetic__-cmd /home/ompworker/bbot/openmp-offload-libc-amdgpu-runtime/llvm.build/runtimes/runtimes-amdgcn-amd-amdhsa-bins/libc/test/src/__support/libc.test.src.__support.block_test.__hermetic__-cmd 
cd /home/ompworker/bbot/openmp-offload-libc-amdgpu-runtime/llvm.build/runtimes/runtimes-amdgcn-amd-amdhsa-bins/libc/test/src/__support && /home/ompworker/bbot/openmp-offload-libc-amdgpu-runtime/llvm.build/bin/amdhsa-loader /home/ompworker/bbot/openmp-offload-libc-amdgpu-runtime/llvm.build/runtimes/runtimes-amdgcn-amd-amdhsa-bins/libc/test/src/__support/libc.test.src.__support.block_test.__hermetic__.__build__
[==========] Running 26 tests from 1 test suite.
[ RUN      ] LlvmLibcBlockTestCanCreateSingleAlignedBlock.CanCreateSingleAlignedBlock
[       OK ] LlvmLibcBlockTestCanCreateSingleAlignedBlock.CanCreateSingleAlignedBlock (6 us)
[ RUN      ] LlvmLibcBlockTestCanCreateUnalignedSingleBlock.CanCreateUnalignedSingleBlock
[       OK ] LlvmLibcBlockTestCanCreateUnalignedSingleBlock.CanCreateUnalignedSingleBlock (2 us)
[ RUN      ] LlvmLibcBlockTestCannotCreateTooSmallBlock.CannotCreateTooSmallBlock
[       OK ] LlvmLibcBlockTestCannotCreateTooSmallBlock.CannotCreateTooSmallBlock (2 us)
[ RUN      ] LlvmLibcBlockTest.CannotCreateTooLargeBlock
[       OK ] LlvmLibcBlockTest.CannotCreateTooLargeBlock (1 us)
[ RUN      ] LlvmLibcBlockTestCanSplitBlock.CanSplitBlock
[       OK ] LlvmLibcBlockTestCanSplitBlock.CanSplitBlock (6 us)
[ RUN      ] LlvmLibcBlockTestCanSplitBlockUnaligned.CanSplitBlockUnaligned
[       OK ] LlvmLibcBlockTestCanSplitBlockUnaligned.CanSplitBlockUnaligned (5 us)
[ RUN      ] LlvmLibcBlockTestCanSplitMidBlock.CanSplitMidBlock
[       OK ] LlvmLibcBlockTestCanSplitMidBlock.CanSplitMidBlock (5 us)
[ RUN      ] LlvmLibcBlockTestCannotSplitTooSmallBlock.CannotSplitTooSmallBlock
[       OK ] LlvmLibcBlockTestCannotSplitTooSmallBlock.CannotSplitTooSmallBlock (3 us)
[ RUN      ] LlvmLibcBlockTestCannotSplitBlockWithoutHeaderSpace.CannotSplitBlockWithoutHeaderSpace
[       OK ] LlvmLibcBlockTestCannotSplitBlockWithoutHeaderSpace.CannotSplitBlockWithoutHeaderSpace (3 us)
[ RUN      ] LlvmLibcBlockTestCannotMakeBlockLargerInSplit.CannotMakeBlockLargerInSplit
[       OK ] LlvmLibcBlockTestCannotMakeBlockLargerInSplit.CannotMakeBlockLargerInSplit (3 us)
[ RUN      ] LlvmLibcBlockTestCannotMakeSecondBlockLargerInSplit.CannotMakeSecondBlockLargerInSplit
[       OK ] LlvmLibcBlockTestCannotMakeSecondBlockLargerInSplit.CannotMakeSecondBlockLargerInSplit (3 us)
[ RUN      ] LlvmLibcBlockTestCannotMakeZeroSizeFirstBlock.CannotMakeZeroSizeFirstBlock
[       OK ] LlvmLibcBlockTestCannotMakeZeroSizeFirstBlock.CannotMakeZeroSizeFirstBlock (2 us)
[ RUN      ] LlvmLibcBlockTestCanMakeMinimalSizeFirstBlock.CanMakeMinimalSizeFirstBlock
[       OK ] LlvmLibcBlockTestCanMakeMinimalSizeFirstBlock.CanMakeMinimalSizeFirstBlock (4 us)
[ RUN      ] LlvmLibcBlockTestCanMakeMinimalSizeSecondBlock.CanMakeMinimalSizeSecondBlock
[       OK ] LlvmLibcBlockTestCanMakeMinimalSizeSecondBlock.CanMakeMinimalSizeSecondBlock (5 us)
[ RUN      ] LlvmLibcBlockTestCanMarkBlockUsed.CanMarkBlockUsed
[       OK ] LlvmLibcBlockTestCanMarkBlockUsed.CanMarkBlockUsed (5 us)
[ RUN      ] LlvmLibcBlockTestCannotSplitUsedBlock.CannotSplitUsedBlock
[       OK ] LlvmLibcBlockTestCannotSplitUsedBlock.CannotSplitUsedBlock (3 us)
[ RUN      ] LlvmLibcBlockTestCanMergeWithNextBlock.CanMergeWithNextBlock
[       OK ] LlvmLibcBlockTestCanMergeWithNextBlock.CanMergeWithNextBlock (6 us)
[ RUN      ] LlvmLibcBlockTestCannotMergeWithFirstOrLastBlock.CannotMergeWithFirstOrLastBlock
[       OK ] LlvmLibcBlockTestCannotMergeWithFirstOrLastBlock.CannotMergeWithFirstOrLastBlock (5 us)

@llvm-ci
Copy link
Collaborator

llvm-ci commented Nov 9, 2024

LLVM Buildbot has detected a new failure on builder llvm-clang-x86_64-expensive-checks-ubuntu running on as-builder-4 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/187/builds/2434

Here is the relevant piece of the build log for the reference
Step 6 (test-build-unified-tree-check-all) failure: test (failure)
******************** TEST 'LLVM :: CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll' FAILED ********************
Exit Code: 2

Command Output (stderr):
--
RUN: at line 2: /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll | /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/FileCheck --check-prefix=MUBUF /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+ /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906
+ /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/build/bin/FileCheck --check-prefix=MUBUF /home/buildbot/worker/as-builder-4/ramdisk/expensive-checks/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll

# After Prologue/Epilogue Insertion & Frame Finalization
# Machine code for function func_local_stack_offset_uses_sp: NoPHIs, TracksLiveness, NoVRegs, TiedOpsRewritten, TracksDebugUserValues
Frame Objects:
  fi#0: size=4, align=8192, at location [SP+8192]
  fi#1: size=8480, align=4096, at location [SP+12288]
  fi#2: size=4, align=4, at location [SP]
Function Live Ins: $vgpr0, $vgpr1

bb.0.entry:
  successors: %bb.1(0x80000000); %bb.1(100.00%)
  liveins: $sgpr5, $vgpr0, $vgpr1
  $sgpr5 = frame-setup COPY $sgpr33
  $sgpr33 = frame-setup S_ADD_I32 $sgpr32, 524224, implicit-def $scc
  $sgpr33 = frame-setup S_AND_B32 killed $sgpr33, 4294443008, implicit-def dead $scc
  $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 2097152, implicit-def dead $scc
  renamable $vgpr3 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr2 = V_ADD_U32_e64 12352, killed $vgpr3, 0, implicit $exec
  renamable $vgpr3 = V_MOV_B32_e32 0, implicit $exec
  $vgpr4 = V_MOV_B32_e32 8192, implicit $exec
  BUFFER_STORE_DWORD_OFFEN renamable $vgpr3, killed $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr33, 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.pin.low, align 8192, addrspace 5)
  renamable $sgpr4 = S_MOV_B32 0

bb.1.loadstoreloop:
; predecessors: %bb.0, %bb.1
  successors: %bb.2(0x04000000), %bb.1(0x7c000000); %bb.2(3.12%), %bb.1(96.88%)
  liveins: $sgpr4, $sgpr5, $vgpr2, $vgpr3, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr5 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 $sgpr4, killed $vgpr5, 0, implicit $exec
  $vgpr5 = V_MOV_B32_e32 12288, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 killed $vgpr5, killed $vgpr4, 0, implicit $exec
  renamable $sgpr4 = S_ADD_I32 killed renamable $sgpr4, 1, implicit-def dead $scc
  S_CMPK_LT_U32 renamable $sgpr4, 8480, implicit-def $scc
  BUFFER_STORE_BYTE_OFFEN renamable $vgpr3, killed renamable $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile store (s8) into %ir.1, addrspace 5)
  S_CBRANCH_SCC1 %bb.1, implicit killed $scc
  S_BRANCH %bb.2

bb.2.split:
; predecessors: %bb.1
  liveins: $sgpr5, $vgpr2, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr4 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr3 = V_ADD_U32_e32 20688, killed $vgpr4, implicit $exec
...

@llvm-ci
Copy link
Collaborator

llvm-ci commented Nov 9, 2024

LLVM Buildbot has detected a new failure on builder llvm-clang-x86_64-expensive-checks-debian running on gribozavr4 while building llvm at step 6 "test-build-unified-tree-check-all".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/16/builds/8579

Here is the relevant piece of the build log for the reference
Step 6 (test-build-unified-tree-check-all) failure: test (failure)
******************** TEST 'LLVM :: CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll' FAILED ********************
Exit Code: 2

Command Output (stderr):
--
RUN: at line 2: /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll | /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/FileCheck --check-prefix=MUBUF /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+ /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906
+ /b/1/llvm-clang-x86_64-expensive-checks-debian/build/bin/FileCheck --check-prefix=MUBUF /b/1/llvm-clang-x86_64-expensive-checks-debian/llvm-project/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll

# After Prologue/Epilogue Insertion & Frame Finalization
# Machine code for function func_local_stack_offset_uses_sp: NoPHIs, TracksLiveness, NoVRegs, TiedOpsRewritten, TracksDebugUserValues
Frame Objects:
  fi#0: size=4, align=8192, at location [SP+8192]
  fi#1: size=8480, align=4096, at location [SP+12288]
  fi#2: size=4, align=4, at location [SP]
Function Live Ins: $vgpr0, $vgpr1

bb.0.entry:
  successors: %bb.1(0x80000000); %bb.1(100.00%)
  liveins: $sgpr5, $vgpr0, $vgpr1
  $sgpr5 = frame-setup COPY $sgpr33
  $sgpr33 = frame-setup S_ADD_I32 $sgpr32, 524224, implicit-def $scc
  $sgpr33 = frame-setup S_AND_B32 killed $sgpr33, 4294443008, implicit-def dead $scc
  $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 2097152, implicit-def dead $scc
  renamable $vgpr3 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr2 = V_ADD_U32_e64 12352, killed $vgpr3, 0, implicit $exec
  renamable $vgpr3 = V_MOV_B32_e32 0, implicit $exec
  $vgpr4 = V_MOV_B32_e32 8192, implicit $exec
  BUFFER_STORE_DWORD_OFFEN renamable $vgpr3, killed $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr33, 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.pin.low, align 8192, addrspace 5)
  renamable $sgpr4 = S_MOV_B32 0

bb.1.loadstoreloop:
; predecessors: %bb.0, %bb.1
  successors: %bb.2(0x04000000), %bb.1(0x7c000000); %bb.2(3.12%), %bb.1(96.88%)
  liveins: $sgpr4, $sgpr5, $vgpr2, $vgpr3, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr5 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 $sgpr4, killed $vgpr5, 0, implicit $exec
  $vgpr5 = V_MOV_B32_e32 12288, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 killed $vgpr5, killed $vgpr4, 0, implicit $exec
  renamable $sgpr4 = S_ADD_I32 killed renamable $sgpr4, 1, implicit-def dead $scc
  S_CMPK_LT_U32 renamable $sgpr4, 8480, implicit-def $scc
  BUFFER_STORE_BYTE_OFFEN renamable $vgpr3, killed renamable $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile store (s8) into %ir.1, addrspace 5)
  S_CBRANCH_SCC1 %bb.1, implicit killed $scc
  S_BRANCH %bb.2

bb.2.split:
; predecessors: %bb.1
  liveins: $sgpr5, $vgpr2, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr4 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr3 = V_ADD_U32_e32 20688, killed $vgpr4, implicit $exec
...

@jplehr
Copy link
Contributor

jplehr commented Nov 11, 2024

Hi, our libc on GPU bot is red since this change landed: https://lab.llvm.org/buildbot/#/builders/73/builds/8292

@mikaelholmen
Copy link
Collaborator

Hi @arsenm

Running the testcase local-stack-alloc-block-sp-reference.ll with verifiers on with this patch like

llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll -verify-machineinstrs

fails with

# After Prologue/Epilogue Insertion & Frame Finalization
# Machine code for function func_local_stack_offset_uses_sp: NoPHIs, TracksLiveness, NoVRegs, TiedOpsRewritten, TracksDebugUserValues
Frame Objects:
  fi#0: size=4, align=8192, at location [SP+8192]
  fi#1: size=8480, align=4096, at location [SP+12288]
  fi#2: size=4, align=4, at location [SP]
Function Live Ins: $vgpr0, $vgpr1

bb.0.entry:
  successors: %bb.1(0x80000000); %bb.1(100.00%)
  liveins: $sgpr5, $vgpr0, $vgpr1
  $sgpr5 = frame-setup COPY $sgpr33
  $sgpr33 = frame-setup S_ADD_I32 $sgpr32, 524224, implicit-def $scc
  $sgpr33 = frame-setup S_AND_B32 killed $sgpr33, 4294443008, implicit-def dead $scc
  $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 2097152, implicit-def dead $scc
  renamable $vgpr3 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr2 = V_ADD_U32_e64 12352, killed $vgpr3, 0, implicit $exec
  renamable $vgpr3 = V_MOV_B32_e32 0, implicit $exec
  $vgpr4 = V_MOV_B32_e32 8192, implicit $exec
  BUFFER_STORE_DWORD_OFFEN renamable $vgpr3, killed $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr33, 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.pin.low, align 8192, addrspace 5)
  renamable $sgpr4 = S_MOV_B32 0

bb.1.loadstoreloop:
; predecessors: %bb.0, %bb.1
  successors: %bb.2(0x04000000), %bb.1(0x7c000000); %bb.2(3.12%), %bb.1(96.88%)
  liveins: $sgpr4, $sgpr5, $vgpr2, $vgpr3, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr5 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 $sgpr4, killed $vgpr5, 0, implicit $exec
  $vgpr5 = V_MOV_B32_e32 12288, implicit $exec
  renamable $vgpr4 = V_ADD_U32_e64 killed $vgpr5, killed $vgpr4, 0, implicit $exec
  renamable $sgpr4 = S_ADD_I32 killed renamable $sgpr4, 1, implicit-def dead $scc
  S_CMPK_LT_U32 renamable $sgpr4, 8480, implicit-def $scc
  BUFFER_STORE_BYTE_OFFEN renamable $vgpr3, killed renamable $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile store (s8) into %ir.1, addrspace 5)
  S_CBRANCH_SCC1 %bb.1, implicit killed $scc
  S_BRANCH %bb.2

bb.2.split:
; predecessors: %bb.1
  liveins: $sgpr5, $vgpr2, $vgpr0_vgpr1:0x000000000000000F
  renamable $vgpr4 = V_LSHRREV_B32_e64 6, $sgpr33, implicit $exec
  renamable $vgpr3 = V_ADD_U32_e32 20688, killed $vgpr4, implicit $exec
  renamable $vgpr4 = BUFFER_LOAD_DWORD_OFFEN renamable $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load (s32) from %ir.gep.large.offset, align 16, addrspace 5)
  renamable $vgpr5 = BUFFER_LOAD_DWORD_OFFEN renamable $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (volatile dereferenceable load (s32) from %ir.gep.large.offset + 4, basealign 16, addrspace 5)
  renamable $vgpr6 = BUFFER_LOAD_DWORD_OFFEN renamable $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile dereferenceable load (s32) from %ir.gep.small.offset, align 64, addrspace 5)
  renamable $vgpr7 = BUFFER_LOAD_DWORD_OFFEN renamable $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (volatile dereferenceable load (s32) from %ir.gep.small.offset + 4, basealign 64, addrspace 5)
  KILL killed renamable $vgpr2
  KILL killed renamable $vgpr3
  renamable $vgpr2, renamable $vcc = V_ADD_CO_U32_e64 killed $vgpr4, killed $vgpr6, 0, implicit $exec
  renamable $vgpr3, dead renamable $vcc = V_ADDC_U32_e64 killed $vgpr5, killed $vgpr7, killed $vcc, 0, implicit $exec
  GLOBAL_STORE_DWORDX2 killed renamable $vgpr0_vgpr1, killed renamable $vgpr2_vgpr3, 0, 0, implicit $exec :: (volatile store (s64) into %ir.out, addrspace 1)
  $sgpr32 = frame-destroy S_ADD_I32 $sgpr32, -2097152, implicit-def dead $scc
  $sgpr33 = frame-destroy COPY $sgpr5
  SI_RETURN

# End machine code for function func_local_stack_offset_uses_sp.

*** Bad machine code: VOP3 instruction uses literal ***
- function:    func_local_stack_offset_uses_sp
- basic block: %bb.0 entry (0x557d8286b118)
- instruction: renamable $vgpr2 = V_ADD_U32_e64 12352, killed $vgpr3, 0, implicit $exec
LLVM ERROR: Found 1 machine code errors.

@arsenm
Copy link
Contributor Author

arsenm commented Nov 11, 2024

Running the testcase local-stack-alloc-block-sp-reference.ll with verifiers on with this patch like

I have a patch for this (#115747)

@bcahoon
Copy link
Contributor

bcahoon commented Nov 13, 2024

Hi @arsenm, can you take a look at the following case. It still fails with #115747. Thanks.

%"struct.ck::Tuple.1296.524.974.1094.1109.1114.1144.1159.1174" = type { %"struct.ck::detail::TupleImpl.1297.523.973.1093.1108.1113.1143.1158.1173" }
%"struct.ck::detail::TupleImpl.1297.523.973.1093.1108.1113.1143.1158.1173" = type { %"struct.ck::detail::TupleElementKeyData.241.522.972.1092.1107.1112.1142.1157.1172" }
%"struct.ck::detail::TupleElementKeyData.241.522.972.1092.1107.1112.1142.1157.1172" = type { %"struct.ck::vector_type.7.521.971.1091.1106.1111.1141.1156.1171" }
%"struct.ck::vector_type.7.521.971.1091.1106.1111.1141.1156.1171" = type { %union.anon.8.520.970.1090.1105.1110.1140.1155.1170 }
%union.anon.8.520.970.1090.1105.1110.1140.1155.1170 = type { <4 x float> }

define amdgpu_kernel void @test() {
entry:
  %src_vectors.i1.i.i.i.i.i.i.i.i = alloca %"struct.ck::Tuple.1296.524.974.1094.1109.1114.1144.1159.1174", i32 0, align 16, addrspace(5)
  %src_vectors.ascast.i3.i.i.i.i.i.i.i.i = addrspacecast ptr addrspace(5) %src_vectors.i1.i.i.i.i.i.i.i.i to ptr
  %add.ptr.i.i.i.i.i.i.i.i.i.i.i.i12.i.i.i.i.i.i.i.i = getelementptr i8, ptr %src_vectors.ascast.i3.i.i.i.i.i.i.i.i, i64 4
  %vtable.i.i.i.i2.i.i.i15.i.i.i.i.i.i.i.i = load ptr addrspace(1), ptr null, align 8
  %0 = load ptr addrspace(1), ptr addrspace(1) %vtable.i.i.i.i2.i.i.i15.i.i.i.i.i.i.i.i, align 8
  call addrspace(1) void %0(ptr null, ptr null, ptr %add.ptr.i.i.i.i.i.i.i.i.i.i.i.i12.i.i.i.i.i.i.i.i)
  ret void
}
  llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a test.ll -o test.s 

@arsenm
Copy link
Contributor Author

arsenm commented Nov 13, 2024

Hi @arsenm, can you take a look at the following case. It still fails with #115747. Thanks.

This one is #115977

Groverkss pushed a commit to iree-org/llvm-project that referenced this pull request Nov 15, 2024
Only select to a VGPR if it's trivially used in VGPR only contexts.
This fixes mishandling frame indexes used in SGPR only contexts,
like inline assembly constraints.

This is suboptimal in the common case where the frame index
is transitively used by only VALU ops. We make up for this by later
folding the copy to VALU plus scalar op in SIFoldOperands.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

7 participants