Skip to content

[NFC][AMDGPU] Update tests to use autogened CHECKs #140648

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 20, 2025

Conversation

chinmaydd
Copy link
Contributor

@chinmaydd chinmaydd commented May 20, 2025

Related to re-enabling the CopyConstrain DAG mutation in the default scheduler.

Change-Id: Ice86b0200ec3bfac43dc07d0c22a5523b07142b3
@llvmbot
Copy link
Member

llvmbot commented May 20, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: Chinmay Deshpande (chinmaydd)

Changes

Patch is 515.12 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/140648.diff

5 Files Affected:

  • (modified) llvm/test/CodeGen/AMDGPU/fceil64.ll (+706-54)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.format.d16.ll (+90-48)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.format.d16.ll (+90-48)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.format.d16.ll (+100-52)
  • (modified) llvm/test/CodeGen/AMDGPU/scratch-simple.ll (+7133-319)
diff --git a/llvm/test/CodeGen/AMDGPU/fceil64.ll b/llvm/test/CodeGen/AMDGPU/fceil64.ll
index f34a64c470c4e..367bbe7eb68e9 100644
--- a/llvm/test/CodeGen/AMDGPU/fceil64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fceil64.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=SI -check-prefix=FUNC %s
 ; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=CI -check-prefix=FUNC %s
 ; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefix=CI -check-prefix=FUNC %s
@@ -9,36 +10,98 @@ declare <4 x double> @llvm.ceil.v4f64(<4 x double>) nounwind readnone
 declare <8 x double> @llvm.ceil.v8f64(<8 x double>) nounwind readnone
 declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
 
-; FUNC-LABEL: {{^}}fceil_f64:
-; CI: v_ceil_f64_e32
-; SI: s_bfe_u32 [[SEXP:s[0-9]+]], {{s[0-9]+}}, 0xb0014
-; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000
-; SI-DAG: s_addk_i32 [[SEXP]], 0xfc01
-; SI-DAG: s_lshr_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], [[SEXP]]
-; SI-DAG: s_andn2_b64
-; SI-DAG: cmp_gt_i32
-; SI-DAG: s_cselect_b32
-; SI-DAG: s_cselect_b32
-; SI-DAG: cmp_lt_i32
-; SI-DAG: s_cselect_b32
-; SI-DAG: s_cselect_b32
-; SI-DAG: v_cmp_gt_f64_e64 [[FCMP:s[[0-9]+:[0-9]+]]]
-; SI-DAG: v_cmp_lg_f64_e32 vcc
-; SI-DAG: s_and_b64 [[AND1:s[[0-9]+:[0-9]+]]], [[FCMP]], vcc
-; SI-DAG: s_and_b64 [[AND1]], [[AND1]], exec
-; SI-DAG: s_cselect_b32 s{{[0-9]+}}, 0x3ff00000, 0
-; SI: v_add_f64
-; SI: s_endpgm
 define amdgpu_kernel void @fceil_f64(ptr addrspace(1) %out, double %x) {
+; SI-LABEL: fceil_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s5, 0xfffff
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s8, s3, 0xb0014
+; SI-NEXT:    s_and_b32 s9, s3, 0x80000000
+; SI-NEXT:    s_addk_i32 s8, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[4:5], s8
+; SI-NEXT:    s_andn2_b64 s[4:5], s[2:3], s[4:5]
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
+; SI-NEXT:    s_cselect_b32 s4, 0, s4
+; SI-NEXT:    s_cselect_b32 s5, s9, s5
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
+; SI-NEXT:    s_cselect_b32 s9, s3, s5
+; SI-NEXT:    s_cselect_b32 s8, s2, s4
+; SI-NEXT:    v_cmp_gt_f64_e64 s[4:5], s[2:3], 0
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[2:3], v[0:1]
+; SI-NEXT:    s_and_b64 s[2:3], s[4:5], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    v_add_f64 v[0:1], s[8:9], v[0:1]
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
   %y = call double @llvm.ceil.f64(double %x) nounwind readnone
   store double %y, ptr addrspace(1) %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fceil_v2f64:
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
 define amdgpu_kernel void @fceil_v2f64(ptr addrspace(1) %out, <2 x double> %x) {
+; SI-LABEL: fceil_v2f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b32 s9, 0xfffff
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_mov_b32 s8, s2
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s10, s7, 0xb0014
+; SI-NEXT:    s_and_b32 s12, s7, 0x80000000
+; SI-NEXT:    s_add_i32 s13, s10, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[10:11], s[8:9], s13
+; SI-NEXT:    s_andn2_b64 s[10:11], s[6:7], s[10:11]
+; SI-NEXT:    s_cmp_lt_i32 s13, 0
+; SI-NEXT:    s_cselect_b32 s10, 0, s10
+; SI-NEXT:    s_cselect_b32 s11, s12, s11
+; SI-NEXT:    s_cmp_gt_i32 s13, 51
+; SI-NEXT:    s_cselect_b32 s11, s7, s11
+; SI-NEXT:    s_cselect_b32 s10, s6, s10
+; SI-NEXT:    v_cmp_gt_f64_e64 s[12:13], s[6:7], 0
+; SI-NEXT:    v_mov_b32_e32 v1, s10
+; SI-NEXT:    v_mov_b32_e32 v2, s11
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[6:7], v[1:2]
+; SI-NEXT:    s_and_b64 s[6:7], s[12:13], vcc
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s12, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s6, s5, 0xb0014
+; SI-NEXT:    s_and_b32 s13, s5, 0x80000000
+; SI-NEXT:    s_add_i32 s14, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[8:9], s14
+; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
+; SI-NEXT:    s_cmp_lt_i32 s14, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s13, s7
+; SI-NEXT:    s_cmp_gt_i32 s14, 51
+; SI-NEXT:    s_cselect_b32 s7, s5, s7
+; SI-NEXT:    s_cselect_b32 s6, s4, s6
+; SI-NEXT:    v_cmp_gt_f64_e64 s[8:9], s[4:5], 0
+; SI-NEXT:    v_mov_b32_e32 v1, s12
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[4:5], v[2:3]
+; SI-NEXT:    s_and_b64 s[4:5], s[8:9], vcc
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_add_f64 v[2:3], s[10:11], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, s4
+; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[0:1]
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
   %y = call <2 x double> @llvm.ceil.v2f64(<2 x double> %x) nounwind readnone
   store <2 x double> %y, ptr addrspace(1) %out
   ret void
@@ -54,51 +117,640 @@ define amdgpu_kernel void @fceil_v2f64(ptr addrspace(1) %out, <2 x double> %x) {
 ;   ret void
 ; }
 
-; FUNC-LABEL: {{^}}fceil_v4f64:
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
 define amdgpu_kernel void @fceil_v4f64(ptr addrspace(1) %out, <4 x double> %x) {
+; SI-LABEL: fceil_v4f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x11
+; SI-NEXT:    s_mov_b32 s11, 0xf000
+; SI-NEXT:    s_mov_b32 s10, -1
+; SI-NEXT:    s_mov_b32 s13, 0xfffff
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_mov_b32 s12, s10
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s18, s3, 0xb0014
+; SI-NEXT:    s_and_b32 s20, s3, 0x80000000
+; SI-NEXT:    v_cmp_gt_f64_e64 s[14:15], s[2:3], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[16:17], s[0:1], 0
+; SI-NEXT:    s_add_i32 s21, s18, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[18:19], s[12:13], s21
+; SI-NEXT:    s_andn2_b64 s[18:19], s[2:3], s[18:19]
+; SI-NEXT:    s_cmp_lt_i32 s21, 0
+; SI-NEXT:    s_cselect_b32 s18, 0, s18
+; SI-NEXT:    s_cselect_b32 s19, s20, s19
+; SI-NEXT:    s_cmp_gt_i32 s21, 51
+; SI-NEXT:    s_cselect_b32 s19, s3, s19
+; SI-NEXT:    s_cselect_b32 s18, s2, s18
+; SI-NEXT:    v_cmp_gt_f64_e64 s[20:21], s[6:7], 0
+; SI-NEXT:    v_mov_b32_e32 v1, s18
+; SI-NEXT:    v_mov_b32_e32 v2, s19
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[2:3], v[1:2]
+; SI-NEXT:    s_and_b64 s[2:3], s[14:15], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s22, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s2, s1, 0xb0014
+; SI-NEXT:    s_and_b32 s14, s1, 0x80000000
+; SI-NEXT:    s_add_i32 s15, s2, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s15
+; SI-NEXT:    s_andn2_b64 s[2:3], s[0:1], s[2:3]
+; SI-NEXT:    s_cmp_lt_i32 s15, 0
+; SI-NEXT:    s_cselect_b32 s2, 0, s2
+; SI-NEXT:    s_cselect_b32 s3, s14, s3
+; SI-NEXT:    s_cmp_gt_i32 s15, 51
+; SI-NEXT:    s_cselect_b32 s3, s1, s3
+; SI-NEXT:    s_cselect_b32 s2, s0, s2
+; SI-NEXT:    v_cmp_gt_f64_e64 s[14:15], s[4:5], 0
+; SI-NEXT:    v_mov_b32_e32 v1, s22
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    v_mov_b32_e32 v3, s3
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[0:1], v[2:3]
+; SI-NEXT:    s_and_b64 s[0:1], s[16:17], vcc
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; SI-NEXT:    s_cselect_b32 s16, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s0, s7, 0xb0014
+; SI-NEXT:    s_and_b32 s17, s7, 0x80000000
+; SI-NEXT:    s_add_i32 s22, s0, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[12:13], s22
+; SI-NEXT:    s_andn2_b64 s[0:1], s[6:7], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s22, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s17, s1
+; SI-NEXT:    s_cmp_gt_i32 s22, 51
+; SI-NEXT:    s_cselect_b32 s1, s7, s1
+; SI-NEXT:    s_cselect_b32 s0, s6, s0
+; SI-NEXT:    v_add_f64 v[4:5], s[18:19], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, s16
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_mov_b32_e32 v2, s0
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[6:7], v[2:3]
+; SI-NEXT:    s_and_b64 s[6:7], s[20:21], vcc
+; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_cselect_b32 s16, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s6, s5, 0xb0014
+; SI-NEXT:    s_and_b32 s17, s5, 0x80000000
+; SI-NEXT:    s_add_i32 s18, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s18
+; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
+; SI-NEXT:    s_cmp_lt_i32 s18, 0
+; SI-NEXT:    s_cselect_b32 s6, 0, s6
+; SI-NEXT:    s_cselect_b32 s7, s17, s7
+; SI-NEXT:    s_cmp_gt_i32 s18, 51
+; SI-NEXT:    s_cselect_b32 s7, s5, s7
+; SI-NEXT:    s_cselect_b32 s6, s4, s6
+; SI-NEXT:    v_add_f64 v[2:3], s[2:3], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, s16
+; SI-NEXT:    v_mov_b32_e32 v6, s6
+; SI-NEXT:    v_mov_b32_e32 v7, s7
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[4:5], v[6:7]
+; SI-NEXT:    s_and_b64 s[2:3], s[14:15], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    v_add_f64 v[8:9], s[0:1], v[0:1]
+; SI-NEXT:    v_mov_b32_e32 v1, s2
+; SI-NEXT:    v_add_f64 v[6:7], s[6:7], v[0:1]
+; SI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[8:11], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[2:5], off, s[8:11], 0
+; SI-NEXT:    s_endpgm
   %y = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x) nounwind readnone
   store <4 x double> %y, ptr addrspace(1) %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fceil_v8f64:
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
 define amdgpu_kernel void @fceil_v8f64(ptr addrspace(1) %out, <8 x double> %x) {
+; SI-LABEL: fceil_v8f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[16:17], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx16 s[0:15], s[4:5], 0x19
+; SI-NEXT:    s_mov_b32 s19, 0xf000
+; SI-NEXT:    s_mov_b32 s18, -1
+; SI-NEXT:    s_mov_b32 s21, 0xfffff
+; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    s_mov_b32 s20, s18
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s33, s3, 0xb0014
+; SI-NEXT:    s_and_b32 s40, s3, 0x80000000
+; SI-NEXT:    v_cmp_gt_f64_e64 s[22:23], s[2:3], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[26:27], s[0:1], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[30:31], s[6:7], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[36:37], s[4:5], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[24:25], s[10:11], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[28:29], s[8:9], 0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[34:35], s[14:15], 0
+; SI-NEXT:    s_addk_i32 s33, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[38:39], s[20:21], s33
+; SI-NEXT:    s_andn2_b64 s[38:39], s[2:3], s[38:39]
+; SI-NEXT:    s_cmp_lt_i32 s33, 0
+; SI-NEXT:    s_cselect_b32 s38, 0, s38
+; SI-NEXT:    s_cselect_b32 s39, s40, s39
+; SI-NEXT:    s_cmp_gt_i32 s33, 51
+; SI-NEXT:    s_cselect_b32 s41, s3, s39
+; SI-NEXT:    s_cselect_b32 s40, s2, s38
+; SI-NEXT:    v_cmp_gt_f64_e64 s[38:39], s[12:13], 0
+; SI-NEXT:    v_mov_b32_e32 v0, s40
+; SI-NEXT:    v_mov_b32_e32 v1, s41
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[2:3], v[0:1]
+; SI-NEXT:    s_and_b64 s[2:3], s[22:23], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s3, s1, 0xb0014
+; SI-NEXT:    s_and_b32 s22, s1, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    s_add_i32 s23, s3, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[2:3], s[20:21], s23
+; SI-NEXT:    s_andn2_b64 s[2:3], s[0:1], s[2:3]
+; SI-NEXT:    s_cmp_lt_i32 s23, 0
+; SI-NEXT:    s_cselect_b32 s2, 0, s2
+; SI-NEXT:    s_cselect_b32 s3, s22, s3
+; SI-NEXT:    s_cmp_gt_i32 s23, 51
+; SI-NEXT:    s_cselect_b32 s3, s1, s3
+; SI-NEXT:    s_cselect_b32 s2, s0, s2
+; SI-NEXT:    v_add_f64 v[2:3], s[40:41], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[0:1], v[0:1]
+; SI-NEXT:    s_and_b64 s[0:1], s[26:27], vcc
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; SI-NEXT:    s_cselect_b32 s0, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s1, s7, 0xb0014
+; SI-NEXT:    s_and_b32 s22, s7, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s0
+; SI-NEXT:    s_add_i32 s23, s1, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[20:21], s23
+; SI-NEXT:    s_andn2_b64 s[0:1], s[6:7], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s23, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s22, s1
+; SI-NEXT:    s_cmp_gt_i32 s23, 51
+; SI-NEXT:    s_cselect_b32 s1, s7, s1
+; SI-NEXT:    s_cselect_b32 s0, s6, s0
+; SI-NEXT:    v_add_f64 v[0:1], s[2:3], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v6, s1
+; SI-NEXT:    v_mov_b32_e32 v5, s0
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[6:7], v[5:6]
+; SI-NEXT:    s_and_b64 s[2:3], s[30:31], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
+; SI-NEXT:    s_and_b32 s6, s5, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    s_add_i32 s7, s3, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[2:3], s[20:21], s7
+; SI-NEXT:    s_andn2_b64 s[2:3], s[4:5], s[2:3]
+; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_cselect_b32 s2, 0, s2
+; SI-NEXT:    s_cselect_b32 s3, s6, s3
+; SI-NEXT:    s_cmp_gt_i32 s7, 51
+; SI-NEXT:    s_cselect_b32 s3, s5, s3
+; SI-NEXT:    s_cselect_b32 s2, s4, s2
+; SI-NEXT:    v_add_f64 v[7:8], s[0:1], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v6, s3
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[4:5], v[5:6]
+; SI-NEXT:    s_and_b64 s[0:1], s[36:37], vcc
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; SI-NEXT:    s_cselect_b32 s0, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s1, s11, 0xb0014
+; SI-NEXT:    s_and_b32 s4, s11, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s0
+; SI-NEXT:    v_add_f64 v[5:6], s[2:3], v[4:5]
+; SI-NEXT:    s_add_i32 s2, s1, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[20:21], s2
+; SI-NEXT:    s_andn2_b64 s[0:1], s[10:11], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s2, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s4, s1
+; SI-NEXT:    s_cmp_gt_i32 s2, 51
+; SI-NEXT:    s_cselect_b32 s1, s11, s1
+; SI-NEXT:    s_cselect_b32 s0, s10, s0
+; SI-NEXT:    buffer_store_dwordx4 v[5:8], off, s[16:19], 0 offset:16
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v6, s1
+; SI-NEXT:    v_mov_b32_e32 v5, s0
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[10:11], v[5:6]
+; SI-NEXT:    s_and_b64 s[2:3], s[24:25], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s2, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s3, s9, 0xb0014
+; SI-NEXT:    s_and_b32 s4, s9, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    s_add_i32 s5, s3, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[2:3], s[20:21], s5
+; SI-NEXT:    s_andn2_b64 s[2:3], s[8:9], s[2:3]
+; SI-NEXT:    s_cmp_lt_i32 s5, 0
+; SI-NEXT:    s_cselect_b32 s2, 0, s2
+; SI-NEXT:    s_cselect_b32 s3, s4, s3
+; SI-NEXT:    s_cmp_gt_i32 s5, 51
+; SI-NEXT:    s_cselect_b32 s3, s9, s3
+; SI-NEXT:    s_cselect_b32 s2, s8, s2
+; SI-NEXT:    v_add_f64 v[7:8], s[0:1], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v6, s3
+; SI-NEXT:    v_mov_b32_e32 v5, s2
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[8:9], v[5:6]
+; SI-NEXT:    s_and_b64 s[0:1], s[28:29], vcc
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; SI-NEXT:    s_cselect_b32 s0, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s1, s15, 0xb0014
+; SI-NEXT:    s_and_b32 s4, s15, 0x80000000
+; SI-NEXT:    v_mov_b32_e32 v5, s0
+; SI-NEXT:    s_add_i32 s5, s1, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[20:21], s5
+; SI-NEXT:    s_andn2_b64 s[0:1], s[14:15], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s5, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s4, s1
+; SI-NEXT:    s_cmp_gt_i32 s5, 51
+; SI-NEXT:    s_cselect_b32 s1, s15, s1
+; SI-NEXT:    s_cselect_b32 s0, s14, s0
+; SI-NEXT:    v_add_f64 v[5:6], s[2:3], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v10, s1
+; SI-NEXT:    v_mov_b32_e32 v9, s0
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[14:15], v[9:10]
+; SI-NEXT:    s_and_b64 s[2:3], s[34:35], vcc
+; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s2, s13, 0xb0014
+; SI-NEXT:    s_and_b32 s5, s13, 0x80000000
+; SI-NEXT:    s_add_i32 s6, s2, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[2:3], s[20:21], s6
+; SI-NEXT:    s_andn2_b64 s[2:3], s[12:13], s[2:3]
+; SI-NEXT:    s_cmp_lt_i32 s6, 0
+; SI-NEXT:    s_cselect_b32 s2, 0, s2
+; SI-NEXT:    s_cselect_b32 s3, s5, s3
+; SI-NEXT:    s_cmp_gt_i32 s6, 51
+; SI-NEXT:    s_cselect_b32 s3, s13, s3
+; SI-NEXT:    s_cselect_b32 s2, s12, s2
+; SI-NEXT:    buffer_store_dwordx4 v[5:8], off, s[16:19], 0 offset:32
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v5, s4
+; SI-NEXT:    v_mov_b32_e32 v7, s3
+; SI-NEXT:    v_mov_b32_e32 v6, s2
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[12:13], v[6:7]
+; SI-NEXT:    s_and_b64 s[4:5], s[38:39], vcc
+; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_add_f64 v[6:7], s[0:1], v[4:5]
+; SI-NEXT:    v_mov_b32_e32 v5, s4
+; SI-NEXT:    v_add_f64 v[4:5], s[2:3], v[4:5]
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[16:19], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[16:19], 0
+; SI-NEXT:    s_endpgm
   %y = call <8 x double> @llvm.ceil.v8f64(<8 x double> %x) nounwind readnone
   store <8 x double> %y, ptr addrspace(1) %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}fceil_v16f64:
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
-; CI: v_ceil_f64_e32
 define amdgpu_kernel void @fceil_v16f64(ptr addrspace(1) %out, <16 x double> %x) {
+; SI-LABEL: fceil_v16f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x29
+; SI-NEXT:    s_mov_b32 s26, -1
+; SI-NEXT:    s_mov_b32 s29, 0xfffff
+; SI-NEXT:    s_load_dwordx2 s[24:25], s[4:5], 0x9
+; SI-NEXT:    v_mov_b32_e32 v8, 0
+; SI-NEXT:    s_mov_b32 s28, s26
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_bfe_u32 s0, s11, 0xb0014
+; SI-NEXT:    s_and_b32 s2, s11, 0x80000000
+; SI-NEXT:    s_add_i32 s3, s0, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[28:29], s3
+; SI-NEXT:    s_andn2_b64 s[0:1], s[10:11], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s2, s1
+; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s3, s11, s1
+; SI-NEXT:    s_cselect_b32 s2, s10, s0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[0:1], s[10:11], 0
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[10:11], v[0:1]
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], exec
+; SI-NEXT:    s_cselect_b32 s10, 0x3ff00000, 0
+; SI-NEXT:    s_bfe_u32 s0, s9, 0xb0014
+; SI-NEXT:    s_and_b32 s6, s9, 0x80000000
+; SI-NEXT:    s_add_i32 s7, s0, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[0:1], s[28:29], s7
+; SI-NEXT:    s_andn2_b64 s[0:1], s[8:9], s[0:1]
+; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, s6, s1
+; SI-NEXT:    s_cmp_gt_i32 s7, 51
+; SI-NEXT:    s_cselect_b32 s7, s9, s1
+; SI-NEXT:    s_cselect_b32 s6, s8, s0
+; SI-NEXT:    v_cmp_gt_f64_e64 s[0:1], s[8:9], 0
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    v_cmp_lg_f64_e32 vcc, s[8:9], v[0:1]
+; SI-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
+; SI-NEXT:    s_and_b64 s[0:1...
[truncated]

@chinmaydd chinmaydd merged commit e264cff into llvm:main May 20, 2025
13 checks passed
@chinmaydd chinmaydd deleted the chinmaydd/test-update-2 branch May 20, 2025 02:47
sivan-shani pushed a commit to sivan-shani/llvm-project that referenced this pull request Jun 3, 2025
ajaden-codes pushed a commit to Jaddyen/llvm-project that referenced this pull request Jun 6, 2025
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

3 participants