[AMDGPU] Fix wrong reverse operations for v_cmpx_le_u32
#146398
Merged
Conversation
@llvm/pr-subscribers-backend-amdgpu
Author: Shilei Tian (shiltian)
Changes: Fixes SWDEV-538616.
Patch is 1.09 MiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/146398.diff
119 Files Affected:
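For context on the hunk below: each VOPC defm may pass a "reverse" opcode name as its final string argument. Two different notions of reversing a comparison are in play, and they disagree exactly when the operands are equal. The diff moves every signed and unsigned lt/le pairing from the operand-swap form (lt <-> gt, le <-> ge) to the negated form (lt <-> ge, le <-> gt), and fixes v_cmpx_le_u32, whose reverse entry mistakenly named the instruction itself. A minimal C sketch of the two identities (illustrative only, not LLVM code):

#include <assert.h>
#include <stdint.h>

int main(void) {
  for (uint32_t a = 0; a < 4; ++a) {
    for (uint32_t b = 0; b < 4; ++b) {
      /* Reverse by operand swap: lt(a, b) == gt(b, a). */
      assert((a < b) == (b > a));
      /* Reverse by logical negation: !lt(a, b) == ge(a, b). */
      assert(!(a < b) == (a >= b));
      /* Applied to swapped operands, the two candidate reverses of
         lt (gt vs. ge) disagree exactly when a == b. */
      assert(((b > a) != (b >= a)) == (a == b));
    }
  }
  return 0;
}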
diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index ca5ed5cd24603..eb002567f3ef5 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -799,72 +799,72 @@ defm V_CMPX_T_U16 : VOPCX_I16 <"v_cmpx_t_u16">;
} // End SubtargetPredicate = Has16BitInsts
defm V_CMP_F_I32 : VOPC_I32 <"v_cmp_f_i32">;
-defm V_CMP_LT_I32 : VOPC_I32 <"v_cmp_lt_i32", COND_SLT, "v_cmp_gt_i32">;
+defm V_CMP_LT_I32 : VOPC_I32 <"v_cmp_lt_i32", COND_SLT, "v_cmp_ge_i32">;
defm V_CMP_EQ_I32 : VOPC_I32 <"v_cmp_eq_i32">;
-defm V_CMP_LE_I32 : VOPC_I32 <"v_cmp_le_i32", COND_SLE, "v_cmp_ge_i32">;
+defm V_CMP_LE_I32 : VOPC_I32 <"v_cmp_le_i32", COND_SLE, "v_cmp_gt_i32">;
defm V_CMP_GT_I32 : VOPC_I32 <"v_cmp_gt_i32", COND_SGT>;
defm V_CMP_NE_I32 : VOPC_I32 <"v_cmp_ne_i32">;
defm V_CMP_GE_I32 : VOPC_I32 <"v_cmp_ge_i32", COND_SGE>;
defm V_CMP_T_I32 : VOPC_I32 <"v_cmp_t_i32">;
defm V_CMPX_F_I32 : VOPCX_I32 <"v_cmpx_f_i32">;
-defm V_CMPX_LT_I32 : VOPCX_I32 <"v_cmpx_lt_i32", "v_cmpx_gt_i32">;
+defm V_CMPX_LT_I32 : VOPCX_I32 <"v_cmpx_lt_i32", "v_cmpx_ge_i32">;
defm V_CMPX_EQ_I32 : VOPCX_I32 <"v_cmpx_eq_i32">;
-defm V_CMPX_LE_I32 : VOPCX_I32 <"v_cmpx_le_i32", "v_cmpx_ge_i32">;
+defm V_CMPX_LE_I32 : VOPCX_I32 <"v_cmpx_le_i32", "v_cmpx_gt_i32">;
defm V_CMPX_GT_I32 : VOPCX_I32 <"v_cmpx_gt_i32">;
defm V_CMPX_NE_I32 : VOPCX_I32 <"v_cmpx_ne_i32">;
defm V_CMPX_GE_I32 : VOPCX_I32 <"v_cmpx_ge_i32">;
defm V_CMPX_T_I32 : VOPCX_I32 <"v_cmpx_t_i32">;
defm V_CMP_F_I64 : VOPC_I64 <"v_cmp_f_i64">;
-defm V_CMP_LT_I64 : VOPC_I64 <"v_cmp_lt_i64", COND_SLT, "v_cmp_gt_i64">;
+defm V_CMP_LT_I64 : VOPC_I64 <"v_cmp_lt_i64", COND_SLT, "v_cmp_ge_i64">;
defm V_CMP_EQ_I64 : VOPC_I64 <"v_cmp_eq_i64">;
-defm V_CMP_LE_I64 : VOPC_I64 <"v_cmp_le_i64", COND_SLE, "v_cmp_ge_i64">;
+defm V_CMP_LE_I64 : VOPC_I64 <"v_cmp_le_i64", COND_SLE, "v_cmp_gt_i64">;
defm V_CMP_GT_I64 : VOPC_I64 <"v_cmp_gt_i64", COND_SGT>;
defm V_CMP_NE_I64 : VOPC_I64 <"v_cmp_ne_i64">;
defm V_CMP_GE_I64 : VOPC_I64 <"v_cmp_ge_i64", COND_SGE>;
defm V_CMP_T_I64 : VOPC_I64 <"v_cmp_t_i64">;
defm V_CMPX_F_I64 : VOPCX_I64 <"v_cmpx_f_i64">;
-defm V_CMPX_LT_I64 : VOPCX_I64 <"v_cmpx_lt_i64", "v_cmpx_gt_i64">;
+defm V_CMPX_LT_I64 : VOPCX_I64 <"v_cmpx_lt_i64", "v_cmpx_ge_i64">;
defm V_CMPX_EQ_I64 : VOPCX_I64 <"v_cmpx_eq_i64">;
-defm V_CMPX_LE_I64 : VOPCX_I64 <"v_cmpx_le_i64", "v_cmpx_ge_i64">;
+defm V_CMPX_LE_I64 : VOPCX_I64 <"v_cmpx_le_i64", "v_cmpx_gt_i64">;
defm V_CMPX_GT_I64 : VOPCX_I64 <"v_cmpx_gt_i64">;
defm V_CMPX_NE_I64 : VOPCX_I64 <"v_cmpx_ne_i64">;
defm V_CMPX_GE_I64 : VOPCX_I64 <"v_cmpx_ge_i64">;
defm V_CMPX_T_I64 : VOPCX_I64 <"v_cmpx_t_i64">;
defm V_CMP_F_U32 : VOPC_I32 <"v_cmp_f_u32">;
-defm V_CMP_LT_U32 : VOPC_I32 <"v_cmp_lt_u32", COND_ULT, "v_cmp_gt_u32">;
+defm V_CMP_LT_U32 : VOPC_I32 <"v_cmp_lt_u32", COND_ULT, "v_cmp_ge_u32">;
defm V_CMP_EQ_U32 : VOPC_I32 <"v_cmp_eq_u32", COND_EQ>;
-defm V_CMP_LE_U32 : VOPC_I32 <"v_cmp_le_u32", COND_ULE, "v_cmp_ge_u32">;
+defm V_CMP_LE_U32 : VOPC_I32 <"v_cmp_le_u32", COND_ULE, "v_cmp_gt_u32">;
defm V_CMP_GT_U32 : VOPC_I32 <"v_cmp_gt_u32", COND_UGT>;
defm V_CMP_NE_U32 : VOPC_I32 <"v_cmp_ne_u32", COND_NE>;
defm V_CMP_GE_U32 : VOPC_I32 <"v_cmp_ge_u32", COND_UGE>;
defm V_CMP_T_U32 : VOPC_I32 <"v_cmp_t_u32">;
defm V_CMPX_F_U32 : VOPCX_I32 <"v_cmpx_f_u32">;
-defm V_CMPX_LT_U32 : VOPCX_I32 <"v_cmpx_lt_u32", "v_cmpx_gt_u32">;
+defm V_CMPX_LT_U32 : VOPCX_I32 <"v_cmpx_lt_u32", "v_cmpx_ge_u32">;
defm V_CMPX_EQ_U32 : VOPCX_I32 <"v_cmpx_eq_u32">;
-defm V_CMPX_LE_U32 : VOPCX_I32 <"v_cmpx_le_u32", "v_cmpx_le_u32">;
+defm V_CMPX_LE_U32 : VOPCX_I32 <"v_cmpx_le_u32", "v_cmpx_gt_u32">;
defm V_CMPX_GT_U32 : VOPCX_I32 <"v_cmpx_gt_u32">;
defm V_CMPX_NE_U32 : VOPCX_I32 <"v_cmpx_ne_u32">;
defm V_CMPX_GE_U32 : VOPCX_I32 <"v_cmpx_ge_u32">;
defm V_CMPX_T_U32 : VOPCX_I32 <"v_cmpx_t_u32">;
defm V_CMP_F_U64 : VOPC_I64 <"v_cmp_f_u64">;
-defm V_CMP_LT_U64 : VOPC_I64 <"v_cmp_lt_u64", COND_ULT, "v_cmp_gt_u64">;
+defm V_CMP_LT_U64 : VOPC_I64 <"v_cmp_lt_u64", COND_ULT, "v_cmp_ge_u64">;
defm V_CMP_EQ_U64 : VOPC_I64 <"v_cmp_eq_u64", COND_EQ>;
-defm V_CMP_LE_U64 : VOPC_I64 <"v_cmp_le_u64", COND_ULE, "v_cmp_ge_u64">;
+defm V_CMP_LE_U64 : VOPC_I64 <"v_cmp_le_u64", COND_ULE, "v_cmp_gt_u64">;
defm V_CMP_GT_U64 : VOPC_I64 <"v_cmp_gt_u64", COND_UGT>;
defm V_CMP_NE_U64 : VOPC_I64 <"v_cmp_ne_u64", COND_NE>;
defm V_CMP_GE_U64 : VOPC_I64 <"v_cmp_ge_u64", COND_UGE>;
defm V_CMP_T_U64 : VOPC_I64 <"v_cmp_t_u64">;
defm V_CMPX_F_U64 : VOPCX_I64 <"v_cmpx_f_u64">;
-defm V_CMPX_LT_U64 : VOPCX_I64 <"v_cmpx_lt_u64", "v_cmpx_gt_u64">;
+defm V_CMPX_LT_U64 : VOPCX_I64 <"v_cmpx_lt_u64", "v_cmpx_ge_u64">;
defm V_CMPX_EQ_U64 : VOPCX_I64 <"v_cmpx_eq_u64">;
-defm V_CMPX_LE_U64 : VOPCX_I64 <"v_cmpx_le_u64", "v_cmpx_ge_u64">;
+defm V_CMPX_LE_U64 : VOPCX_I64 <"v_cmpx_le_u64", "v_cmpx_gt_u64">;
defm V_CMPX_GT_U64 : VOPCX_I64 <"v_cmpx_gt_u64">;
defm V_CMPX_NE_U64 : VOPCX_I64 <"v_cmpx_ne_u64">;
defm V_CMPX_GE_U64 : VOPCX_I64 <"v_cmpx_ge_u64">;
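The remaining 118 files are regenerated codegen tests whose FileCheck lines track the new reverse-opcode choices; the first, addo.ll, is representative. As a per-lane reading of its first changed pair: v_cmp_gt_i32 dst, 0, v1 computes 0 > v1, while v_cmp_ge_i32 dst, 0, v1 computes 0 >= v1, so the old and new check lines differ exactly when v1 == 0. A small C sketch, with hypothetical helpers whose names only mirror the mnemonics (they are not LLVM APIs):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-lane models of the two VOPC compares. */
static bool cmp_gt_i32(int32_t src0, int32_t src1) { return src0 > src1; }
static bool cmp_ge_i32(int32_t src0, int32_t src1) { return src0 >= src1; }

int main(void) {
  for (int32_t v1 = -2; v1 <= 2; ++v1) {
    bool pre  = cmp_gt_i32(0, v1);  /* old check line: 0 > v1  */
    bool post = cmp_ge_i32(0, v1);  /* new check line: 0 >= v1 */
    assert((pre != post) == (v1 == 0));
  }
  return 0;
}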
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
index 38374d1689366..27668752120e8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/addo.ll
@@ -205,7 +205,7 @@ define i32 @v_saddo_i32(i32 %a, i32 %b) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_add_i32_e32 v2, vcc, v0, v1
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v2, v0
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v1
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
@@ -216,7 +216,7 @@ define i32 @v_saddo_i32(i32 %a, i32 %b) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v0, v1
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v2, v0
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v1
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v2, v0
@@ -227,7 +227,7 @@ define i32 @v_saddo_i32(i32 %a, i32 %b) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v2, v0, v1
; GFX9-NEXT: v_cmp_lt_i32_e32 vcc, v2, v0
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v1
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v1
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT: v_add_u32_e32 v0, v2, v0
@@ -247,7 +247,7 @@ define i64 @v_saddo_i64(i64 %a, i64 %b) {
; GFX7-NEXT: v_add_i32_e32 v4, vcc, v0, v2
; GFX7-NEXT: v_addc_u32_e32 v5, vcc, v1, v3, vcc
; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
-; GFX7-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX7-NEXT: v_cmp_ge_i64_e64 s[4:5], 0, v[2:3]
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v4, v0
@@ -260,7 +260,7 @@ define i64 @v_saddo_i64(i64 %a, i64 %b) {
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v1, v3, vcc
; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
-; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX8-NEXT: v_cmp_ge_i64_e64 s[4:5], 0, v[2:3]
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v4, v0
@@ -273,7 +273,7 @@ define i64 @v_saddo_i64(i64 %a, i64 %b) {
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
-; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX9-NEXT: v_cmp_ge_i64_e64 s[4:5], 0, v[2:3]
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v4, v0
@@ -295,8 +295,8 @@ define <2 x i32> @v_saddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
; GFX7-NEXT: v_add_i32_e32 v5, vcc, v1, v3
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v4, v0
; GFX7-NEXT: v_cmp_lt_i32_e64 s[4:5], v5, v1
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[6:7], 0, v2
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[6:7], 0, v2
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[8:9], 0, v3
; GFX7-NEXT: s_xor_b64 s[6:7], s[6:7], vcc
; GFX7-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
@@ -312,8 +312,8 @@ define <2 x i32> @v_saddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v1, v3
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v4, v0
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v5, v1
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[6:7], 0, v2
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[6:7], 0, v2
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[8:9], 0, v3
; GFX8-NEXT: s_xor_b64 s[6:7], s[6:7], vcc
; GFX8-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
@@ -329,8 +329,8 @@ define <2 x i32> @v_saddo_v2i32(<2 x i32> %a, <2 x i32> %b) {
; GFX9-NEXT: v_add_u32_e32 v5, v1, v3
; GFX9-NEXT: v_cmp_lt_i32_e32 vcc, v4, v0
; GFX9-NEXT: v_cmp_lt_i32_e64 s[4:5], v5, v1
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[6:7], 0, v2
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[8:9], 0, v3
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[6:7], 0, v2
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[8:9], 0, v3
; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], vcc
; GFX9-NEXT: s_xor_b64 s[4:5], s[8:9], s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
@@ -355,7 +355,7 @@ define i8 @v_saddo_i8(i8 %a, i8 %b) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT: v_bfe_i32 v0, v1, 0, 8
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
@@ -369,7 +369,7 @@ define i8 @v_saddo_i8(i8 %a, i8 %b) {
; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 8
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u16_e32 v0, v2, v0
@@ -403,7 +403,7 @@ define i7 @v_saddo_i7(i7 %a, i7 %b) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
@@ -417,7 +417,7 @@ define i7 @v_saddo_i7(i7 %a, i7 %b) {
; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u16_e32 v0, v2, v0
@@ -431,7 +431,7 @@ define i7 @v_saddo_i7(i7 %a, i7 %b) {
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX9-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX9-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT: v_add_u16_e32 v0, v2, v0
@@ -802,7 +802,7 @@ define i8 @s_saddo_i8(i8 %a, i8 %b) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT: v_bfe_i32 v0, v1, 0, 8
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
@@ -816,7 +816,7 @@ define i8 @s_saddo_i8(i8 %a, i8 %b) {
; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 8
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u16_e32 v0, v2, v0
@@ -850,7 +850,7 @@ define i7 @s_saddo_i7(i7 %a, i7 %b) {
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX7-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX7-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX7-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v2, v0
@@ -864,7 +864,7 @@ define i7 @s_saddo_i7(i7 %a, i7 %b) {
; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX8-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX8-NEXT: v_add_u16_e32 v0, v2, v0
@@ -878,7 +878,7 @@ define i7 @s_saddo_i7(i7 %a, i7 %b) {
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 7
; GFX9-NEXT: v_cmp_lt_i32_e32 vcc, v3, v0
; GFX9-NEXT: v_bfe_i32 v0, v1, 0, 7
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[4:5], 0, v0
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT: v_add_u16_e32 v0, v2, v0
@@ -969,8 +969,8 @@ define amdgpu_ps i32 @saddo_i32_sv(i32 inreg %a, i32 %b) {
; GFX7-LABEL: saddo_i32_sv:
; GFX7: ; %bb.0:
; GFX7-NEXT: v_add_i32_e32 v1, vcc, s0, v0
-; GFX7-NEXT: v_cmp_gt_i32_e32 vcc, s0, v1
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e32 vcc, s0, v1
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[0:1], 0, v0
; GFX7-NEXT: s_xor_b64 s[0:1], s[0:1], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v1, v0
@@ -980,8 +980,8 @@ define amdgpu_ps i32 @saddo_i32_sv(i32 inreg %a, i32 %b) {
; GFX8-LABEL: saddo_i32_sv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v0
-; GFX8-NEXT: v_cmp_gt_i32_e32 vcc, s0, v1
-; GFX8-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX8-NEXT: v_cmp_ge_i32_e32 vcc, s0, v1
+; GFX8-NEXT: v_cmp_ge_i32_e64 s[0:1], 0, v0
; GFX8-NEXT: s_xor_b64 s[0:1], s[0:1], vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v1, v0
@@ -991,8 +991,8 @@ define amdgpu_ps i32 @saddo_i32_sv(i32 inreg %a, i32 %b) {
; GFX9-LABEL: saddo_i32_sv:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_add_u32_e32 v1, s0, v0
-; GFX9-NEXT: v_cmp_gt_i32_e32 vcc, s0, v1
-; GFX9-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX9-NEXT: v_cmp_ge_i32_e32 vcc, s0, v1
+; GFX9-NEXT: v_cmp_ge_i32_e64 s[0:1], 0, v0
; GFX9-NEXT: s_xor_b64 s[0:1], s[0:1], vcc
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT: v_add_u32_e32 v0, v1, v0
@@ -1013,8 +1013,8 @@ define amdgpu_ps i16 @saddo_i16_sv(i16 inreg %a, i16 %b) {
; GFX7-NEXT: v_bfe_i32 v2, v1, 0, 16
; GFX7-NEXT: s_sext_i32_i16 s0, s0
; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX7-NEXT: v_cmp_gt_i32_e32 vcc, s0, v2
-; GFX7-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v0
+; GFX7-NEXT: v_cmp_ge_i32_e32 vcc, s0, v2
+; GFX7-NEXT: v_cmp_ge_i32_e64 s[0:1], 0, v0
; GFX7-NEXT: s_xor_b64 s[0:1], s[0:1], vcc
; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v1, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
index 353c09b4b0bfb..3f61fdd8309ec 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
@@ -1666,7 +1666,7 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
; GFX6-NEXT: v_ashrrev_i32_e32 v8, 31, v5
; GFX6-NEXT: v_ashr_i64 v[4:5], v[4:5], v2
; GFX6-NEXT: v_or_b32_e32 v7, v7, v9
-; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX6-NEXT: v_cmp_ge_u32_e32 vcc, 64, v3
; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
@@ -1689,7 +1689,7 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
; GFX8-NEXT: v_ashrrev_i32_e32 v8, 31, v5
; GFX8-NEXT: v_ashrrev_i64 v[4:5], v2, v[4:5]
; GFX8-NEXT: v_or_b32_e32 v7, v7, v9
-; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX8-NEXT: v_cmp_ge_u32_e32 vcc, 64, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
@@ -1712,7 +1712,7 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v5
; GFX9-NEXT: v_ashrrev_i64 v[4:5], v2, v[4:5]
; GFX9-NEXT: v_or_b32_e32 v7, v7, v9
-; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, 64, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
@@ -1728,7 +1728,7 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
; GFX10-NEXT: v_sub_nc_u32_e32 v2, 64, v3
; GFX10-NEXT: v_add_nc_u32_e32 v10, 0xffffffc0, v3
; GFX10-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v3
+; GFX10-NEXT: v_cmp_ge_u32_e32 vcc_lo, 64, v3
; GFX10-NEXT: v_ashrrev_i32_e32 v5, 31, v4
; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 0, v3
; GFX10-NEXT: v_lshlrev_b64 v[8:9], v2, v[4:5]
@@ -1750,7 +1750,7 @@ define i65 @v_ashr_i65(i65 %value, i65 %amount) {
; GFX11-NEXT: v_bfe_i32 v4, v2, 0, 1
; GFX11-NEXT: v_sub_nc_u32_e32 v2, 64, v3
; GFX11-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v3
+; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, 64, v3
; GFX11-NEXT: v_cmp_eq_u32_e64 s0, 0, v3
; GFX11-NEXT: v_ashrrev_i32_e32 v5, 31, v4
; GFX11-NEXT: v_lshlrev_b64 v[8:9], v2, v[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index 11acd451d98d7..0810342185c64 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -30,11 +30,11 @@ define amdgpu_ps void @divergent_i1_phi_uniform_branch(ptr addrspace(1) %out, i3
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc0 .LBB0_2
; GFX10-NEXT: ; %bb.1:
-; GFX10-NEXT: v_cmp_le_u32_e64 s0, 6, v2
+; GFX10-NEXT: v_cmp_lt_u32_e64 s0, 6, v2
; GFX10-NEXT: s_branch .LBB0_3
; GFX10-NEXT: .LBB0_2: ; %dummy
; GFX10-NEXT: v_mov_b32_e32 v5, 0x7b
-; GFX10-NEXT: v_cmp_gt_u32_e64 s0, 1, v2
+; GFX10-NEXT: v_cmp_ge_u32_e64 s0, 1, v2
; GFX10-NEXT: global_store_dword v[3:4], v5, off
; GFX10-NEXT: .LBB0_3: ; %exit
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, -1, s0
@@ -65,11 +65,11 @@ exit:
define amdgpu_ps void @divergent_i1_phi_uniform_branch_simple(ptr addrspace(1) %out, i32 %tid, i32 inreg %cond) {
; GFX10-LABEL: divergent_i1_phi_uniform_branch_simple:
; GFX10: ; %bb.0: ; %A
-; GFX10-NEXT: v_cmp_le_u32_e64 s1, 6, v2
+; GFX10-NEXT: v_cmp_lt_u32_e64 s1, 6, v2
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc1 .LBB1_2
; GFX10-NEXT: ; %bb.1: ; %B
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 1, v2
+; GFX10-NEXT: v_cmp_ge_u32_e32 vcc_lo, 1, v2
; GFX10-NEXT: s_andn2_b32 s0, s1, exec_lo
; GFX10-NEXT: s_and_b32 s1, exec_lo, vcc_lo
; GFX10-NEXT: s_or_b32 s1, s0, s1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index be90b02a6dd65..9c9be2db98acf...
[truncated]
Force-pushed from 0ee04d1 to b169654
Force-pushed from b169654 to bb92714
jayfoad reviewed Jul 1, 2025
Force-pushed from bb92714 to 2b6aa12
Fixes SWDEV-538616.
Force-pushed from 2b6aa12 to cbf3eac
jayfoad approved these changes Jul 1, 2025
Fixes: SWDEV-538616