[AMDGPU] prevent shrinking udiv/urem if either operand is in (SignedMax,UnsignedMax] #116733

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged: 1 commit, Nov 20, 2024
42 changes: 29 additions & 13 deletions llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1193,19 +1193,35 @@ int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
Value *Den, unsigned AtLeast,
bool IsSigned) const {
const DataLayout &DL = Mod->getDataLayout();
-  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
-  if (LHSSignBits < AtLeast)
-    return -1;
-
-  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
-  if (RHSSignBits < AtLeast)
-    return -1;
-
-  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
-  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
-  if (IsSigned)
-    ++DivBits;
-  return DivBits;
+  if (IsSigned) {
+    unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
+    if (LHSSignBits < AtLeast)
+      return -1;
+
+    unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
+    if (RHSSignBits < AtLeast)
+      return -1;
Comment on lines +1201 to +1203
Contributor: Should try RHS first as it's canonically simpler

+
+    unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
+    unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
+    return DivBits + 1;
+  } else {
Contributor: No else after return

+    KnownBits Known = computeKnownBits(Num, DL, 0, AC, &I);
+    // All bits are used for division if Num or Den is in the range
+    // (SignedMax, UnsignedMax].
+    if (Known.isNegative() || !Known.isNonNegative())
+      return -1;
Comment on lines +1210 to +1213
Contributor: Is this reproducing the logic of computeKnownBits for division? Can you just do KnownLHS.udiv/urem(KnownRHS)?
Contributor (Author): Unfortunately, we need the KnownBits of the operands in order to shrink; KnownBits of the div/rem itself would only give us the known bits of the final output.

+    unsigned LHSSignBits = Known.countMinLeadingZeros();
+
+    Known = computeKnownBits(Den, DL, 0, AC, &I);
Contributor: Try RHS first

+    if (Known.isNegative() || !Known.isNonNegative())
+      return -1;
+    unsigned RHSSignBits = Known.countMinLeadingZeros();
+
+    unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
+    unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
+    return DivBits;
+  }
}

// The fractional part of a float is enough to accurately represent up to
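A note for readers of the diff above (my illustration, not part of the PR): a value in (SignedMax, UnsignedMax] has its top bit set, so it has no guaranteed leading zeros and the division genuinely needs every bit of the type. As the review thread notes, KnownBits::udiv on the operands would only describe the result, while shrinking needs the operands' own known bits. The sketch below restates the unsigned branch at the value level, with maxNum/maxDen standing in for the upper bounds computeKnownBits would prove; the real pass also bails when the sign bit is merely unknown, which a concrete-value model cannot express.

#include <algorithm>
#include <bit>      // std::countl_zero (C++20)
#include <cstdint>
#include <cstdio>

// Value-level stand-in for the unsigned case of getDivNumBits (illustrative).
static int udivNumBits(uint64_t maxNum, uint64_t maxDen) {
  // An operand in (SignedMax, UnsignedMax] has its top bit set: no
  // guaranteed leading zeros, so the udiv/urem cannot be shrunk.
  if (maxNum > (uint64_t)INT64_MAX || maxDen > (uint64_t)INT64_MAX)
    return -1;
  unsigned SignBits =
      std::min(std::countl_zero(maxNum), std::countl_zero(maxDen));
  return 64 - SignBits; // bits the divide actually consumes
}

int main() {
  printf("%d\n", udivNumBits(256, 10));         // 9: shrinking is safe
  printf("%d\n", udivNumBits(~0ull - 127, 10)); // -1: keep 64-bit expansion
}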
98 changes: 98 additions & 0 deletions llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -9999,3 +9999,101 @@ define <2 x i64> @v_udiv_i64_exact(<2 x i64> %num) {
%result = udiv exact <2 x i64> %num, <i64 4096, i64 1024>
ret <2 x i64> %result
}

define i64 @udiv_i64_gt_smax(i8 %size) {
; GFX6-LABEL: udiv_i64_gt_smax:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 8
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GFX6-NEXT: v_not_b32_e32 v1, v1
; GFX6-NEXT: v_not_b32_e32 v0, v0
; GFX6-NEXT: s_mov_b32 s4, 0xcccccccd
; GFX6-NEXT: v_mul_lo_u32 v3, v1, s4
; GFX6-NEXT: v_mul_hi_u32 v4, v0, s4
; GFX6-NEXT: s_mov_b32 s6, 0xcccccccc
; GFX6-NEXT: v_mul_hi_u32 v5, v1, s4
; GFX6-NEXT: v_mul_hi_u32 v2, v0, s6
; GFX6-NEXT: v_mul_lo_u32 v0, v0, s6
; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v5, vcc
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v3
; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GFX6-NEXT: v_mul_lo_u32 v2, v1, s6
; GFX6-NEXT: v_mul_hi_u32 v1, v1, s6
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v4, v0
; GFX6-NEXT: v_addc_u32_e64 v3, s[4:5], 0, 0, vcc
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; GFX6-NEXT: v_alignbit_b32 v0, v1, v0, 3
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 3, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: udiv_i64_gt_smax:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 31
; GFX9-NEXT: v_not_b32_sdwa v4, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
; GFX9-NEXT: s_mov_b32 s4, 0xcccccccd
; GFX9-NEXT: v_ashrrev_i32_sdwa v1, v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_mul_hi_u32 v0, v4, s4
; GFX9-NEXT: v_not_b32_e32 v5, v1
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_mov_b32 s6, 0xcccccccc
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v5, s4, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v6, v3
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v4, s6, v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v0, v1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v6, v0
; GFX9-NEXT: v_addc_co_u32_e64 v1, s[4:5], 0, 0, vcc
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v5, s6, v[0:1]
; GFX9-NEXT: v_alignbit_b32 v0, v1, v0, 3
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 3, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%esize = sext i8 %size to i64
%minus = sub nuw nsw i64 -1, %esize
%div = udiv i64 %minus, 10
ret i64 %div
}
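Why this test must keep the full expansion (my range reasoning, not text from the PR): %esize lies in [-128, 127], and for every non-negative %size the numerator %minus = -1 - %esize lands in (SignedMax, UnsignedMax], so its sign bit can never be proven zero and the 64-bit magic-number sequence above is required.

#include <cstdint>
#include <cstdio>

// Illustrative spot-check of the range claim (not part of the test file).
int main() {
  int8_t size = 0;                                       // any non-negative byte
  uint64_t minus = UINT64_MAX - (uint64_t)(int64_t)size; // -1 - sext(size)
  printf("%d\n", minus > (uint64_t)INT64_MAX);           // prints 1: no shrink
}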

define i64 @udiv_i64_9divbits(i8 %size) {
; GFX6-LABEL: udiv_i64_9divbits:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX6-NEXT: v_add_i32_e32 v0, vcc, 1, v0
; GFX6-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX6-NEXT: s_mov_b32 s4, 0x41200000
; GFX6-NEXT: v_mul_f32_e32 v1, 0x3dcccccd, v0
; GFX6-NEXT: v_trunc_f32_e32 v1, v1
; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v1
; GFX6-NEXT: v_mad_f32 v0, -v1, s4, v0
; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s4
; GFX6-NEXT: v_mov_b32_e32 v1, 0
; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GFX6-NEXT: v_and_b32_e32 v0, 0x1ff, v0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: udiv_i64_9divbits:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 1
; GFX9-NEXT: v_add_u32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: s_mov_b32 s4, 0x41200000
; GFX9-NEXT: v_mul_f32_e32 v1, 0x3dcccccd, v0
; GFX9-NEXT: v_trunc_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v1
; GFX9-NEXT: v_mad_f32 v0, -v1, s4, v0
; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, s4
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v2, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0x1ff, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%zextend = zext i8 %size to i64
%num = add nuw nsw i64 1, %zextend
%div = udiv i64 %num, 10
ret i64 %div
}
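Here the shrink succeeds because the numerator is provably small, hence the float-based 32-bit sequence and the final mask with 0x1ff: 1 + zext(i8) lies in [1, 256], which needs only 9 bits. A rough value-level version of that computation, with the usual caveat that the pass reasons over KnownBits rather than concrete bounds:

#include <algorithm>
#include <bit>      // std::countl_zero (C++20)
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t maxNum = 1 + 0xffull; // largest possible 1 + zext(i8): 256
  uint64_t den = 10;
  unsigned SignBits = std::min(std::countl_zero(maxNum), std::countl_zero(den));
  printf("DivBits = %u\n", 64 - SignBits); // 9: a narrower expansion suffices
}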

119 changes: 115 additions & 4 deletions llvm/test/CodeGen/AMDGPU/bypass-div.ll
@@ -1021,8 +1021,116 @@ define i64 @sdiv64_known32(i64 %a, i64 %b) {
; GFX9-LABEL: sdiv64_known32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; GFX9-NEXT: v_or_b32_e32 v5, v2, v0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GFX9-NEXT: v_mov_b32_e32 v7, v1
; GFX9-NEXT: v_mov_b32_e32 v6, v3
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_xor_b64 s[6:7], exec, s[4:5]
; GFX9-NEXT: s_cbranch_execz .LBB10_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v6
; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v0
; GFX9-NEXT: v_sub_co_u32_e32 v11, vcc, 0, v6
; GFX9-NEXT: v_subb_co_u32_e32 v12, vcc, 0, v0, vcc
; GFX9-NEXT: v_madmk_f32 v1, v3, 0x4f800000, v1
; GFX9-NEXT: v_rcp_f32_e32 v1, v1
; GFX9-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1
; GFX9-NEXT: v_mul_f32_e32 v3, 0x2f800000, v1
; GFX9-NEXT: v_trunc_f32_e32 v3, v3
; GFX9-NEXT: v_madmk_f32 v1, v3, 0xcf800000, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v10, v3
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_mul_lo_u32 v5, v11, v10
; GFX9-NEXT: v_mul_lo_u32 v8, v12, v1
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v11, v1, 0
; GFX9-NEXT: v_add3_u32 v8, v4, v5, v8
; GFX9-NEXT: v_mul_hi_u32 v9, v1, v3
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v8, 0
; GFX9-NEXT: v_add_co_u32_e32 v13, vcc, v9, v4
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v10, v3, 0
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v10, v8, 0
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v13, v3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v9, vcc
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v8
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v1, v3
; GFX9-NEXT: v_addc_co_u32_e32 v13, vcc, v10, v4, vcc
; GFX9-NEXT: v_mul_lo_u32 v5, v11, v13
; GFX9-NEXT: v_mul_lo_u32 v8, v12, v1
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v11, v1, 0
; GFX9-NEXT: v_add3_u32 v8, v4, v5, v8
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v13, v8, 0
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v1, v8, 0
; GFX9-NEXT: v_mul_hi_u32 v12, v1, v3
; GFX9-NEXT: v_mad_u64_u32 v[10:11], s[4:5], v13, v3, 0
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v12, v8
; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v9, vcc
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v10
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v8, v11, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v5, vcc
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v1, v3
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v4, vcc
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v7, v5, 0
; GFX9-NEXT: v_mul_hi_u32 v8, v7, v1
; GFX9-NEXT: v_add_co_u32_e32 v10, vcc, v8, v3
; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, 0, v4, vcc
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v2, v1, 0
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v2, v5, 0
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v10, v3
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v11, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v9, vcc
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v1, v8
; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v3, vcc
; GFX9-NEXT: v_mul_lo_u32 v8, v0, v1
; GFX9-NEXT: v_mul_lo_u32 v9, v6, v5
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v6, v1, 0
; GFX9-NEXT: v_add3_u32 v4, v4, v9, v8
; GFX9-NEXT: v_sub_u32_e32 v8, v2, v4
; GFX9-NEXT: v_sub_co_u32_e32 v3, vcc, v7, v3
; GFX9-NEXT: v_subb_co_u32_e64 v7, s[4:5], v8, v0, vcc
; GFX9-NEXT: v_sub_co_u32_e64 v8, s[4:5], v3, v6
; GFX9-NEXT: v_subbrev_co_u32_e64 v7, s[4:5], 0, v7, s[4:5]
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v7, v0
; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[4:5]
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v8, v6
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[4:5]
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], v7, v0
; GFX9-NEXT: v_cndmask_b32_e64 v7, v9, v8, s[4:5]
; GFX9-NEXT: v_add_co_u32_e64 v8, s[4:5], 2, v1
; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], 0, v5, s[4:5]
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v2, v0
; GFX9-NEXT: v_add_co_u32_e64 v10, s[4:5], 1, v1
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v3, v6
; GFX9-NEXT: v_addc_co_u32_e64 v11, s[4:5], 0, v5, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v2, v0
; GFX9-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, v7
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, v9, s[4:5]
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: v_cndmask_b32_e64 v0, v10, v8, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v1, v0, vcc
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: .LBB10_2: ; %Flow
; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[6:7]
; GFX9-NEXT: s_cbranch_execz .LBB10_4
; GFX9-NEXT: ; %bb.3:
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v3
; GFX9-NEXT: v_sub_u32_e32 v2, 0, v3
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
@@ -1033,14 +1141,17 @@ define i64 @sdiv64_known32(i64 %a, i64 %b) {
; GFX9-NEXT: v_mul_lo_u32 v2, v0, v3
; GFX9-NEXT: v_add_u32_e32 v4, 1, v0
; GFX9-NEXT: v_sub_u32_e32 v1, v1, v2
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT: v_sub_u32_e32 v2, v1, v3
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v1, v3
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v2, vcc
; GFX9-NEXT: .LBB10_4:
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%a.ext = ashr i64 %a, 32
%b.ext = ashr i64 %b, 32
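The sdiv64_known32 output above also shows the bypass structure this file tests: a runtime check on the high words selects between the full 64-bit expansion (%bb.1) and a cheap 32-bit path (%bb.3). A simplified unsigned sketch of that shape, based on my reading of the assembly rather than any documented contract:

#include <cstdint>

// Illustrative bypass shape (assumes b != 0): if the OR of the high words is
// zero, both operands fit in 32 bits and one 32-bit divide suffices.
static uint64_t div64_bypass(uint64_t a, uint64_t b) {
  if (((a | b) >> 32) == 0)
    return (uint32_t)a / (uint32_t)b; // fast path, like %bb.3
  return a / b;                       // full 64-bit path, like %bb.1
}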