
Re apply 130577 narrow math for and operand #133896


Merged
Changes from all commits (26 commits)
10bb8da
Revert "Revert "[AMDGPU][CodeGenPrepare] Narrow 64 bit math to 32 bit…
Shoreshen Apr 1, 2025
fd6b530
Merge branch 'main' into revert-133880-revert-130577-narrow-math-for-…
Shoreshen Apr 1, 2025
69271c0
fix address sanitizer failure
Shoreshen Apr 1, 2025
f831e51
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 2, 2025
a937f5b
fix comments
Shoreshen Apr 2, 2025
9fd971f
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 3, 2025
990da17
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 7, 2025
80695a0
remove isd related type check
Shoreshen Apr 7, 2025
37dc751
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 8, 2025
72560d7
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 8, 2025
8d87d7c
fix comments
Shoreshen Apr 8, 2025
92442bf
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 9, 2025
7db8bae
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 9, 2025
96a3cd9
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 11, 2025
f2db6a2
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 11, 2025
ef91659
fix comments
Shoreshen Apr 11, 2025
11a745c
fix comment
Shoreshen Apr 11, 2025
c84f1f3
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 14, 2025
468eec7
fix comment, consider one constant situation
Shoreshen Apr 14, 2025
b185ed9
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 15, 2025
7332e7d
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 16, 2025
3e06714
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 16, 2025
90bad11
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 16, 2025
6d34b08
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 16, 2025
8add585
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 17, 2025
a5837c4
Merge branch 'main' into re-apply-130577-narrow-math-for-and-operand
Shoreshen Apr 17, 2025
77 changes: 77 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1559,6 +1559,80 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
llvm_unreachable("not a division");
}

/*
This will cause an inconsistency for non-byte loads, for example:
```
%load = load i1, ptr addrspace(4) %arg, align 4
%zext = zext i1 %load to i64
%add = add i64 %zext, ...
```
Instead of creating `s_and_b32 s0, s0, 1`,
it will create `s_and_b32 s0, s0, 0xff`.
We accept this change since the non-byte load assumes the upper bits
within the byte are all 0.
Review comment on lines +1563 to +1572 (Contributor): Comment is off topic for what this actually does (should also prefer c++ style comments)
*/
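// Try to narrow a wide add/mul to a smaller legal integer type when
// known-bits analysis proves the result cannot overflow the narrow type and
// the cost model below says the narrow form is cheaper.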
static bool tryNarrowMathIfNoOverflow(Instruction *I,
const SITargetLowering *TLI,
Review comment (Contributor): Can use references
const TargetTransformInfo &TTI,
const DataLayout &DL) {
unsigned Opc = I->getOpcode();
Type *OldType = I->getType();

if (Opc != Instruction::Add && Opc != Instruction::Mul)
return false;

unsigned OrigBit = OldType->getScalarSizeInBits();

if (Opc != Instruction::Add && Opc != Instruction::Mul)
llvm_unreachable("Unexpected opcode, only valid for Instruction::Add and "
"Instruction::Mul.");

unsigned MaxBitsNeeded = computeKnownBits(I, DL).countMaxActiveBits();

MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
Type *NewType = DL.getSmallestLegalIntType(I->getContext(), MaxBitsNeeded);
if (!NewType)
return false;
unsigned NewBit = NewType->getIntegerBitWidth();
if (NewBit >= OrigBit)
return false;
NewType = I->getType()->getWithNewBitWidth(NewBit);

// Old cost
InstructionCost OldCost =
TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
// New cost of new op
InstructionCost NewCost =
TTI.getArithmeticInstrCost(Opc, NewType, TTI::TCK_RecipThroughput);
// New cost of narrowing 2 operands (use trunc)
int NumOfNonConstOps = 2;
if (isa<Constant>(I->getOperand(0)) || isa<Constant>(I->getOperand(1))) {
// Operands cannot both be constant; that case would already have been folded.
NumOfNonConstOps = 1;
}
NewCost += NumOfNonConstOps * TTI.getCastInstrCost(Instruction::Trunc,
NewType, OldType,
TTI.getCastContextHint(I),
TTI::TCK_RecipThroughput);
// New cost of zext narrowed result to original type
NewCost +=
TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
TTI.getCastContextHint(I), TTI::TCK_RecipThroughput);
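// For example, narrowing a 64-bit mul with two non-constant operands pays
// off only when cost(mul i32) + 2 * cost(trunc) + cost(zext) beats
// cost(mul i64).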
if (NewCost >= OldCost)
return false;

IRBuilder<> Builder(I);
Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
Value *Arith =
Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);

Value *Zext = Builder.CreateZExt(Arith, OldType);
I->replaceAllUsesWith(Zext);
I->eraseFromParent();
return true;
}
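
For reference, a minimal IR-level sketch of the rewrite this helper performs, mirroring the umul24_i64_2 test change below (value names are illustrative):

```
; Before: a 64-bit multiply whose operands are masked to 16 bits, so the
; result provably fits in 32 bits.
%lhs24 = and i64 %lhs, 65535
%rhs24 = and i64 %rhs, 65535
%mul = mul i64 %lhs24, %rhs24

; After: the operands are truncated, the multiply is done in 32 bits, and
; the result is zero-extended back to 64 bits.
%t0 = trunc i64 %lhs24 to i32
%t1 = trunc i64 %rhs24 to i32
%m32 = mul i32 %t0, %t1
%mul.wide = zext i32 %m32 to i64
```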

bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if (foldBinOpIntoSelect(I))
return true;
@@ -1569,6 +1643,9 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {

if (UseMul24Intrin && replaceMulWithMul24(I))
return true;
if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
TM.getTargetTransformInfo(F), DL))
return true;

bool Changed = false;
Instruction::BinaryOps Opc = I.getOpcode();
5 changes: 4 additions & 1 deletion llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
@@ -414,7 +414,10 @@ define i64 @umul24_i64_2(i64 %lhs, i64 %rhs) {
; DISABLED-LABEL: @umul24_i64_2(
; DISABLED-NEXT: [[LHS24:%.*]] = and i64 [[LHS:%.*]], 65535
; DISABLED-NEXT: [[RHS24:%.*]] = and i64 [[RHS:%.*]], 65535
; DISABLED-NEXT: [[MUL:%.*]] = mul i64 [[LHS24]], [[RHS24]]
; DISABLED-NEXT: [[TMP1:%.*]] = trunc i64 [[LHS24]] to i32
; DISABLED-NEXT: [[TMP2:%.*]] = trunc i64 [[RHS24]] to i32
; DISABLED-NEXT: [[TMP3:%.*]] = mul i32 [[TMP1]], [[TMP2]]
; DISABLED-NEXT: [[MUL:%.*]] = zext i32 [[TMP3]] to i64
; DISABLED-NEXT: ret i64 [[MUL]]
;
%lhs24 = and i64 %lhs, 65535
52 changes: 24 additions & 28 deletions llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -1823,22 +1823,22 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB3_2
; GFX1264-NEXT: ; %bb.1:
; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX1264-NEXT: v_mov_b32_e32 v1, 0
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -1860,29 +1860,27 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: add_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1232-NEXT: s_mov_b32 s7, exec_lo
; GFX1232-NEXT: s_mov_b32 s5, 0
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB3_2
; GFX1232-NEXT: ; %bb.1:
; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mov_b32 s10, -1
; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB3_2:
; GFX1232-NEXT: s_wait_alu 0xfffe
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s3, v1
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
@@ -5372,22 +5370,22 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB9_2
; GFX1264-NEXT: ; %bb.1:
; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX1264-NEXT: v_mov_b32_e32 v1, 0
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -5412,29 +5410,27 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: sub_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1232-NEXT: s_mov_b32 s7, exec_lo
; GFX1232-NEXT: s_mov_b32 s5, 0
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB9_2
; GFX1232-NEXT: ; %bb.1:
; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mov_b32 s10, -1
; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB9_2:
; GFX1232-NEXT: s_wait_alu 0xfffe
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_mul_u32_u24_e32 v0, 5, v2
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -170,13 +170,13 @@ define void @issue63986_reduced_expanded(i64 %idxprom) {
; CHECK-NEXT: s_cbranch_execnz .LBB1_8
; CHECK-NEXT: .LBB1_5: ; %loop-memcpy-residual.preheader
; CHECK-NEXT: v_mov_b32_e32 v0, s4
; CHECK-NEXT: s_mov_b64 s[6:7], 0
; CHECK-NEXT: s_mov_b64 s[8:9], 0
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: v_mov_b32_e32 v1, s5
; CHECK-NEXT: .LBB1_6: ; %loop-memcpy-residual
; CHECK-NEXT: s_add_u32 s4, s6, 1
; CHECK-NEXT: s_addc_u32 s5, s7, 0
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
; CHECK-NEXT: s_mov_b64 s[6:7], 1
; CHECK-NEXT: s_add_i32 s6, s8, 1
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
; CHECK-NEXT: s_mov_b64 s[8:9], 1
; CHECK-NEXT: s_cbranch_vccnz .LBB1_6
; CHECK-NEXT: ; %bb.7: ; %Flow
; CHECK-NEXT: v_mov_b32_e32 v0, 0