Skip to content

[AMDGPU] - Add constant folding for s_quadmask #72381

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Nov 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions llvm/lib/Analysis/ConstantFolding.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1533,6 +1533,7 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
case Intrinsic::amdgcn_perm:
case Intrinsic::amdgcn_wave_reduce_umin:
case Intrinsic::amdgcn_wave_reduce_umax:
case Intrinsic::amdgcn_s_quadmask:
case Intrinsic::amdgcn_s_bitreplicate:
case Intrinsic::arm_mve_vctp8:
case Intrinsic::arm_mve_vctp16:
Expand Down Expand Up @@ -2424,6 +2425,18 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
return ConstantFP::get(Ty->getContext(), Val);
}

// s_quadmask: collapse each 4-bit group ("quad") of the operand into a
// single result bit that is set iff any bit of that group is set.
case Intrinsic::amdgcn_s_quadmask: {
uint64_t Val = Op->getZExtValue();
uint64_t QuadMask = 0;
// One output bit per nibble: an i32 source yields 8 result bits, an i64
// source yields 16, so I never exceeds 15 and `1 << I` cannot overflow.
for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
if (!(Val & 0xF))
continue;

QuadMask |= (1 << I);
}
return ConstantInt::get(Ty, QuadMask);
}

case Intrinsic::amdgcn_s_bitreplicate: {
uint64_t Val = Op->getZExtValue();
Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
Expand Down
100 changes: 94 additions & 6 deletions llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,60 @@
declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)

; quadmask(i32 0) folds at compile time: no quad has a set bit, so the result
; is 0 and no s_quadmask_b32 instruction appears in the output.
define i32 @test_quadmask_constant_zero_i32() {
; GFX11-LABEL: test_quadmask_constant_zero_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 0)
ret i32 %qm
}

; quadmask(i32 -1) folds to 0xff: all 8 quads of a 32-bit value are non-zero,
; so the low 8 result bits are set and the result is a plain immediate move.
define i32 @test_quadmask_constant_neg_one_i32() {
; GFX11-LABEL: test_quadmask_constant_neg_one_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, 0xff
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 -1)
ret i32 %qm
}

; An undef operand is not constant-folded: the s_quadmask_b32 instruction is
; still emitted. NOTE(review): folding undef to some constant may be legal per
; undef semantics — this test pins the current (non-folding) behavior.
define i32 @test_quadmask_constant_undef_i32() {
; GFX11-LABEL: test_quadmask_constant_undef_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 undef)
ret i32 %qm
}

; A poison operand is not constant-folded either: the s_quadmask_b32
; instruction survives. NOTE(review): poison could propagate through the
; intrinsic — this test pins the current (non-folding) behavior.
define i32 @test_quadmask_constant_poison_i32() {
; GFX11-LABEL: test_quadmask_constant_poison_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 poison)
ret i32 %qm
}

define i32 @test_quadmask_constant_i32() {
; GFX11-LABEL: test_quadmask_constant_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, 0x85fe3a92
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, 0xcb
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 u0x85FE3A92)
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 u0x85003092)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added some zero quads here and further down to make the constant more interesting to check, rather than just 0xff

ret i32 %qm
}

Expand Down Expand Up @@ -50,13 +95,56 @@ define i64 @test_quadmask_constant_i64() {
; GFX11-LABEL: test_quadmask_constant_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s0, 0x85fe3a92
; GFX11-NEXT: s_mov_b32 s1, 0x67de48fc
; GFX11-NEXT: v_dual_mov_b32 v0, 0xe3e6 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 u0x67D000FC85F00A90)
ret i64 %qm
}

; quadmask(i64 0) folds at compile time: both result halves are materialized
; as zero immediates and no s_quadmask_b64 instruction is emitted.
define i64 @test_quadmask_constant_zero_i64() {
; GFX11-LABEL: test_quadmask_constant_zero_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 0)
ret i64 %qm
}

; quadmask(i64 -1) folds to 0xffff: all 16 quads of a 64-bit value are
; non-zero, so the low 16 result bits are set (high half is zero).
define i64 @test_quadmask_constant_neg_one_i64() {
; GFX11-LABEL: test_quadmask_constant_neg_one_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 0xffff :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 -1)
ret i64 %qm
}

; An undef i64 operand is not constant-folded: the s_quadmask_b64 instruction
; is still emitted. NOTE(review): mirrors the i32 undef case — pins the
; current (non-folding) behavior.
define i64 @test_quadmask_constant_undef_i64() {
; GFX11-LABEL: test_quadmask_constant_undef_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 undef)
ret i64 %qm
}

define i64 @test_quadmask_constant_poison_i64() {
; GFX11-LABEL: test_quadmask_constant_poison_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 u0x67DE48FC85FE3A92)
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 poison)
ret i64 %qm
}

Expand Down