AMDGPU: Implement isExtractVecEltCheap #122460

Merged: 2 commits, Jan 17, 2025
7 changes: 7 additions & 0 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1952,6 +1952,13 @@ bool SITargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
return Index == 0;
}

bool SITargetLowering::isExtractVecEltCheap(EVT VT, unsigned Index) const {
// TODO: This should be more aggressive, particular for 16-bit element
// vectors. However there are some mixed improvements and regressions.
EVT EltTy = VT.getVectorElementType();
return EltTy.getSizeInBits() % 32 == 0;
Contributor:
@broxigarchen @Sisyph for true16 we should aim to return EltTy.getSizeInBits() % 16 == 0 here.

Contributor (Author):
Even without true16 it should be better (maybe only even aligned cases?)

Contributor:

Yeah, without true16 EltTy.getSizeInBits() * Index % 32 == 0 would make sense to me.

Contributor:

Yes I would think EltTy.getSizeInBits() * Index % 16 == 0 for True16 would be the way to go.

}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
if (Subtarget->has16BitInsts() && VT == MVT::i16) {
switch (Op) {
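For reference, a minimal sketch (not part of this merged patch) of how the index-aware variants floated in the review thread above might look. The subtarget query useRealTrue16Insts() is an assumption here, and the thresholds are the ones the reviewers suggested, not values verified against codegen results:

    // Hypothetical follow-up, per the review discussion: make the check
    // depend on the bit offset of the extracted element, not just its size.
    bool SITargetLowering::isExtractVecEltCheap(EVT VT, unsigned Index) const {
      EVT EltTy = VT.getVectorElementType();
      unsigned BitOffset = EltTy.getSizeInBits() * Index;
      if (Subtarget->useRealTrue16Insts()) // assumed True16 subtarget query
        return BitOffset % 16 == 0;        // any 16-bit-aligned element is cheap
      return BitOffset % 32 == 0;          // otherwise require a 32-bit boundary
    }

Whether a change along these lines is actually profitable would still depend on resolving the mixed improvements and regressions noted in the in-code TODO.
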
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -365,6 +365,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {

bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const override;
bool isExtractVecEltCheap(EVT VT, unsigned Index) const override;

bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;

12 changes: 5 additions & 7 deletions llvm/test/CodeGen/AMDGPU/mad-mix.ll
@@ -385,17 +385,15 @@ define <2 x float> @v_mad_mix_v2f32_shuffle(<2 x half> %src0, <2 x half> %src1,
; SDAG-CI: ; %bb.0:
; SDAG-CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v4, v5
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v5, v1
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SDAG-CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v1, v4
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v4, v5
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v5, v0
; SDAG-CI-NEXT: v_mad_f32 v0, v4, v2, v1
; SDAG-CI-NEXT: v_mac_f32_e32 v1, v5, v3
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SDAG-CI-NEXT: v_cvt_f32_f16_e32 v4, v0
; SDAG-CI-NEXT: v_mad_f32 v0, v1, v2, v5
; SDAG-CI-NEXT: v_mad_f32 v1, v4, v3, v5
; SDAG-CI-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-CI-LABEL: v_mad_mix_v2f32_shuffle:
32 changes: 27 additions & 5 deletions llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -549,24 +549,46 @@ bb:
ret void
}

; GCN-LABEL: {{^}}fadd_fadd_fsub:
; GCN-LABEL: {{^}}fadd_fadd_fsub_0:
; GFX900: v_add_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, 0
; GFX900: v_add_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; PACKED-SDAG: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}}
; PACKED-SDAG: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], 0 op_sel_hi:[1,0]{{$}}

; PACKED-SDAG: v_add_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, 0
; PACKED-SDAG: v_add_f32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}

; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}}
; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg) {
define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) {
bb:
%i12 = fadd <2 x float> zeroinitializer, %arg
%shift8 = shufflevector <2 x float> %i12, <2 x float> undef, <2 x i32> <i32 1, i32 undef>
%shift8 = shufflevector <2 x float> %i12, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
%i13 = fadd <2 x float> zeroinitializer, %shift8
%i14 = shufflevector <2 x float> %arg, <2 x float> %i13, <2 x i32> <i32 0, i32 2>
%i15 = fsub <2 x float> %i14, zeroinitializer
store <2 x float> %i15, ptr undef
ret void
}

; GCN-LABEL: {{^}}fadd_fadd_fsub:
; GFX900: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; GFX900: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}

; PACKED-SDAG: v_add_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; PACKED-SDAG: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}] op_sel_hi:[1,0]{{$}}

; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}}
; PACKED-GISEL: v_pk_add_f32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}]{{$}}
define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, ptr addrspace(1) %ptr) {
bb:
%i12 = fadd <2 x float> %arg, %arg1
%shift8 = shufflevector <2 x float> %i12, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
%i13 = fadd <2 x float> %arg1, %shift8
%i14 = shufflevector <2 x float> %arg, <2 x float> %i13, <2 x i32> <i32 0, i32 2>
%i15 = fsub <2 x float> %i14, %arg1
store <2 x float> %i15, ptr addrspace(1) %ptr
ret void
}

; GCN-LABEL: {{^}}fadd_shuffle_v4:
; GFX900-COUNT-4: v_add_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; PACKED-SDAG-COUNT-2: v_pk_add_f32 v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] op_sel_hi:[1,0]{{$}}
1 change: 1 addition & 0 deletions llvm/test/CodeGen/AMDGPU/trunc-combine.ll
@@ -184,6 +184,7 @@ define <2 x i16> @vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression(i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_lshr_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: vector_trunc_high_bits_undef_lshr_rhs_alignbit_regression: