AMDGPU: Try constant fold after folding immediate #141862

Merged
6 changes: 6 additions & 0 deletions llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1675,6 +1675,12 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
+
+      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
+        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
+        Changed = true;
+      }
+
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
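The new block retries constant folding on the user once an immediate has been folded into it, because identities such as x | 0 and x & 0 only become visible at that point. Below is a minimal standalone sketch of that two-step idea; the Operand/Inst types and helper names are invented for illustration and are not LLVM's MachineInstr API:

// Standalone sketch of "fold immediate, then constant fold" (illustrative
// types only; this is not LLVM's MachineInstr API).
#include <cstdint>
#include <iostream>
#include <utility>

struct Operand {
  bool IsImm = false;
  uint32_t Imm = 0; // meaningful when IsImm
  int Reg = -1;     // meaningful when !IsImm
};

static Operand imm(uint32_t V) { return {true, V, -1}; }
static Operand reg(int R) { return {false, 0, R}; }

enum class Opcode { MovImm, Or, And, Xor, Copy };

struct Inst {
  Opcode Op;
  Operand Src0, Src1; // Src1 unused by MovImm and Copy
};

// Step 1, the immediate fold: if the defining instruction is a
// move-immediate, rewrite the use operand in place to that immediate.
static bool foldImmediate(Operand &Use, const Inst &Def) {
  if (Def.Op != Opcode::MovImm)
    return false;
  Use = Def.Src0;
  return true;
}

// Step 2, the follow-up fold this patch adds: with an immediate operand
// now in place, algebraic identities can collapse the whole instruction
// into a COPY or another move-immediate.
static bool tryConstantFoldOp(Inst &MI) {
  if (MI.Op != Opcode::Or && MI.Op != Opcode::And && MI.Op != Opcode::Xor)
    return false;
  if (!MI.Src0.IsImm && MI.Src1.IsImm)
    std::swap(MI.Src0, MI.Src1); // canonicalize the immediate into Src0
  if (!MI.Src0.IsImm)
    return false;
  if (MI.Src0.Imm == 0 && (MI.Op == Opcode::Or || MI.Op == Opcode::Xor)) {
    MI = {Opcode::Copy, MI.Src1, {}}; // x | 0 == x ^ 0 == x
    return true;
  }
  if (MI.Src0.Imm == 0 && MI.Op == Opcode::And) {
    MI = {Opcode::MovImm, imm(0), {}}; // x & 0 == 0
    return true;
  }
  return false;
}

int main() {
  Inst Def = {Opcode::MovImm, imm(0), {}}; // %2 = V_MOV_B32 0
  Inst Use = {Opcode::Or, reg(2), reg(1)}; // %4 = V_OR_B32 %2, %1
  // Without step 2 the OR survives; with it, the OR collapses to a copy.
  if (foldImmediate(Use.Src0, Def) && tryConstantFoldOp(Use))
    std::cout << "folded to COPY of reg " << Use.Src0.Reg << '\n';
}

Running the sketch prints "folded to COPY of reg 1", which is the same collapse the new MIR regression test below checks for V_OR_B32 of a known-zero subregister.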
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -105,9 +105,8 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
 ; CHECK-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT:  ; %bb.1: ; %inc
 ; CHECK-NEXT:    v_not_b32_e32 v2, v4
-; CHECK-NEXT:    v_not_b32_e32 v3, 0
 ; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
 ; CHECK-NEXT:  ; %bb.2: ; %UnifiedReturnBlock
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v2
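The dropped v_not_b32_e32 computed NOT of a known-zero high word; the retried constant fold evaluates that NOT to all ones, which then feeds v_addc_co_u32 directly as the inline immediate -1. A two-line compile-time check of that arithmetic (plain C++, nothing AMDGPU-specific):

#include <cstdint>
// ~0 is all ones, and -1 has the same 32-bit pattern, so the folded
// v_addc can take the inline immediate -1 instead of a register operand.
static_assert(~0u == 0xFFFFFFFFu, "NOT of zero is all ones");
static_assert(static_cast<uint32_t>(-1) == 0xFFFFFFFFu, "same bits as -1");
int main() {}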
22 changes: 22 additions & 0 deletions llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -961,3 +961,25 @@ body: |
     S_ENDPGM 0, implicit %2, implicit %3

 ...
+
+---
+name: constant_v_or_b32_uses_subreg_or_0_regression
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; GCN-LABEL: name: constant_v_or_b32_uses_subreg_or_0_regression
+    ; GCN: liveins: $vgpr0, $vgpr1
+    ; GCN-NEXT: {{ $}}
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
+    %0:vgpr_32 = COPY $vgpr0
+    %1:vgpr_32 = COPY $vgpr1
+    %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %3:vreg_64 = REG_SEQUENCE %2:vgpr_32, %subreg.sub0, %0:vgpr_32, %subreg.sub1
+    %4:vgpr_32 = V_OR_B32_e64 %3.sub0:vreg_64, %1, implicit $exec
+    S_ENDPGM 0, implicit %4
+
+...
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -43,8 +43,7 @@ body: |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
-    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
-    ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
     %0:vgpr_32 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
@@ -8,8 +8,8 @@ body: |
     ; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
-    ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
-    ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
+    ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
     %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
     %2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -404,12 +404,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -380,12 +380,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -348,10 +348,9 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
 ; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -355,12 +355,11 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0