Commit 80064b6

AMDGPU: Try constant fold after folding immediate (#141862)
This helps avoid some regressions in a future patch. The or-with-0 pattern appears in the division tests because reducing a 64-bit bitwise operation to a 32-bit one when one half is an identity value is only implemented for constants. We could fix that by using computeKnownBits. Additionally, the pattern disappears if I optimize the IR division expansion, so that IR should probably be emitted more optimally in the first place.
1 parent 4bf2de1 commit 80064b6
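
As an illustration, here is a minimal MIR sketch of that or-with-0 pattern, modeled on the constant_v_or_b32_uses_subreg_or_0_regression test added below (the virtual register numbers are only for this sketch). Once the immediate 0 behind the REG_SEQUENCE subregister has been folded into the V_OR_B32, the or with a zero constant can be constant folded down to a plain copy:

    ; Before SIFoldOperands: the OR reads a subregister that holds the constant 0.
    %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %3:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %0, %subreg.sub1
    %4:vgpr_32 = V_OR_B32_e64 %3.sub0, %1, implicit $exec

    ; After the immediate is folded and the OR is constant folded,
    ; %4 reduces to a plain copy of %1 (see the GCN check lines in the new test).
    %4:vgpr_32 = COPY %1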

9 files changed: +42 additions, -20 deletions

llvm/lib/Target/AMDGPU/SIFoldOperands.cpp

Lines changed: 6 additions & 0 deletions
@@ -1675,6 +1675,12 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
+
+      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
+        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
+        Changed = true;
+      }
+
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);

llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll

Lines changed: 1 addition & 2 deletions
@@ -105,9 +105,8 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
 ; CHECK-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT:  ; %bb.1: ; %inc
 ; CHECK-NEXT:    v_not_b32_e32 v2, v4
-; CHECK-NEXT:    v_not_b32_e32 v3, 0
 ; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
 ; CHECK-NEXT:  ; %bb.2: ; %UnifiedReturnBlock
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v2

llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir

Lines changed: 22 additions & 0 deletions
@@ -961,3 +961,25 @@ body: |
     S_ENDPGM 0, implicit %2, implicit %3

 ...
+
+---
+name: constant_v_or_b32_uses_subreg_or_0_regression
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; GCN-LABEL: name: constant_v_or_b32_uses_subreg_or_0_regression
+    ; GCN: liveins: $vgpr0, $vgpr1
+    ; GCN-NEXT: {{  $}}
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
+    %0:vgpr_32 = COPY $vgpr0
+    %1:vgpr_32 = COPY $vgpr1
+    %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %3:vreg_64 = REG_SEQUENCE %2:vgpr_32, %subreg.sub0, %0:vgpr_32, %subreg.sub1
+    %4:vgpr_32 = V_OR_B32_e64 %3.sub0:vreg_64, %1, implicit $exec
+    S_ENDPGM 0, implicit %4
+
+...

llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir

Lines changed: 1 addition & 2 deletions
@@ -43,8 +43,7 @@ body: |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
-    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
-    ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
     %0:vgpr_32 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF

llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir

Lines changed: 2 additions & 2 deletions
@@ -8,8 +8,8 @@ body: |
     ; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
-    ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
-    ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
+    ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
     %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
     %2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec

llvm/test/CodeGen/AMDGPU/sdiv64.ll

Lines changed: 3 additions & 4 deletions
@@ -404,12 +404,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0

llvm/test/CodeGen/AMDGPU/srem64.ll

Lines changed: 3 additions & 4 deletions
@@ -380,12 +380,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0

llvm/test/CodeGen/AMDGPU/udiv64.ll

Lines changed: 1 addition & 2 deletions
@@ -348,10 +348,9 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
 ; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0

llvm/test/CodeGen/AMDGPU/urem64.ll

Lines changed: 3 additions & 4 deletions
@@ -355,12 +355,11 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
