
Commit 6c8569f

AMDGPU: Re-visit nodes in performAndCombine
This fixes test regressions when i64 loads/stores are made promote.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@267240 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 9ef1010

File tree

3 files changed (+17, -9 lines):

  lib/Target/AMDGPU/AMDGPUISelLowering.cpp
  test/CodeGen/AMDGPU/and.ll
  test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll


lib/Target/AMDGPU/AMDGPUISelLowering.cpp

Lines changed: 5 additions & 0 deletions
@@ -2229,6 +2229,11 @@ SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
     SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
     SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
 
+    // Re-visit the ands. It's possible we eliminated one of them and it could
+    // simplify the vector.
+    DCI.AddToWorklist(Lo.getNode());
+    DCI.AddToWorklist(Hi.getNode());
+
     SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
   }
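The five added lines are the entire functional change. The DAGCombiner is worklist-driven: it only re-examines nodes that are queued, and nodes touched inside a target combine like this one are not automatically revisited, which is why the two DCI.AddToWorklist() calls are needed. As a rough illustration of the principle only, here is a self-contained toy rewriter; this is not LLVM code, and every name in it (Node, simplifyAnd, Worklist) is invented for the sketch.

#include <cstdint>
#include <cstdio>
#include <deque>
#include <string>
#include <vector>

// A tiny expression node; "and" carries two operands, "const" an
// immediate, "val" is an opaque leaf.
struct Node {
  std::string Op;
  int64_t Imm = 0;
  std::vector<Node *> Ops;
};

// One peephole: (and x, -1) --> x. Returns the (possibly) simplified node.
Node *simplifyAnd(Node *N) {
  if (N->Op == "and" && N->Ops.size() == 2 && N->Ops[1]->Op == "const" &&
      N->Ops[1]->Imm == -1)
    return N->Ops[0];
  return N;
}

int main() {
  Node AllOnes{"const", -1, {}};
  Node X{"val", 0, {}};

  // Mimic the combine above splitting a 64-bit AND: two fresh 32-bit ANDs
  // are built mid-combine. Here both happen to be foldable.
  Node LoAnd{"and", 0, {&X, &AllOnes}};
  Node HiAnd{"and", 0, {&X, &AllOnes}};

  std::deque<Node *> Worklist;
  // The analogue of the two DCI.AddToWorklist() calls in the diff: without
  // these pushes the loop below never visits the fresh nodes, and the
  // foldable ANDs survive to the final output.
  Worklist.push_back(&LoAnd);
  Worklist.push_back(&HiAnd);

  while (!Worklist.empty()) {
    Node *N = Worklist.front();
    Worklist.pop_front();
    Node *S = simplifyAnd(N);
    std::printf("%s simplified to %s\n", N->Op.c_str(), S->Op.c_str());
  }
  return 0;
}

In the real combine the revisited nodes feed the BUILD_VECTOR/BITCAST that follows, so folding one half lets later combines simplify the vector, which is exactly what the new in-source comment describes.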

test/CodeGen/AMDGPU/and.ll

Lines changed: 9 additions & 6 deletions
@@ -213,12 +213,14 @@ define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
 
 ; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
 ; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_load_dword [[B:s[0-9]+]]
 ; SI: s_load_dwordx2
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
+; SI: s_lshl_b32 [[A]], [[A]], 1
+; SI: s_lshl_b32 [[B]], [[B]], 1
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
+; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
 ; SI-NOT: and
 ; SI: buffer_store_dwordx2
 define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
@@ -336,9 +338,10 @@ define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 }
 
 ; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
-; SI: s_lshl_b64 s{{\[}}[[VALLO:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s[[VALLO]], 64
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
 ; SI-NOT: and
 ; SI: s_add_u32
 ; SI-NEXT: s_addc_u32

test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll

Lines changed: 3 additions & 3 deletions
@@ -299,9 +299,9 @@ define void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addr
 }
 
 ; GCN-LABEL: {{^}}and_not_mask_i64:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 20
-; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, v[[SHRLO]]
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 20, v[[VALLO]]
+; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, [[SHR]]
 ; GCN-DAG: v_mov_b32_e32 v[[SHRHI]], 0{{$}}
 ; GCN-NOT: v[[SHRLO]]
 ; GCN-NOT: v[[SHRHI]]
