
Commit 194b080

[DAG] LoadedSlice::canMergeExpensiveCrossRegisterBankCopy - replace getABITypeAlign with allowsMemoryAccess (PR45116)
One of the cases identified in PR45116: we don't need to limit load combines to ABI alignment. We can use allowsMemoryAccess instead, which tests against getABITypeAlign but also checks whether the target permits (fast) misaligned memory loads via allowsMisalignedMemoryAccesses as a fallback.
1 parent 8d50a84 commit 194b080
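
For context, the fallback behaviour the commit message describes can be modelled with a short standalone sketch. This is not the actual TargetLowering implementation (the real allowsMemoryAccess takes an LLVMContext, DataLayout, EVT, address space, Align and MachineMemOperand flags); the struct, field names and plain-integer parameters below are hypothetical stand-ins that only illustrate the "ABI alignment first, misaligned-access hook as fallback" decision:

// Simplified, hypothetical model of the allowsMemoryAccess fallback logic.
struct TargetModel {
  bool MisalignedIsLegal = true; // e.g. x86 permits misaligned vector loads
  bool MisalignedIsFast = true;  // ...and reports them as fast

  // Stand-in for the target hook allowsMisalignedMemoryAccesses.
  bool allowsMisaligned(bool *Fast) const {
    *Fast = MisalignedIsFast;
    return MisalignedIsLegal;
  }

  // Stand-in for allowsMemoryAccess: an access that meets the ABI type
  // alignment is always allowed and assumed fast; anything below that is
  // deferred to the misaligned-access hook.
  bool allowsMemoryAccess(unsigned ABIAlign, unsigned ActualAlign,
                          bool *Fast) const {
    if (ActualAlign >= ABIAlign) {
      *Fast = true;
      return true;
    }
    return allowsMisaligned(Fast);
  }
};

The DAG combine below then keeps the slice merge only when the call succeeds and IsFast is set, rather than rejecting every load whose alignment is below getABITypeAlign.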

File tree

2 files changed: +11, -18 lines


llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Lines changed: 6 additions & 5 deletions
@@ -16257,11 +16257,12 @@ struct LoadedSlice {
       return false;
 
     // Check if it will be merged with the load.
-    // 1. Check the alignment constraint.
-    Align RequiredAlignment = DAG->getDataLayout().getABITypeAlign(
-        ResVT.getTypeForEVT(*DAG->getContext()));
-
-    if (RequiredAlignment > getAlign())
+    // 1. Check the alignment / fast memory access constraint.
+    bool IsFast = false;
+    if (!TLI.allowsMemoryAccess(*DAG->getContext(), DAG->getDataLayout(), ResVT,
+                                Origin->getAddressSpace(), getAlign(),
+                                Origin->getMemOperand()->getFlags(), &IsFast) ||
+        !IsFast)
       return false;
 
     // 2. Check that the load is a legal operation for that type.

llvm/test/CodeGen/X86/load-partial.ll

Lines changed: 5 additions & 13 deletions
@@ -280,24 +280,16 @@ define <4 x float> @load_float4_float3_trunc_0123_unaligned(<4 x float>* nocaptu
 ;
 ; SSE41-LABEL: load_float4_float3_trunc_0123_unaligned:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    movq 8(%rdi), %rax
-; SSE41-NEXT:    movd %eax, %xmm1
-; SSE41-NEXT:    shrq $32, %rax
-; SSE41-NEXT:    movd %eax, %xmm2
 ; SSE41-NEXT:    movups (%rdi), %xmm0
-; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: load_float4_float3_trunc_0123_unaligned:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    movq 8(%rdi), %rax
-; AVX-NEXT:    vmovd %eax, %xmm0
-; AVX-NEXT:    shrq $32, %rax
-; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    vmovups (%rdi), %xmm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1],xmm0[0],xmm2[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX-NEXT:    vmovups (%rdi), %xmm0
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; AVX-NEXT:    retq
   %2 = bitcast <4 x float>* %0 to i64*
   %3 = load i64, i64* %2, align 1
