Commit cf59d67

[X86] Fix the bug of pr62625

We should not call tryOptimizeLEAtoMOV() in eliminateFrameIndex() when the base register is a virtual register, because tryOptimizeLEAtoMOV() assumes the base register is a physical register. The LEA-to-MOV optimization could also be applied when the base register is virtual, but that is left for a separate patch.

Differential Revision: https://reviews.llvm.org/D150521
1 parent b2809b4 commit cf59d67
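
For context on the reasoning above: as the message notes, the LEA whose frame index is being eliminated can still have a virtual base register at this point, and LLVM's Register type distinguishes virtual from physical registers explicitly. Below is a minimal sketch of the kind of guard the message describes, assuming the usual X86 LEA operand layout (operand 1 is the base register); the helper name shouldTryLEAtoMOV is hypothetical and is not part of this patch.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

// Illustrative sketch only: the LEA-to-MOV rewrite should only be attempted
// once the base operand holds a physical register, because the copy it would
// emit goes through interfaces that expect physical registers.
static bool shouldTryLEAtoMOV(const llvm::MachineInstr &MI) {
  llvm::Register Base = MI.getOperand(1).getReg();
  return Base.isPhysical(); // skip virtual registers such as %0, %1 in MIR
}

Such a guard is cheap: Register::isPhysical() is essentially a range check on the register number, so skipping the optimization for virtual registers costs nothing on the common path.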

File tree

2 files changed: +97, -1 lines changed

llvm/lib/Target/X86/X86RegisterInfo.cpp
Lines changed: 1 addition & 1 deletion

@@ -812,7 +812,7 @@ void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     int Offset = FIOffset + Imm;
     assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
            "Requesting 64-bit offset in 32-bit immediate!");
-    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
+    if (Offset != 0)
       MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
   } else {
     // Offset is symbolic. This is extremely rare.
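
For background on the call being guarded: when the folded frame-index offset is zero, the LEA computes nothing beyond its base register, so an instruction such as leaq (%rbp), %rax is equivalent to movq %rbp, %rax. Below is a rough sketch of that rewrite under the same operand-layout assumption as above; the function name rewriteZeroDispLEAToCopy and its checks are illustrative and are not the in-tree tryOptimizeLEAtoMOV(). Note also that TargetInstrInfo::copyPhysReg() takes MCRegister (physical register) operands, one concrete reason such a rewrite cannot run while the base register is still virtual.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

// Illustrative sketch only: turn "%dst = LEA64r %base, 1, $noreg, 0, $noreg"
// (scale 1, no index, zero displacement) into a plain copy of %base, which is
// what "optimize LEA to MOV" means here.
static bool rewriteZeroDispLEAToCopy(llvm::MachineBasicBlock::iterator II,
                                     const llvm::TargetInstrInfo &TII) {
  llvm::MachineInstr &MI = *II;
  llvm::Register Dst = MI.getOperand(0).getReg();
  llvm::Register Base = MI.getOperand(1).getReg();
  // copyPhysReg() operates on physical registers, so bail out on virtual ones,
  // which is the situation this commit guards against.
  if (!Dst.isPhysical() || !Base.isPhysical())
    return false;
  TII.copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), Dst.asMCReg(),
                  Base.asMCReg(), MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}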

llvm/test/CodeGen/X86/x86-64-baseptr.ll
Lines changed: 96 additions & 0 deletions

@@ -306,6 +306,102 @@ entry:
   ret void
 }

+; pr62625
+define void @vmw_host_printf(ptr %fmt, ...) nounwind {
+; CHECK-LABEL: vmw_host_printf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r10
+; CHECK-NEXT: andq $-16, %rsp
+; CHECK-NEXT: pushq -8(%r10)
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: movq %rsp, %rbp
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $200, %rsp
+; CHECK-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movq %rsi, -184(%rbp)
+; CHECK-NEXT: movq %rdx, -176(%rbp)
+; CHECK-NEXT: movq %rcx, -168(%rbp)
+; CHECK-NEXT: movq %r8, -160(%rbp)
+; CHECK-NEXT: movq %r9, -152(%rbp)
+; CHECK-NEXT: testb %al, %al
+; CHECK-NEXT: je .LBB3_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: movaps %xmm0, -144(%rbp)
+; CHECK-NEXT: movaps %xmm1, -128(%rbp)
+; CHECK-NEXT: movaps %xmm2, -112(%rbp)
+; CHECK-NEXT: movaps %xmm3, -96(%rbp)
+; CHECK-NEXT: movaps %xmm4, -80(%rbp)
+; CHECK-NEXT: movaps %xmm5, -64(%rbp)
+; CHECK-NEXT: movaps %xmm6, -48(%rbp)
+; CHECK-NEXT: movaps %xmm7, -32(%rbp)
+; CHECK-NEXT: .LBB3_2: # %entry
+; CHECK-NEXT: leaq -192(%rbp), %rax
+; CHECK-NEXT: movq %rax, (%rax)
+; CHECK-NEXT: leaq (%r10), %rax
+; CHECK-NEXT: movq %rax, (%rax)
+; CHECK-NEXT: movl $48, (%rax)
+; CHECK-NEXT: movl $8, (%rax)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; CHECK-NEXT: leaq -8(%rbp), %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: leaq -8(%r10), %rsp
+; CHECK-NEXT: retq
+;
+; X32ABI-LABEL: vmw_host_printf:
+; X32ABI: # %bb.0: # %entry
+; X32ABI-NEXT: pushq %rbp
+; X32ABI-NEXT: movl %esp, %ebp
+; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: andl $-16, %esp
+; X32ABI-NEXT: subl $208, %esp
+; X32ABI-NEXT: movl %esp, %ebx
+; X32ABI-NEXT: movq %rsi, 24(%ebx)
+; X32ABI-NEXT: movq %rdx, 32(%ebx)
+; X32ABI-NEXT: movq %rcx, 40(%ebx)
+; X32ABI-NEXT: movq %r8, 48(%ebx)
+; X32ABI-NEXT: movq %r9, 56(%ebx)
+; X32ABI-NEXT: testb %al, %al
+; X32ABI-NEXT: je .LBB3_2
+; X32ABI-NEXT: # %bb.1: # %entry
+; X32ABI-NEXT: movaps %xmm0, 64(%ebx)
+; X32ABI-NEXT: movaps %xmm1, 80(%ebx)
+; X32ABI-NEXT: movaps %xmm2, 96(%ebx)
+; X32ABI-NEXT: movaps %xmm3, 112(%ebx)
+; X32ABI-NEXT: movaps %xmm4, 128(%ebx)
+; X32ABI-NEXT: movaps %xmm5, 144(%ebx)
+; X32ABI-NEXT: movaps %xmm6, 160(%ebx)
+; X32ABI-NEXT: movaps %xmm7, 176(%ebx)
+; X32ABI-NEXT: .LBB3_2: # %entry
+; X32ABI-NEXT: leal 16(%rbx), %eax
+; X32ABI-NEXT: movl %eax, (%eax)
+; X32ABI-NEXT: leal 16(%rbp), %eax
+; X32ABI-NEXT: movl %eax, (%eax)
+; X32ABI-NEXT: movl $48, (%eax)
+; X32ABI-NEXT: movl $8, (%eax)
+; X32ABI-NEXT: xorl %eax, %eax
+; X32ABI-NEXT: xorl %ebx, %ebx
+; X32ABI-NEXT: xorl %ecx, %ecx
+; X32ABI-NEXT: #APP
+; X32ABI-NEXT: #NO_APP
+; X32ABI-NEXT: leal -8(%ebp), %esp
+; X32ABI-NEXT: popq %rbx
+; X32ABI-NEXT: popq %rbp
+; X32ABI-NEXT: retq
+entry:
+  %0 = alloca i8, i64 poison, align 8
+  call void @llvm.va_start(ptr nonnull poison)
+  %1 = call { i64, i64, i64, i64, i64, i64 } asm sideeffect "", "={ax},={bx},={cx},={dx},={si},={di},{ax},{bx},{cx},{dx},{si},{di},~{memory},~{dirflag},~{fpsr},~{flags}"(i32 0, i32 0, i32 0, i16 undef, i64 undef, i64 undef)
+  ret void
+}
+
+declare void @llvm.va_start(ptr)
+
 attributes #0 = {"frame-pointer"="all"}
 !llvm.module.flags = !{!0}
 !0 = !{i32 2, !"override-stack-alignment", i32 32}
