Skip to content
This repository was archived by the owner on Mar 28, 2020. It is now read-only.

Commit a5b8d5a

Browse files
committed
[X86] Add isel patterns for atomic_load+sub+atomic_sub.
Despite the comment removed in this patch (which claimed that subtractions of constants are always optimized into additions first), these patterns are beneficial when the RHS of the sub is a register rather than a constant.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@338930 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 592e45f commit a5b8d5a

File tree

2 files changed

+13
-25
lines changed

2 files changed

+13
-25
lines changed

lib/Target/X86/X86InstrCompiler.td

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -918,8 +918,7 @@ defm : RELEASE_BINOP_MI<"ADD", add>;
918918
defm : RELEASE_BINOP_MI<"AND", and>;
919919
defm : RELEASE_BINOP_MI<"OR", or>;
920920
defm : RELEASE_BINOP_MI<"XOR", xor>;
921-
// Note: we don't deal with sub, because substractions of constants are
922-
// optimized into additions before this code can run.
921+
defm : RELEASE_BINOP_MI<"SUB", sub>;
923922

924923
// Same as above, but for floating-point.
925924
// FIXME: imm version.

test/CodeGen/X86/atomic_mi.ll

Lines changed: 12 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -462,17 +462,14 @@ define void @add_32r_seq_cst(i32* %p, i32 %v) {
462462
define void @sub_8r(i8* %p, i8 %v) {
463463
; X64-LABEL: sub_8r:
464464
; X64: # %bb.0:
465-
; X64-NEXT: movb (%rdi), %al
466-
; X64-NEXT: subb %sil, %al
467-
; X64-NEXT: movb %al, (%rdi)
465+
; X64-NEXT: subb %sil, (%rdi)
468466
; X64-NEXT: retq
469467
;
470468
; X32-LABEL: sub_8r:
471469
; X32: # %bb.0:
472-
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
473-
; X32-NEXT: movb (%eax), %cl
474-
; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
475-
; X32-NEXT: movb %cl, (%eax)
470+
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
471+
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
472+
; X32-NEXT: subb %al, (%ecx)
476473
; X32-NEXT: retl
477474
%1 = load atomic i8, i8* %p seq_cst, align 1
478475
%2 = sub i8 %1, %v
@@ -485,17 +482,14 @@ define void @sub_16r(i16* %p, i16 %v) {
485482
; treat 16 bit arithmetic as expensive on X86/X86_64.
486483
; X64-LABEL: sub_16r:
487484
; X64: # %bb.0:
488-
; X64-NEXT: movzwl (%rdi), %eax
489-
; X64-NEXT: subw %si, %ax
490-
; X64-NEXT: movw %ax, (%rdi)
485+
; X64-NEXT: subw %si, (%rdi)
491486
; X64-NEXT: retq
492487
;
493488
; X32-LABEL: sub_16r:
494489
; X32: # %bb.0:
495-
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
496-
; X32-NEXT: movzwl (%eax), %ecx
497-
; X32-NEXT: subw {{[0-9]+}}(%esp), %cx
498-
; X32-NEXT: movw %cx, (%eax)
490+
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
491+
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
492+
; X32-NEXT: subw %ax, (%ecx)
499493
; X32-NEXT: retl
500494
%1 = load atomic i16, i16* %p acquire, align 2
501495
%2 = sub i16 %1, %v
@@ -506,17 +500,14 @@ define void @sub_16r(i16* %p, i16 %v) {
506500
define void @sub_32r(i32* %p, i32 %v) {
507501
; X64-LABEL: sub_32r:
508502
; X64: # %bb.0:
509-
; X64-NEXT: movl (%rdi), %eax
510-
; X64-NEXT: subl %esi, %eax
511-
; X64-NEXT: movl %eax, (%rdi)
503+
; X64-NEXT: subl %esi, (%rdi)
512504
; X64-NEXT: retq
513505
;
514506
; X32-LABEL: sub_32r:
515507
; X32: # %bb.0:
516508
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
517-
; X32-NEXT: movl (%eax), %ecx
518-
; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
519-
; X32-NEXT: movl %ecx, (%eax)
509+
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
510+
; X32-NEXT: subl %eax, (%ecx)
520511
; X32-NEXT: retl
521512
%1 = load atomic i32, i32* %p acquire, align 4
522513
%2 = sub i32 %1, %v
@@ -575,9 +566,7 @@ define i32 @sub_32r_ret_load(i32* %p, i32 %v) {
575566
define void @sub_64r(i64* %p, i64 %v) {
576567
; X64-LABEL: sub_64r:
577568
; X64: # %bb.0:
578-
; X64-NEXT: movq (%rdi), %rax
579-
; X64-NEXT: subq %rsi, %rax
580-
; X64-NEXT: movq %rax, (%rdi)
569+
; X64-NEXT: subq %rsi, (%rdi)
581570
; X64-NEXT: retq
582571
;
583572
; X32-LABEL: sub_64r:

0 commit comments

Comments (0)