@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=small | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=medium | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 -code-model=large | FileCheck %s --check-prefix=X64-LARGE
 
 define double @mmx_zero(double, double, double, double) nounwind {
 ; X86-LABEL: mmx_zero:
@@ -78,6 +80,41 @@ define double @mmx_zero(double, double, double, double) nounwind {
 ; X64-NEXT:    paddw %mm2, %mm0
 ; X64-NEXT:    movq2dq %mm0, %xmm0
 ; X64-NEXT:    retq
+;
+; X64-LARGE-LABEL: mmx_zero:
+; X64-LARGE:         # %bb.0:
+; X64-LARGE-NEXT:    movdq2q %xmm0, %mm0
+; X64-LARGE-NEXT:    movdq2q %xmm1, %mm5
+; X64-LARGE-NEXT:    movq %mm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-LARGE-NEXT:    movq %mm0, %mm3
+; X64-LARGE-NEXT:    paddd %mm5, %mm3
+; X64-LARGE-NEXT:    pxor %mm1, %mm1
+; X64-LARGE-NEXT:    movq %mm3, %mm6
+; X64-LARGE-NEXT:    pmuludq %mm1, %mm6
+; X64-LARGE-NEXT:    movdq2q %xmm2, %mm4
+; X64-LARGE-NEXT:    movq %mm6, %mm2
+; X64-LARGE-NEXT:    paddd %mm4, %mm2
+; X64-LARGE-NEXT:    paddw %mm2, %mm0
+; X64-LARGE-NEXT:    movq %mm5, %mm1
+; X64-LARGE-NEXT:    paddw %mm0, %mm1
+; X64-LARGE-NEXT:    movdq2q %xmm3, %mm5
+; X64-LARGE-NEXT:    movq %mm1, %mm7
+; X64-LARGE-NEXT:    pmuludq %mm5, %mm7
+; X64-LARGE-NEXT:    paddw %mm4, %mm7
+; X64-LARGE-NEXT:    paddw %mm7, %mm5
+; X64-LARGE-NEXT:    paddw %mm5, %mm2
+; X64-LARGE-NEXT:    paddw %mm2, %mm0
+; X64-LARGE-NEXT:    paddw %mm6, %mm0
+; X64-LARGE-NEXT:    pmuludq %mm3, %mm0
+; X64-LARGE-NEXT:    pxor %mm3, %mm3
+; X64-LARGE-NEXT:    paddw %mm3, %mm0
+; X64-LARGE-NEXT:    paddw %mm1, %mm0
+; X64-LARGE-NEXT:    pmuludq %mm7, %mm0
+; X64-LARGE-NEXT:    pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
+; X64-LARGE-NEXT:    paddw %mm5, %mm0
+; X64-LARGE-NEXT:    paddw %mm2, %mm0
+; X64-LARGE-NEXT:    movq2dq %mm0, %xmm0
+; X64-LARGE-NEXT:    retq
   %5 = bitcast double %0 to x86_mmx
   %6 = bitcast double %1 to x86_mmx
   %7 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %5, x86_mmx %6)
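
The assertion blocks in this diff are autogenerated, as the NOTE on the file's first line states. A minimal sketch of how one would regenerate them after changing the RUN lines, assuming a local build of llc; the build path and test filename here are illustrative, not taken from this commit:

  # Rewrite the X86/X64/X64-LARGE CHECK blocks in place using a locally built llc.
  python3 llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/X86/mmx-fold-zero.ll

The script runs each RUN line and emits one CHECK block per --check-prefix. The small and medium code-model RUN lines deliberately share the X64 prefix, which only works while both produce identical assembly; if they ever diverge, the script will not emit checks for the conflicting prefix and the two RUN lines will need distinct prefixes.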