; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
- ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
- ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=X64-SSSE3
- ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
+ ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE2
+ ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
+ ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -12,15 +12,10 @@ define i32 @t(<2 x i64>* %val) nounwind {
; X32-SSE2-NEXT: movl 8(%eax), %eax
; X32-SSE2-NEXT: retl
;
- ; X64-SSSE3-LABEL: t:
- ; X64-SSSE3: # %bb.0:
- ; X64-SSSE3-NEXT: movl 8(%rdi), %eax
- ; X64-SSSE3-NEXT: retq
- ;
- ; X64-AVX-LABEL: t:
- ; X64-AVX: # %bb.0:
- ; X64-AVX-NEXT: movl 8(%rdi), %eax
- ; X64-AVX-NEXT: retq
+ ; X64-LABEL: t:
+ ; X64: # %bb.0:
+ ; X64-NEXT: movl 8(%rdi), %eax
+ ; X64-NEXT: retq
%tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
@@ -34,13 +29,9 @@ define i32 @t2(<8 x i32>* %xp) {
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: retl
;
- ; X64-SSSE3-LABEL: t2:
- ; X64-SSSE3: # %bb.0:
- ; X64-SSSE3-NEXT: retq
- ;
- ; X64-AVX-LABEL: t2:
- ; X64-AVX: # %bb.0:
- ; X64-AVX-NEXT: retq
+ ; X64-LABEL: t2:
+ ; X64: # %bb.0:
+ ; X64-NEXT: retq
%x = load <8 x i32>, <8 x i32>* %xp
%Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
%y = extractelement <8 x i32> %Shuff68, i32 0
@@ -91,16 +82,10 @@ define i64 @t4(<2 x double>* %a) {
; X32-SSE2-NEXT: movd %xmm0, %edx
; X32-SSE2-NEXT: retl
;
- ; X64-SSSE3-LABEL: t4:
- ; X64-SSSE3: # %bb.0:
- ; X64-SSSE3-NEXT: movq (%rdi), %rax
- ; X64-SSSE3-NEXT: retq
- ;
- ; X64-AVX-LABEL: t4:
- ; X64-AVX: # %bb.0:
- ; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
- ; X64-AVX-NEXT: vpextrq $1, %xmm0, %rax
- ; X64-AVX-NEXT: retq
+ ; X64-LABEL: t4:
+ ; X64: # %bb.0:
+ ; X64-NEXT: movq (%rdi), %rax
+ ; X64-NEXT: retq
%b = load <2 x double>, <2 x double>* %a, align 16
%c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
%d = bitcast <2 x double> %c to <2 x i64>