; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X86-AVX2
; PR14887
; These tests inject a store into the chain to test the inreg versions of pmovsx
@@ -24,15 +24,15 @@ define void @test1(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test1:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbq (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test1:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbq (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <2 x i8>, ptr %in, align 1
%sext = sext <2 x i8> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -71,16 +71,16 @@ define void @test2(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test2:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbq (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test2:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbq (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <4 x i8>, ptr %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, ptr undef, align 8
@@ -105,15 +105,15 @@ define void @test3(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test3:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbd (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test3:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbd (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <4 x i8>, ptr %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i32>
store <4 x i32> zeroinitializer, ptr undef, align 8
@@ -152,16 +152,16 @@ define void @test4(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test4:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbd (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test4:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbd (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <8 x i8>, ptr %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i32>
store <8 x i32> zeroinitializer, ptr undef, align 8
@@ -186,15 +186,15 @@ define void @test5(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test5:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbw (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test5:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbw (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <8 x i8>, ptr %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i16>
store <8 x i16> zeroinitializer, ptr undef, align 8
@@ -233,16 +233,16 @@ define void @test6(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test6:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxbw (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test6:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxbw (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <16 x i8>, ptr %in, align 1
%sext = sext <16 x i8> %wide.load35 to <16 x i16>
store <16 x i16> zeroinitializer, ptr undef, align 8
@@ -267,15 +267,15 @@ define void @test7(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test7:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxwq (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test7:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxwq (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <2 x i16>, ptr %in, align 1
%sext = sext <2 x i16> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -314,16 +314,16 @@ define void @test8(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test8:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxwq (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test8:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxwq (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <4 x i16>, ptr %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, ptr undef, align 8
@@ -348,15 +348,15 @@ define void @test9(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test9:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxwd (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test9:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxwd (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <4 x i16>, ptr %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i32>
store <4 x i32> zeroinitializer, ptr undef, align 8
@@ -395,16 +395,16 @@ define void @test10(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test10:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxwd (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test10:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxwd (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <8 x i16>, ptr %in, align 1
%sext = sext <8 x i16> %wide.load35 to <8 x i32>
store <8 x i32> zeroinitializer, ptr undef, align 8
@@ -429,15 +429,15 @@ define void @test11(ptr %in, ptr %out) nounwind {
; AVX-NEXT: vmovdqu %xmm0, (%rsi)
; AVX-NEXT: retq
;
-; X32-AVX2-LABEL: test11:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxdq (%ecx), %xmm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %xmm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %xmm0, (%eax)
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test11:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxdq (%ecx), %xmm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %xmm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %xmm0, (%eax)
+; X86-AVX2-NEXT: retl
%wide.load35 = load <2 x i32>, ptr %in, align 1
%sext = sext <2 x i32> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, ptr undef, align 8
@@ -476,16 +476,16 @@ define void @test12(ptr %in, ptr %out) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; X32-AVX2-LABEL: test12:
-; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-AVX2-NEXT: vpmovsxdq (%ecx), %ymm0
-; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovups %ymm1, (%eax)
-; X32-AVX2-NEXT: vmovdqu %ymm0, (%eax)
-; X32-AVX2-NEXT: vzeroupper
-; X32-AVX2-NEXT: retl
+; X86-AVX2-LABEL: test12:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT: vpmovsxdq (%ecx), %ymm0
+; X86-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX2-NEXT: vmovups %ymm1, (%eax)
+; X86-AVX2-NEXT: vmovdqu %ymm0, (%eax)
+; X86-AVX2-NEXT: vzeroupper
+; X86-AVX2-NEXT: retl
%wide.load35 = load <4 x i32>, ptr %in, align 1
%sext = sext <4 x i32> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, ptr undef, align 8