@@ -36,3 +36,37 @@ entry:
   %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
   ret <2 x i64> %Y
 }
+
+; FIXME: We shouldn't shrink the load to movss here since it is volatile.
+define <4 x i32> @load_zmov_4i32_to_0zzz_volatile(<4 x i32>* %ptr) {
+; SSE-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: load_zmov_4i32_to_0zzz_volatile:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    retq
+entry:
+  %X = load volatile <4 x i32>, <4 x i32>* %ptr
+  %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
+  ret <4 x i32> %Y
+}
+
+; FIXME: We shouldn't shrink the load to movsd here since it is volatile.
+define <2 x i64> @load_zmov_2i64_to_0z_volatile(<2 x i64>* %ptr) {
+; SSE-LABEL: load_zmov_2i64_to_0z_volatile:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: load_zmov_2i64_to_0z_volatile:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    retq
+entry:
+  %X = load volatile <2 x i64>, <2 x i64>* %ptr
+  %Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
+  ret <2 x i64> %Y
+}
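
Note on the two FIXMEs above: a volatile load must be performed exactly as written, as one full-width vector load, so folding it into movss/vmovss (or movsd/vmovsd) narrows the 16-byte access to 4 or 8 bytes and changes the observable memory traffic, which is illegal when, for example, %ptr points at a memory-mapped device. The CHECK lines therefore pin down the current, incorrect codegen until the shrinking is fixed. For contrast, here is a minimal non-volatile sketch in which the narrowing is legal because only lane 0 of the load is demanded; this function is illustrative and not part of the patch:

; Hypothetical non-volatile counterpart (not in this diff): the backend
; may legally fold this load into movss, because only element 0 survives
; the shuffle and skipping the upper 12 bytes of a non-volatile load is
; unobservable.
define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32>* %ptr) {
entry:
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
  ret <4 x i32> %Y
}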