@@ -20,10 +20,10 @@ body: |
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
-    %12:_(<8 x s16>) = G_IMPLICIT_DEF
-    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, 6, 7)
-    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(<4 x s16>) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(4, 5, 6, 7)
+    G_STORE %4(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
     SI_RETURN
 ...
 
@@ -46,10 +46,10 @@ body: |
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
-    %12:_(<8 x s16>) = G_IMPLICIT_DEF
-    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    %11:_(<2 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(3, 4)
-    G_STORE %11(<2 x s16>), %1(p3) :: (store (<2 x s16>), addrspace 3)
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(<2 x s16>) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(3, 4)
+    G_STORE %4(<2 x s16>), %1(p3) :: (store (<2 x s16>), addrspace 3)
     SI_RETURN
 ...
 
@@ -73,10 +73,10 @@ body: |
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
-    %12:_(<8 x s16>) = G_IMPLICIT_DEF
-    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    %11:_(<3 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(0, 1, 2)
-    G_STORE %11(<3 x s16>), %1(p3) :: (store (<3 x s16>), addrspace 3)
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(<3 x s16>) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(0, 1, 2)
+    G_STORE %4(<3 x s16>), %1(p3) :: (store (<3 x s16>), addrspace 3)
     SI_RETURN
 ...
 
@@ -101,10 +101,10 @@ body: |
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
-    %12:_(<8 x s16>) = G_IMPLICIT_DEF
-    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, -1, 7)
-    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(<4 x s16>) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(4, 5, -1, 7)
+    G_STORE %4(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
     SI_RETURN
 ...
 
@@ -128,10 +128,34 @@ body: |
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
-    %12:_(<8 x s16>) = G_IMPLICIT_DEF
-    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(6, 7, 8, 9)
-    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(<4 x s16>) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(6, 7, 8, 9)
+    G_STORE %4(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
     SI_RETURN
 ...
 
+
+---
+name: shuffle_vector_to_copy
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    ; CHECK-LABEL: name: shuffle_vector_to_copy
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: G_STORE [[UV4]](s16), [[COPY1]](p3) :: (store (s16), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %2:_(<8 x s16>) = G_IMPLICIT_DEF
+    %3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %4:_(s16) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(4)
+    G_STORE %4(s16), %1(p3) :: (store (s16), addrspace 3)
+    SI_RETURN
+...