 # RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name: test_nxv2s8
-body: |
+name: test_nxv2i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s8
-body: |
+name: test_nxv4i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv8s8
-body: |
+name: test_nxv8i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv16s8
-body: |
+name: test_nxv16i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv32s8
-body: |
+name: test_nxv32i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv64s8
-body: |
+name: test_nxv64i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name: test_nxv2s16
-body: |
+name: test_nxv2i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s16
-body: |
+name: test_nxv4i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv8s16
-body: |
+name: test_nxv8i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv16s16
-body: |
+name: test_nxv16i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv32s16
-body: |
+name: test_nxv32i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name: test_nxv2s32
-body: |
+name: test_nxv2i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s32
-body: |
+name: test_nxv4i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv8s32
-body: |
+name: test_nxv8i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv16s32
-body: |
+name: test_nxv16i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
 ...
+