
Commit ce77aea

update all legalize mir files with correct LMUL>1 cases
1 parent c1df380 commit ce77aea

File tree

6 files changed (+1331, -925 lines)

llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-add-zve32x.mir

Lines changed: 172 additions & 111 deletions
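
The change visible in the hunk below: the tests were regenerated so that element counts requiring LMUL > 1 are passed and returned in vector register groups rather than single registers ($v8m2/$v10m2 for LMUL=2, $v8m4/$v12m4 for LMUL=4, $v8m8/$v16m8 for LMUL=8), the tests were renamed from the sN to the iN element-type spelling, and each test now copies the G_ADD result back into the return register (group) before PseudoRET. For reference, the updated LMUL=2 case (test_nxv16i8) from this file reads:

name: test_nxv16i8
body: |
  bb.0.entry:
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2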
@@ -2,212 +2,273 @@
 # RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 # RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
 ---
-name: test_nxv2s8
-body: |
+name: test_nxv2i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s8
+
+    ; CHECK-LABEL: name: test_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s8>) = COPY $v8
     %1:_(<vscale x 2 x s8>) = COPY $v9
     %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s8
-body: |
+name: test_nxv4i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s8
+
+    ; CHECK-LABEL: name: test_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s8>) = COPY $v8
     %1:_(<vscale x 4 x s8>) = COPY $v9
     %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv8s8
-body: |
+name: test_nxv8i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s8
+
+    ; CHECK-LABEL: name: test_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 8 x s8>) = COPY $v8
     %1:_(<vscale x 8 x s8>) = COPY $v9
     %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv16s8
-body: |
+name: test_nxv16i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s8>)
-    %0:_(<vscale x 16 x s8>) = COPY $v8
-    %1:_(<vscale x 16 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 16 x s8>) = COPY $v8m2
+    %1:_(<vscale x 16 x s8>) = COPY $v10m2
     %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv32s8
-body: |
+name: test_nxv32i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s8>)
-    %0:_(<vscale x 32 x s8>) = COPY $v8
-    %1:_(<vscale x 32 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 32 x s8>) = COPY $v8m4
+    %1:_(<vscale x 32 x s8>) = COPY $v12m4
     %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv64s8
-body: |
+name: test_nxv64i8
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv64s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 64 x s8>)
-    %0:_(<vscale x 64 x s8>) = COPY $v8
-    %1:_(<vscale x 64 x s8>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 64 x s8>) = COPY $v8m8
+    %1:_(<vscale x 64 x s8>) = COPY $v16m8
     %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name: test_nxv2s16
-body: |
+name: test_nxv2i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s16
+
+    ; CHECK-LABEL: name: test_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s16>) = COPY $v8
     %1:_(<vscale x 2 x s16>) = COPY $v9
     %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s16
-body: |
+name: test_nxv4i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s16
+
+    ; CHECK-LABEL: name: test_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 4 x s16>) = COPY $v8
     %1:_(<vscale x 4 x s16>) = COPY $v9
     %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv8s16
-body: |
+name: test_nxv8i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s16>)
-    %0:_(<vscale x 8 x s16>) = COPY $v8
-    %1:_(<vscale x 8 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 8 x s16>) = COPY $v8m2
+    %1:_(<vscale x 8 x s16>) = COPY $v10m2
     %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv16s16
-body: |
+name: test_nxv16i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s16>)
-    %0:_(<vscale x 16 x s16>) = COPY $v8
-    %1:_(<vscale x 16 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 16 x s16>) = COPY $v8m4
+    %1:_(<vscale x 16 x s16>) = COPY $v12m4
     %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv32s16
-body: |
+name: test_nxv32i16
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv32s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 32 x s16>)
-    %0:_(<vscale x 32 x s16>) = COPY $v8
-    %1:_(<vscale x 32 x s16>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 32 x s16>) = COPY $v8m8
+    %1:_(<vscale x 32 x s16>) = COPY $v16m8
     %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
 ...
 ---
-name: test_nxv2s32
-body: |
+name: test_nxv2i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv2s32
+
+    ; CHECK-LABEL: name: test_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(<vscale x 2 x s32>) = COPY $v8
     %1:_(<vscale x 2 x s32>) = COPY $v9
     %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8 = COPY %2(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
 ...
 ---
-name: test_nxv4s32
-body: |
+name: test_nxv4i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 4 x s32>)
-    %0:_(<vscale x 4 x s32>) = COPY $v8
-    %1:_(<vscale x 4 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(<vscale x 4 x s32>) = COPY $v8m2
+    %1:_(<vscale x 4 x s32>) = COPY $v10m2
     %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m2 = COPY %2(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
 ...
 ---
-name: test_nxv8s32
-body: |
+name: test_nxv8i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv8s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 8 x s32>)
-    %0:_(<vscale x 8 x s32>) = COPY $v8
-    %1:_(<vscale x 8 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(<vscale x 8 x s32>) = COPY $v8m4
+    %1:_(<vscale x 8 x s32>) = COPY $v12m4
     %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m4 = COPY %2(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
 ...
 ---
-name: test_nxv16s32
-body: |
+name: test_nxv16i32
+body: |
   bb.0.entry:
-    ; CHECK-LABEL: name: test_nxv16s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v9
+
+    ; CHECK-LABEL: name: test_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: PseudoRET implicit [[ADD]](<vscale x 16 x s32>)
-    %0:_(<vscale x 16 x s32>) = COPY $v8
-    %1:_(<vscale x 16 x s32>) = COPY $v9
+    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(<vscale x 16 x s32>) = COPY $v8m8
+    %1:_(<vscale x 16 x s32>) = COPY $v16m8
     %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
-    PseudoRET implicit %2
+    $v8m8 = COPY %2(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
 ...
+
