@@ -162,7 +162,7 @@ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -171,7 +171,7 @@ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 1 x i16>, ptr %pa
@@ -184,7 +184,7 @@ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -193,7 +193,7 @@ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 2 x i16>, ptr %pa
@@ -206,7 +206,7 @@ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -215,7 +215,7 @@ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 4 x i16>, ptr %pa
@@ -228,7 +228,7 @@ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
@@ -237,7 +237,7 @@ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
%va = load <vscale x 8 x i16>, ptr %pa
@@ -250,7 +250,7 @@ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
@@ -259,7 +259,7 @@ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
%va = load <vscale x 16 x i16>, ptr %pa
@@ -272,7 +272,7 @@ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
@@ -281,7 +281,7 @@ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m8
%va = load <vscale x 32 x i16>, ptr %pa
@@ -294,7 +294,7 @@ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -303,7 +303,7 @@ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 1 x i32>, ptr %pa
@@ -316,7 +316,7 @@ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -325,7 +325,7 @@ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 2 x i32>, ptr %pa
@@ -338,7 +338,7 @@ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
@@ -347,7 +347,7 @@ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m2
%va = load <vscale x 4 x i32>, ptr %pa
@@ -360,7 +360,7 @@ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
@@ -369,7 +369,7 @@ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m4
%va = load <vscale x 8 x i32>, ptr %pa
@@ -382,7 +382,7 @@ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
@@ -391,7 +391,7 @@ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m8
%va = load <vscale x 16 x i32>, ptr %pa
@@ -404,7 +404,7 @@ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV32-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -413,7 +413,7 @@ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64-NEXT: PseudoRET implicit $v8
%va = load <vscale x 1 x i64>, ptr %pa
@@ -426,7 +426,7 @@ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
@@ -435,7 +435,7 @@ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m2
%va = load <vscale x 2 x i64>, ptr %pa
@@ -448,7 +448,7 @@ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV32-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
@@ -457,7 +457,7 @@ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m4
%va = load <vscale x 4 x i64>, ptr %pa
@@ -470,7 +470,7 @@ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
; RV32-NEXT: liveins: $x10
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+ ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV32-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
@@ -479,7 +479,7 @@ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
; RV64-NEXT: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
- ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+ ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8
%va = load <vscale x 8 x i64>, ptr %pa