
Commit 4fa1f36

update test cases
1 parent 56c8328 commit 4fa1f36

File tree

llvm/test/CodeGen/RISCV/GlobalISel/irtranslator

1 file changed: +30 -30 lines changed

llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ld.ll

Lines changed: 30 additions & 30 deletions
@@ -162,7 +162,7 @@ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -171,7 +171,7 @@ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa, align 2)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 1 x i16>, ptr %pa
@@ -184,7 +184,7 @@ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -193,7 +193,7 @@ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa, align 4)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 2 x i16>, ptr %pa
@@ -206,7 +206,7 @@ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -215,7 +215,7 @@ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 8)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 4 x i16>, ptr %pa
@@ -228,7 +228,7 @@ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8m2
   ;
@@ -237,7 +237,7 @@ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa, align 16)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8m2
   %va = load <vscale x 8 x i16>, ptr %pa
@@ -250,7 +250,7 @@ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8m4
   ;
@@ -259,7 +259,7 @@ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa, align 32)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8m4
   %va = load <vscale x 16 x i16>, ptr %pa
@@ -272,7 +272,7 @@ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
   ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
   ; RV32-NEXT:   PseudoRET implicit $v8m8
   ;
@@ -281,7 +281,7 @@ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa, align 64)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
   ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
   ; RV64-NEXT:   PseudoRET implicit $v8m8
   %va = load <vscale x 32 x i16>, ptr %pa
@@ -294,7 +294,7 @@ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -303,7 +303,7 @@ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa, align 4)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 1 x i32>, ptr %pa
@@ -316,7 +316,7 @@ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -325,7 +325,7 @@ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 8)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 2 x i32>, ptr %pa
@@ -338,7 +338,7 @@ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
   ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
   ; RV32-NEXT:   PseudoRET implicit $v8m2
   ;
@@ -347,7 +347,7 @@ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa, align 16)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
   ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
   ; RV64-NEXT:   PseudoRET implicit $v8m2
   %va = load <vscale x 4 x i32>, ptr %pa
@@ -360,7 +360,7 @@ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
   ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
   ; RV32-NEXT:   PseudoRET implicit $v8m4
   ;
@@ -369,7 +369,7 @@ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa, align 32)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
   ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
   ; RV64-NEXT:   PseudoRET implicit $v8m4
   %va = load <vscale x 8 x i32>, ptr %pa
@@ -382,7 +382,7 @@ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
   ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
   ; RV32-NEXT:   PseudoRET implicit $v8m8
   ;
@@ -391,7 +391,7 @@ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa, align 64)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
   ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
   ; RV64-NEXT:   PseudoRET implicit $v8m8
   %va = load <vscale x 16 x i32>, ptr %pa
@@ -404,7 +404,7 @@ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
   ; RV32-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
   ; RV32-NEXT:   PseudoRET implicit $v8
   ;
@@ -413,7 +413,7 @@ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa, align 8)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
   ; RV64-NEXT:   $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
   ; RV64-NEXT:   PseudoRET implicit $v8
   %va = load <vscale x 1 x i64>, ptr %pa
@@ -426,7 +426,7 @@ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
   ; RV32-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
   ; RV32-NEXT:   PseudoRET implicit $v8m2
   ;
@@ -435,7 +435,7 @@ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 16)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
   ; RV64-NEXT:   $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
   ; RV64-NEXT:   PseudoRET implicit $v8m2
   %va = load <vscale x 2 x i64>, ptr %pa
@@ -448,7 +448,7 @@ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
   ; RV32-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
   ; RV32-NEXT:   PseudoRET implicit $v8m4
   ;
@@ -457,7 +457,7 @@ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa, align 32)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
   ; RV64-NEXT:   $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
   ; RV64-NEXT:   PseudoRET implicit $v8m4
   %va = load <vscale x 4 x i64>, ptr %pa
@@ -470,7 +470,7 @@ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
   ; RV32-NEXT:   liveins: $x10
   ; RV32-NEXT:   {{ $}}
   ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+  ; RV32-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
   ; RV32-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
   ; RV32-NEXT:   PseudoRET implicit $v8m8
   ;
@@ -479,7 +479,7 @@ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) {
   ; RV64-NEXT:   liveins: $x10
   ; RV64-NEXT:   {{ $}}
   ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa, align 64)
+  ; RV64-NEXT:   [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
   ; RV64-NEXT:   $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
   ; RV64-NEXT:   PseudoRET implicit $v8m8
   %va = load <vscale x 8 x i64>, ptr %pa
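The change is identical in every hunk above: the explicit ", align N" annotation disappears from the G_LOAD memory operand that the IRTranslator is expected to produce, presumably because each load uses the natural alignment of its scalable vector type and the printed memory operand no longer spells that out. For reference, a minimal sketch of one updated test case; the RUN lines and the ret instruction fall outside the hunks shown in this diff, so they are assumptions based on the usual shape of GlobalISel irtranslator tests, not text from this commit:

; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
; RUN:   -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=RV32

define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) {
  ; RV32: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
  ; RV32: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
  %va = load <vscale x 1 x i16>, ptr %pa
  ret <vscale x 1 x i16> %va    ; assumed: the ret is outside the diff context shown
}

Since the IR load carries no align attribute, the memory operand takes the ABI alignment of <vscale x 1 x i16>, which matches what the updated CHECK lines expect.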
