@@ -310,23 +310,24 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+ ; CHECK-NEXT: mv a6, a7
; CHECK-NEXT: bltu a7, a3, .LBB16_4
; CHECK-NEXT: # %bb.3:
- ; CHECK-NEXT: li a7, 64
+ ; CHECK-NEXT: li a6, 64
; CHECK-NEXT: .LBB16_4:
; CHECK-NEXT: addi a5, a1, 384
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
- ; CHECK-NEXT: csrr a6, vlenb
- ; CHECK-NEXT: slli a6, a6, 3
- ; CHECK-NEXT: add a6, sp, a6
- ; CHECK-NEXT: addi a6, a6, 16
- ; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
- ; CHECK-NEXT: addi a6, a7, -32
- ; CHECK-NEXT: sltu t0, a7, a6
- ; CHECK-NEXT: addi t0, t0, -1
- ; CHECK-NEXT: and a6, t0, a6
+ ; CHECK-NEXT: csrr t0, vlenb
+ ; CHECK-NEXT: slli t0, t0, 3
+ ; CHECK-NEXT: add t0, sp, t0
+ ; CHECK-NEXT: addi t0, t0, 16
+ ; CHECK-NEXT: vs8r.v v8, (t0) # Unknown-size Folded Spill
+ ; CHECK-NEXT: addi t0, a6, -32
+ ; CHECK-NEXT: sltu a6, a6, t0
+ ; CHECK-NEXT: addi a6, a6, -1
+ ; CHECK-NEXT: and a6, a6, t0
; CHECK-NEXT: addi t0, a6, -16
; CHECK-NEXT: sltu t1, a6, t0
; CHECK-NEXT: addi t1, t1, -1
@@ -364,14 +365,15 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+ ; CHECK-NEXT: mv a5, a4
; CHECK-NEXT: bltu a4, a3, .LBB16_8
; CHECK-NEXT: # %bb.7:
- ; CHECK-NEXT: li a4, 32
+ ; CHECK-NEXT: li a5, 32
; CHECK-NEXT: .LBB16_8:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
- ; CHECK-NEXT: addi a1, a4, -16
- ; CHECK-NEXT: sltu a5, a4, a1
+ ; CHECK-NEXT: addi a1, a5, -16
+ ; CHECK-NEXT: sltu a5, a5, a1
; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a1, a5, a1
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
@@ -387,62 +389,63 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: vmv1r.v v0, v5
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v8
+ ; CHECK-NEXT: mv a1, a7
; CHECK-NEXT: bltu a7, a3, .LBB16_12
; CHECK-NEXT: # %bb.11:
- ; CHECK-NEXT: li a7, 32
+ ; CHECK-NEXT: li a1, 32
; CHECK-NEXT: .LBB16_12:
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 24
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 24
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v24, v8
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 56
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 56
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v24, 16
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 56
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: slli a1, a1, 4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 56
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: slli a4, a4, 4
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v24, v8
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 48
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 48
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v24, 16
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 48
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 48
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; CHECK-NEXT: vmv4r.v v8, v0
; CHECK-NEXT: vslideup.vi v8, v16, 16
- ; CHECK-NEXT: csrr a1, vlenb
- ; CHECK-NEXT: li a4, 24
- ; CHECK-NEXT: mul a1, a1, a4
- ; CHECK-NEXT: add a1, sp, a1
- ; CHECK-NEXT: addi a1, a1, 16
- ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
- ; CHECK-NEXT: addi a1, a7, -16
- ; CHECK-NEXT: sltu a4, a7, a1
- ; CHECK-NEXT: addi a4, a4, -1
- ; CHECK-NEXT: and a1, a4, a1
+ ; CHECK-NEXT: csrr a4, vlenb
+ ; CHECK-NEXT: li a5, 24
+ ; CHECK-NEXT: mul a4, a4, a5
+ ; CHECK-NEXT: add a4, sp, a4
+ ; CHECK-NEXT: addi a4, a4, 16
+ ; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+ ; CHECK-NEXT: addi a4, a1, -16
+ ; CHECK-NEXT: sltu a1, a1, a4
+ ; CHECK-NEXT: addi a1, a1, -1
+ ; CHECK-NEXT: and a1, a1, a4
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: csrr a1, vlenb