@@ -887,12 +887,12 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfl
; CHECK-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
; CHECK-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; CHECK-NEXT: vmv.v.x v24, a2
+ ; CHECK-NEXT: vmv.v.x v8, a2
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
- ; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
@@ -2425,12 +2425,12 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16(<vscale x 32 x half> %va, half %b,
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; ZVFHMIN-NEXT: vmv.v.x v24, a2
+ ; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -8250,13 +8250,13 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
- ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
+ ; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+ ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -8548,12 +8548,12 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; ZVFHMIN-NEXT: vmv.v.x v24, a2
+ ; ZVFHMIN-NEXT: vmv.v.x v16, a2
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -9098,9 +9098,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
- ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+ ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
- ; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -10808,9 +10808,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
- ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+ ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: addi a2, sp, 16
- ; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a3, a2
@@ -11397,12 +11397,12 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; ZVFHMIN-NEXT: vmv.v.x v24, a2
+ ; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -11546,12 +11546,12 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: addi a4, a4, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; ZVFHMIN-NEXT: vmv.v.x v16, a2
+ ; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 5
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -11805,15 +11805,15 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
; ZVFHMIN-NEXT: addi a4, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
- ; ZVFHMIN-NEXT: vmv.v.x v16, a2
+ ; ZVFHMIN-NEXT: vmv.v.x v8, a2
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a4, a2
; ZVFHMIN-NEXT: slli a2, a2, 1
; ZVFHMIN-NEXT: add a2, a2, a4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
- ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+ ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: mv a4, a2