Skip to content

Commit 022fe3b

Browse files
committed
RISCV: Implement isLoadFromStackSlot/isStoreToStackSlot for rvv
This partially helps avoid regressions that would be introduced by a future regalloc patch. It isn't sufficient on its own, and I think there are more missing implementations of the copy and spill hooks.
1 parent e8dba3a commit 022fe3b

File tree

4 files changed

+41
-29
lines changed

4 files changed

+41
-29
lines changed

llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -118,6 +118,12 @@ Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
118118
case RISCV::FLD:
119119
MemBytes = 8;
120120
break;
121+
case RISCV::VL8RE8_V:
122+
if (!MI.getOperand(1).isFI())
123+
return Register();
124+
FrameIndex = MI.getOperand(1).getIndex();
125+
MemBytes = ~0u;
126+
return MI.getOperand(0).getReg();
121127
}
122128

123129
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
@@ -158,6 +164,12 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
158164
case RISCV::FSD:
159165
MemBytes = 8;
160166
break;
167+
case RISCV::VS8R_V:
168+
if (!MI.getOperand(1).isFI())
169+
return Register();
170+
FrameIndex = MI.getOperand(1).getIndex();
171+
MemBytes = ~0u;
172+
return MI.getOperand(0).getReg();
161173
}
162174

163175
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&

llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2339,14 +2339,14 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
23392339
; RV32-NEXT: add a4, sp, a4
23402340
; RV32-NEXT: addi a4, a4, 16
23412341
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
2342-
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
2342+
; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t
23432343
; RV32-NEXT: csrr a4, vlenb
23442344
; RV32-NEXT: li a5, 48
23452345
; RV32-NEXT: mul a4, a4, a5
23462346
; RV32-NEXT: add a4, sp, a4
23472347
; RV32-NEXT: addi a4, a4, 16
2348-
; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
2349-
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
2348+
; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
2349+
; RV32-NEXT: vand.vv v16, v8, v16, v0.t
23502350
; RV32-NEXT: csrr a4, vlenb
23512351
; RV32-NEXT: li a5, 24
23522352
; RV32-NEXT: mul a4, a4, a5

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1002,24 +1002,24 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
10021002
; RV64-NEXT: addi a1, a1, 16
10031003
; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
10041004
; RV64-NEXT: csrr a1, vlenb
1005-
; RV64-NEXT: li a2, 68
1006-
; RV64-NEXT: mul a1, a1, a2
1005+
; RV64-NEXT: slli a1, a1, 5
10071006
; RV64-NEXT: add a1, sp, a1
10081007
; RV64-NEXT: addi a1, a1, 16
1009-
; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
1008+
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
10101009
; RV64-NEXT: csrr a1, vlenb
1011-
; RV64-NEXT: slli a1, a1, 5
1010+
; RV64-NEXT: li a2, 68
1011+
; RV64-NEXT: mul a1, a1, a2
10121012
; RV64-NEXT: add a1, sp, a1
10131013
; RV64-NEXT: addi a1, a1, 16
1014-
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
1014+
; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
10151015
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
1016-
; RV64-NEXT: vmv.v.v v16, v8
1016+
; RV64-NEXT: vmv.v.v v12, v8
10171017
; RV64-NEXT: csrr a1, vlenb
10181018
; RV64-NEXT: li a2, 68
10191019
; RV64-NEXT: mul a1, a1, a2
10201020
; RV64-NEXT: add a1, sp, a1
10211021
; RV64-NEXT: addi a1, a1, 16
1022-
; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
1022+
; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
10231023
; RV64-NEXT: csrr a1, vlenb
10241024
; RV64-NEXT: li a2, 76
10251025
; RV64-NEXT: mul a1, a1, a2

llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll

Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -887,12 +887,12 @@ define <vscale x 32 x bfloat> @vfma_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfl
887887
; CHECK-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
888888
; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
889889
; CHECK-NEXT: vsetvli a4, zero, e16, m8, ta, ma
890-
; CHECK-NEXT: vmv.v.x v24, a2
890+
; CHECK-NEXT: vmv.v.x v8, a2
891891
; CHECK-NEXT: csrr a2, vlenb
892892
; CHECK-NEXT: slli a2, a2, 5
893893
; CHECK-NEXT: add a2, sp, a2
894894
; CHECK-NEXT: addi a2, a2, 16
895-
; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
895+
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
896896
; CHECK-NEXT: csrr a2, vlenb
897897
; CHECK-NEXT: slli a2, a2, 5
898898
; CHECK-NEXT: add a2, sp, a2
@@ -2425,12 +2425,12 @@ define <vscale x 32 x half> @vfma_vf_nxv32f16(<vscale x 32 x half> %va, half %b,
24252425
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
24262426
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
24272427
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
2428-
; ZVFHMIN-NEXT: vmv.v.x v24, a2
2428+
; ZVFHMIN-NEXT: vmv.v.x v8, a2
24292429
; ZVFHMIN-NEXT: csrr a2, vlenb
24302430
; ZVFHMIN-NEXT: slli a2, a2, 5
24312431
; ZVFHMIN-NEXT: add a2, sp, a2
24322432
; ZVFHMIN-NEXT: addi a2, a2, 16
2433-
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
2433+
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
24342434
; ZVFHMIN-NEXT: csrr a2, vlenb
24352435
; ZVFHMIN-NEXT: slli a2, a2, 5
24362436
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -8250,13 +8250,13 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
82508250
; ZVFHMIN-NEXT: slli a2, a2, 5
82518251
; ZVFHMIN-NEXT: add a2, sp, a2
82528252
; ZVFHMIN-NEXT: addi a2, a2, 16
8253-
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
8254-
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
8253+
; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
8254+
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
82558255
; ZVFHMIN-NEXT: csrr a2, vlenb
82568256
; ZVFHMIN-NEXT: slli a2, a2, 3
82578257
; ZVFHMIN-NEXT: add a2, sp, a2
82588258
; ZVFHMIN-NEXT: addi a2, a2, 16
8259-
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
8259+
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
82608260
; ZVFHMIN-NEXT: csrr a2, vlenb
82618261
; ZVFHMIN-NEXT: slli a2, a2, 3
82628262
; ZVFHMIN-NEXT: mv a3, a2
@@ -8548,12 +8548,12 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, half %
85488548
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
85498549
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t
85508550
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
8551-
; ZVFHMIN-NEXT: vmv.v.x v24, a2
8551+
; ZVFHMIN-NEXT: vmv.v.x v16, a2
85528552
; ZVFHMIN-NEXT: csrr a2, vlenb
85538553
; ZVFHMIN-NEXT: slli a2, a2, 5
85548554
; ZVFHMIN-NEXT: add a2, sp, a2
85558555
; ZVFHMIN-NEXT: addi a2, a2, 16
8556-
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
8556+
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
85578557
; ZVFHMIN-NEXT: csrr a2, vlenb
85588558
; ZVFHMIN-NEXT: slli a2, a2, 5
85598559
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -9098,9 +9098,9 @@ define <vscale x 32 x half> @vfnmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
90989098
; ZVFHMIN-NEXT: add a2, sp, a2
90999099
; ZVFHMIN-NEXT: addi a2, a2, 16
91009100
; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
9101-
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
9101+
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
91029102
; ZVFHMIN-NEXT: addi a2, sp, 16
9103-
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
9103+
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
91049104
; ZVFHMIN-NEXT: csrr a2, vlenb
91059105
; ZVFHMIN-NEXT: slli a2, a2, 3
91069106
; ZVFHMIN-NEXT: mv a3, a2
@@ -10808,9 +10808,9 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
1080810808
; ZVFHMIN-NEXT: add a2, sp, a2
1080910809
; ZVFHMIN-NEXT: addi a2, a2, 16
1081010810
; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
10811-
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
10811+
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t
1081210812
; ZVFHMIN-NEXT: addi a2, sp, 16
10813-
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
10813+
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
1081410814
; ZVFHMIN-NEXT: csrr a2, vlenb
1081510815
; ZVFHMIN-NEXT: slli a2, a2, 3
1081610816
; ZVFHMIN-NEXT: mv a3, a2
@@ -11397,12 +11397,12 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, half
1139711397
; ZVFHMIN-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
1139811398
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t
1139911399
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
11400-
; ZVFHMIN-NEXT: vmv.v.x v24, a2
11400+
; ZVFHMIN-NEXT: vmv.v.x v8, a2
1140111401
; ZVFHMIN-NEXT: csrr a2, vlenb
1140211402
; ZVFHMIN-NEXT: slli a2, a2, 5
1140311403
; ZVFHMIN-NEXT: add a2, sp, a2
1140411404
; ZVFHMIN-NEXT: addi a2, a2, 16
11405-
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
11405+
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
1140611406
; ZVFHMIN-NEXT: csrr a2, vlenb
1140711407
; ZVFHMIN-NEXT: slli a2, a2, 5
1140811408
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -11546,12 +11546,12 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_commute(<vscale x 32 x half> %v
1154611546
; ZVFHMIN-NEXT: addi a4, a4, 16
1154711547
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
1154811548
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
11549-
; ZVFHMIN-NEXT: vmv.v.x v16, a2
11549+
; ZVFHMIN-NEXT: vmv.v.x v8, a2
1155011550
; ZVFHMIN-NEXT: csrr a2, vlenb
1155111551
; ZVFHMIN-NEXT: slli a2, a2, 5
1155211552
; ZVFHMIN-NEXT: add a2, sp, a2
1155311553
; ZVFHMIN-NEXT: addi a2, a2, 16
11554-
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
11554+
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
1155511555
; ZVFHMIN-NEXT: csrr a2, vlenb
1155611556
; ZVFHMIN-NEXT: slli a2, a2, 5
1155711557
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -11805,15 +11805,15 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_unmasked_commute(<vscale x 32 x
1180511805
; ZVFHMIN-NEXT: addi a4, sp, 16
1180611806
; ZVFHMIN-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
1180711807
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma
11808-
; ZVFHMIN-NEXT: vmv.v.x v16, a2
11808+
; ZVFHMIN-NEXT: vmv.v.x v8, a2
1180911809
; ZVFHMIN-NEXT: csrr a2, vlenb
1181011810
; ZVFHMIN-NEXT: slli a2, a2, 3
1181111811
; ZVFHMIN-NEXT: mv a4, a2
1181211812
; ZVFHMIN-NEXT: slli a2, a2, 1
1181311813
; ZVFHMIN-NEXT: add a2, a2, a4
1181411814
; ZVFHMIN-NEXT: add a2, sp, a2
1181511815
; ZVFHMIN-NEXT: addi a2, a2, 16
11816-
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
11816+
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
1181711817
; ZVFHMIN-NEXT: csrr a2, vlenb
1181811818
; ZVFHMIN-NEXT: slli a2, a2, 3
1181911819
; ZVFHMIN-NEXT: mv a4, a2

0 commit comments

Comments
 (0)