Commit 76d252b

[RISCV] Rematerialize vmv.v.i
This continues the line of work started in llvm#97520, and gives a 2.5% reduction in the number of spills on SPEC CPU 2017.

Program          regalloc.NumSpills           regalloc.NumReloads          regalloc.NumReMaterialization
                 lhs       rhs      diff      lhs       rhs      diff      lhs       rhs      diff
605.mcf_s          141.00    141.00   0.0%      372.00    372.00   0.0%      123.00    123.00   0.0%
505.mcf_r          141.00    141.00   0.0%      372.00    372.00   0.0%      123.00    123.00   0.0%
519.lbm_r           73.00     73.00   0.0%       75.00     75.00   0.0%       18.00     18.00   0.0%
619.lbm_s           68.00     68.00   0.0%       70.00     70.00   0.0%       20.00     20.00   0.0%
631.deepsjeng_s    354.00    353.00  -0.3%      683.00    682.00  -0.1%      529.00    530.00   0.2%
531.deepsjeng_r    354.00    353.00  -0.3%      683.00    682.00  -0.1%      529.00    530.00   0.2%
625.x264_s        1896.00   1886.00  -0.5%     4583.00   4561.00  -0.5%     2086.00   2108.00   1.1%
525.x264_r        1896.00   1886.00  -0.5%     4583.00   4561.00  -0.5%     2086.00   2108.00   1.1%
508.namd_r        6665.00   6598.00  -1.0%    15649.00  15509.00  -0.9%     3014.00   3164.00   5.0%
644.nab_s          761.00    753.00  -1.1%     1199.00   1183.00  -1.3%     1542.00   1559.00   1.1%
544.nab_r          761.00    753.00  -1.1%     1199.00   1183.00  -1.3%     1542.00   1559.00   1.1%
638.imagick_s     4287.00   4181.00  -2.5%    11624.00  11342.00  -2.4%    10551.00  10884.00   3.2%
538.imagick_r     4287.00   4181.00  -2.5%    11624.00  11342.00  -2.4%    10551.00  10884.00   3.2%
602.gcc_s        12771.00  12450.00  -2.5%    28117.00  27328.00  -2.8%    49757.00  50526.00   1.5%
502.gcc_r        12771.00  12450.00  -2.5%    28117.00  27328.00  -2.8%    49757.00  50526.00   1.5%
Geomean difference                   -2.5%                        -2.6%                         1.8%

I initially held off submitting this patch because it surprisingly introduced a lot of spills in the test diffs, but after llvm#107290 the vmv.v.i instructions that caused them are now gone.

The gist is that marking vmv.v.i as rematerializable decreased its spill weight, which actually resulted in more m8 registers getting evicted and spilled during register allocation. The SPEC results show this isn't an issue in practice, though, and I plan on posting a separate patch to explain this in more detail.
1 parent 5b23e09 commit 76d252b
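
To illustrate what rematerialization buys here, a minimal sketch distilled from the remat.ll diff below (register numbers, the scratch-slot address in a1, and the elided middle section are illustrative only, not the allocator's actual output): instead of keeping the vmv.v.i result alive across a high-pressure region with a spill and reload, the allocator can re-emit the instruction right before the use, since an immediate splat has no inputs other than vl/vtype.

    # Before (vmv.v.i not rematerializable): the splat is carried across
    # the region via the stack.
    vmv.v.i   v8, 1
    vs8r.v    v8, (a1)        # Unknown-size Folded Spill
    ...                       # v8-v31 all live here
    vl8r.v    v8, (a1)        # Unknown-size Folded Reload
    vs8r.v    v8, (a0)

    # After (this patch): the value is recomputed instead of reloaded,
    # so the scratch slot and the stack traffic disappear.
    vmv.v.i   v8, 1
    vs8r.v    v8, (a0)
    ...
    vmv.v.i   v8, 1           # rematerialized at the use
    vs8r.v    v8, (a0)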

4 files changed: +60 additions, -35 deletions


llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 13 additions & 7 deletions
@@ -168,13 +168,19 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
 
 bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
     const MachineInstr &MI) const {
-  if (RISCV::getRVVMCOpcode(MI.getOpcode()) == RISCV::VID_V &&
-      MI.getOperand(1).isUndef() &&
-      /* After RISCVInsertVSETVLI most pseudos will have implicit uses on vl and
-         vtype. Make sure we only rematerialize before RISCVInsertVSETVLI
-         i.e. -riscv-vsetvl-after-rvv-regalloc=true */
-      !MI.hasRegisterImplicitUseOperand(RISCV::VTYPE))
-    return true;
+  switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
+  case RISCV::VMV_V_I:
+  case RISCV::VID_V:
+    if (MI.getOperand(1).isUndef() &&
+        /* After RISCVInsertVSETVLI most pseudos will have implicit uses on vl
+           and vtype. Make sure we only rematerialize before RISCVInsertVSETVLI
+           i.e. -riscv-vsetvl-after-rvv-regalloc=true */
+        !MI.hasRegisterImplicitUseOperand(RISCV::VTYPE))
+      return true;
+    break;
+  default:
+    break;
+  }
   return TargetInstrInfo::isReallyTriviallyReMaterializable(MI);
 }

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 1 addition & 0 deletions
@@ -2478,6 +2478,7 @@ multiclass VPseudoUnaryVMV_V_X_I {
     def "_X_" # mx : VPseudoUnaryNoMask<m.vrclass, GPR>,
                      SchedUnary<"WriteVIMovX", "ReadVIMovX", mx,
                                 forcePassthruRead=true>;
+    let isReMaterializable = 1 in
     def "_I_" # mx : VPseudoUnaryNoMask<m.vrclass, simm5>,
                      SchedNullary<"WriteVIMovI", mx,
                                   forcePassthruRead=true>;

llvm/test/CodeGen/RISCV/rvv/remat.ll

Lines changed: 45 additions & 28 deletions
@@ -111,34 +111,51 @@ define void @vid_passthru(ptr %p, <vscale x 8 x i64> %v) {
 }
 
 define void @vmv.v.i(ptr %p) {
-; CHECK-LABEL: vmv.v.i:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 1
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vl8re64.v v16, (a0)
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v0, (a0)
-; CHECK-NEXT: vs8r.v v24, (a0)
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v16, (a0)
-; CHECK-NEXT: vs8r.v v8, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+; POSTRA-LABEL: vmv.v.i:
+; POSTRA: # %bb.0:
+; POSTRA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; POSTRA-NEXT: vmv.v.i v8, 1
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vl8re64.v v16, (a0)
+; POSTRA-NEXT: vl8re64.v v24, (a0)
+; POSTRA-NEXT: vl8re64.v v0, (a0)
+; POSTRA-NEXT: vl8re64.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: vs8r.v v0, (a0)
+; POSTRA-NEXT: vs8r.v v24, (a0)
+; POSTRA-NEXT: vs8r.v v16, (a0)
+; POSTRA-NEXT: vmv.v.i v8, 1
+; POSTRA-NEXT: vs8r.v v8, (a0)
+; POSTRA-NEXT: ret
+;
+; PRERA-LABEL: vmv.v.i:
+; PRERA: # %bb.0:
+; PRERA-NEXT: addi sp, sp, -16
+; PRERA-NEXT: .cfi_def_cfa_offset 16
+; PRERA-NEXT: csrr a1, vlenb
+; PRERA-NEXT: slli a1, a1, 3
+; PRERA-NEXT: sub sp, sp, a1
+; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; PRERA-NEXT: vmv.v.i v8, 1
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: addi a1, sp, 16
+; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT: vl8re64.v v24, (a0)
+; PRERA-NEXT: vl8re64.v v0, (a0)
+; PRERA-NEXT: vl8re64.v v16, (a0)
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v0, (a0)
+; PRERA-NEXT: vs8r.v v24, (a0)
+; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT: vs8r.v v16, (a0)
+; PRERA-NEXT: vs8r.v v8, (a0)
+; PRERA-NEXT: csrr a0, vlenb
+; PRERA-NEXT: slli a0, a0, 3
+; PRERA-NEXT: add sp, sp, a0
+; PRERA-NEXT: addi sp, sp, 16
+; PRERA-NEXT: ret
 %vmv.v.i = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(<vscale x 8 x i64> poison, i64 1, i64 -1)
 store volatile <vscale x 8 x i64> %vmv.v.i, ptr %p

llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll

Lines changed: 1 addition & 0 deletions
@@ -519,6 +519,7 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT: vmv.v.i v24, 0
 ; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
 ; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vmv.v.i v24, 0
 ; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
 ; CHECK-NEXT: vs8r.v v8, (a1)
 ; CHECK-NEXT: slli a0, a0, 3
