Skip to content

Commit 21a0176 — [RISCV] Rematerialize vfmv.v.f (#108007)

This is the same principle as vmv.v.x in #107993, but for floats.

Parent commit: c641b61

File tree

3 files changed: +67 additions, -0 deletions

llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -170,6 +170,7 @@ bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
170170
const MachineInstr &MI) const {
171171
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
172172
case RISCV::VMV_V_X:
173+
case RISCV::VFMV_V_F:
173174
case RISCV::VMV_V_I:
174175
case RISCV::VID_V:
175176
if (MI.getOperand(1).isUndef() &&

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -6558,6 +6558,7 @@ defm PseudoVFMERGE : VPseudoVMRG_FM;
65586558
//===----------------------------------------------------------------------===//
65596559
// 13.16. Vector Floating-Point Move Instruction
65606560
//===----------------------------------------------------------------------===//
6561+
let isReMaterializable = 1 in
65616562
defm PseudoVFMV_V : VPseudoVMV_F;
65626563

65636564
//===----------------------------------------------------------------------===//

llvm/test/CodeGen/RISCV/rvv/remat.ll

Lines changed: 65 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -312,3 +312,68 @@ define void @vmv.v.x_live(ptr %p, i64 %x) {
312312
store volatile i64 %x, ptr %p
313313
ret void
314314
}
315+
316+
define void @vfmv.v.f(ptr %p, double %x) {
317+
; POSTRA-LABEL: vfmv.v.f:
318+
; POSTRA: # %bb.0:
319+
; POSTRA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
320+
; POSTRA-NEXT: vfmv.v.f v8, fa0
321+
; POSTRA-NEXT: vs8r.v v8, (a0)
322+
; POSTRA-NEXT: vl8re64.v v16, (a0)
323+
; POSTRA-NEXT: vl8re64.v v24, (a0)
324+
; POSTRA-NEXT: vl8re64.v v0, (a0)
325+
; POSTRA-NEXT: vl8re64.v v8, (a0)
326+
; POSTRA-NEXT: vs8r.v v8, (a0)
327+
; POSTRA-NEXT: vs8r.v v0, (a0)
328+
; POSTRA-NEXT: vs8r.v v24, (a0)
329+
; POSTRA-NEXT: vs8r.v v16, (a0)
330+
; POSTRA-NEXT: vfmv.v.f v8, fa0
331+
; POSTRA-NEXT: vs8r.v v8, (a0)
332+
; POSTRA-NEXT: fsd fa0, 0(a0)
333+
; POSTRA-NEXT: ret
334+
;
335+
; PRERA-LABEL: vfmv.v.f:
336+
; PRERA: # %bb.0:
337+
; PRERA-NEXT: addi sp, sp, -16
338+
; PRERA-NEXT: .cfi_def_cfa_offset 16
339+
; PRERA-NEXT: csrr a1, vlenb
340+
; PRERA-NEXT: slli a1, a1, 3
341+
; PRERA-NEXT: sub sp, sp, a1
342+
; PRERA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
343+
; PRERA-NEXT: vsetvli a1, zero, e64, m8, ta, ma
344+
; PRERA-NEXT: vfmv.v.f v8, fa0
345+
; PRERA-NEXT: vs8r.v v8, (a0)
346+
; PRERA-NEXT: vl8re64.v v16, (a0)
347+
; PRERA-NEXT: addi a1, sp, 16
348+
; PRERA-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
349+
; PRERA-NEXT: vl8re64.v v24, (a0)
350+
; PRERA-NEXT: vl8re64.v v0, (a0)
351+
; PRERA-NEXT: vl8re64.v v16, (a0)
352+
; PRERA-NEXT: vs8r.v v16, (a0)
353+
; PRERA-NEXT: vs8r.v v0, (a0)
354+
; PRERA-NEXT: vs8r.v v24, (a0)
355+
; PRERA-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
356+
; PRERA-NEXT: vs8r.v v16, (a0)
357+
; PRERA-NEXT: vs8r.v v8, (a0)
358+
; PRERA-NEXT: fsd fa0, 0(a0)
359+
; PRERA-NEXT: csrr a0, vlenb
360+
; PRERA-NEXT: slli a0, a0, 3
361+
; PRERA-NEXT: add sp, sp, a0
362+
; PRERA-NEXT: addi sp, sp, 16
363+
; PRERA-NEXT: ret
364+
%vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
365+
store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
366+
367+
%a = load volatile <vscale x 8 x double>, ptr %p
368+
%b = load volatile <vscale x 8 x double>, ptr %p
369+
%c = load volatile <vscale x 8 x double>, ptr %p
370+
%d = load volatile <vscale x 8 x double>, ptr %p
371+
store volatile <vscale x 8 x double> %d, ptr %p
372+
store volatile <vscale x 8 x double> %c, ptr %p
373+
store volatile <vscale x 8 x double> %b, ptr %p
374+
store volatile <vscale x 8 x double> %a, ptr %p
375+
376+
store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
377+
store volatile double %x, ptr %p
378+
ret void
379+
}

Commit comments (0)