@@ -312,3 +312,73 @@ define void @vmv.v.x_live(ptr %p, i64 %x) {
   store volatile i64 %x, ptr %p
   ret void
 }
+
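+; POSTRA checks that the vfmv.v.f is rematerialized once register pressure
+; clobbers v8, avoiding a stack spill; PRERA instead spills and reloads one
+; of the loaded values across the same sequence.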
+define void @vfmv.v.f(ptr %p, double %x) {
+; POSTRA-LABEL: vfmv.v.f:
+; POSTRA:       # %bb.0:
+; POSTRA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; POSTRA-NEXT:    vfmv.v.f v8, fa0
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    vl8re64.v v16, (a0)
+; POSTRA-NEXT:    vl8re64.v v24, (a0)
+; POSTRA-NEXT:    vl8re64.v v0, (a0)
+; POSTRA-NEXT:    vl8re64.v v8, (a0)
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    vs8r.v v0, (a0)
+; POSTRA-NEXT:    vs8r.v v24, (a0)
+; POSTRA-NEXT:    vs8r.v v16, (a0)
+; POSTRA-NEXT:    vfmv.v.f v8, fa0
+; POSTRA-NEXT:    vs8r.v v8, (a0)
+; POSTRA-NEXT:    fsd fa0, 0(a0)
+; POSTRA-NEXT:    ret
+;
+; PRERA-LABEL: vfmv.v.f:
+; PRERA:       # %bb.0:
+; PRERA-NEXT:    addi sp, sp, -16
+; PRERA-NEXT:    .cfi_def_cfa_offset 16
+; PRERA-NEXT:    csrr a1, vlenb
+; PRERA-NEXT:    slli a1, a1, 3
+; PRERA-NEXT:    sub sp, sp, a1
+; PRERA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; PRERA-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; PRERA-NEXT:    vfmv.v.f v8, fa0
+; PRERA-NEXT:    vs8r.v v8, (a0)
+; PRERA-NEXT:    vl8re64.v v16, (a0)
+; PRERA-NEXT:    addi a1, sp, 16
+; PRERA-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; PRERA-NEXT:    vl8re64.v v24, (a0)
+; PRERA-NEXT:    vl8re64.v v0, (a0)
+; PRERA-NEXT:    vl8re64.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v0, (a0)
+; PRERA-NEXT:    vs8r.v v24, (a0)
+; PRERA-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; PRERA-NEXT:    vs8r.v v16, (a0)
+; PRERA-NEXT:    vs8r.v v8, (a0)
+; PRERA-NEXT:    fsd fa0, 0(a0)
+; PRERA-NEXT:    csrr a0, vlenb
+; PRERA-NEXT:    slli a0, a0, 3
+; PRERA-NEXT:    add sp, sp, a0
+; PRERA-NEXT:    addi sp, sp, 16
+; PRERA-NEXT:    ret
+  %vfmv.v.f = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(<vscale x 8 x double> poison, double %x, i64 -1)
+  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+
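+  ; These four volatile loads occupy all four m8 register groups
+  ; (v16, v24, v0, v8), clobbering the vfmv.v.f result.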
+  %a = load volatile <vscale x 8 x double>, ptr %p
+  %b = load volatile <vscale x 8 x double>, ptr %p
+  %c = load volatile <vscale x 8 x double>, ptr %p
+  %d = load volatile <vscale x 8 x double>, ptr %p
+  store volatile <vscale x 8 x double> %d, ptr %p
+  store volatile <vscale x 8 x double> %c, ptr %p
+  store volatile <vscale x 8 x double> %b, ptr %p
+  store volatile <vscale x 8 x double> %a, ptr %p
+
+  store volatile <vscale x 8 x double> %vfmv.v.f, ptr %p
+  store volatile double %x, ptr %p
+  ret void
+}