|
1 | 1 | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
2 |
| -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \ |
| 2 | +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+experimental-zfbfmin,+experimental-zvfbfmin \ |
3 | 3 | ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
|
4 |
| -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \ |
| 4 | +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+experimental-zfbfmin,+experimental-zvfbfmin \ |
5 | 5 | ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
6 | 6 |
|
7 | 7 | declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
|
@@ -528,3 +528,123 @@ entry:
|
528 | 528 |
|
529 | 529 | ret <vscale x 8 x double> %a
|
530 | 530 | }
|
| 531 | + |
; Splat a scalar bfloat into every element of <vscale x 1 x bfloat> via the
; llvm.riscv.vfmv.v.f intrinsic. The undef passthru operand means no
; tail-policy merging is exercised — the result is a plain splat.
; Autogenerated CHECK lines expect vsetvli with SEW=e16, LMUL=mf4 followed by
; vfmv.v.f v8, fa0. NOTE(review): bf16 uses the same e16 element width as f16;
; legality presumably comes from +experimental-zvfbfmin in the RUN lines —
; confirm against the intrinsic lowering.
| 532 | +declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16( |
| 533 | + <vscale x 1 x bfloat>, |
| 534 | + bfloat, |
| 535 | + iXLen); |
| 536 | + |
| 537 | +define <vscale x 1 x bfloat> @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind { |
| 538 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16: |
| 539 | +; CHECK: # %bb.0: # %entry |
| 540 | +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma |
| 541 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 542 | +; CHECK-NEXT: ret |
| 543 | +entry: |
| 544 | + %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16( |
| 545 | + <vscale x 1 x bfloat> undef, |
| 546 | + bfloat %0, |
| 547 | + iXLen %1) |
| 548 | + |
| 549 | + ret <vscale x 1 x bfloat> %a |
| 550 | +} |
| 551 | + |
; vfmv.v.f splat of a scalar bfloat across <vscale x 2 x bfloat> with an undef
; passthru (no tail merging). Autogenerated CHECK lines pin the expected
; codegen: vsetvli SEW=e16, LMUL=mf2, then vfmv.v.f v8, fa0.
| 552 | +declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16( |
| 553 | + <vscale x 2 x bfloat>, |
| 554 | + bfloat, |
| 555 | + iXLen); |
| 556 | + |
| 557 | +define <vscale x 2 x bfloat> @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind { |
| 558 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16: |
| 559 | +; CHECK: # %bb.0: # %entry |
| 560 | +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma |
| 561 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 562 | +; CHECK-NEXT: ret |
| 563 | +entry: |
| 564 | + %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16( |
| 565 | + <vscale x 2 x bfloat> undef, |
| 566 | + bfloat %0, |
| 567 | + iXLen %1) |
| 568 | + |
| 569 | + ret <vscale x 2 x bfloat> %a |
| 570 | +} |
| 571 | + |
; vfmv.v.f splat of a scalar bfloat across <vscale x 4 x bfloat> with an undef
; passthru (no tail merging). Autogenerated CHECK lines pin the expected
; codegen: vsetvli SEW=e16, LMUL=m1, then vfmv.v.f v8, fa0.
| 572 | +declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16( |
| 573 | + <vscale x 4 x bfloat>, |
| 574 | + bfloat, |
| 575 | + iXLen); |
| 576 | + |
| 577 | +define <vscale x 4 x bfloat> @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind { |
| 578 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16: |
| 579 | +; CHECK: # %bb.0: # %entry |
| 580 | +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma |
| 581 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 582 | +; CHECK-NEXT: ret |
| 583 | +entry: |
| 584 | + %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16( |
| 585 | + <vscale x 4 x bfloat> undef, |
| 586 | + bfloat %0, |
| 587 | + iXLen %1) |
| 588 | + |
| 589 | + ret <vscale x 4 x bfloat> %a |
| 590 | +} |
| 591 | + |
; vfmv.v.f splat of a scalar bfloat across <vscale x 8 x bfloat> with an undef
; passthru (no tail merging). Autogenerated CHECK lines pin the expected
; codegen: vsetvli SEW=e16, LMUL=m2, then vfmv.v.f v8, fa0.
| 592 | +declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16( |
| 593 | + <vscale x 8 x bfloat>, |
| 594 | + bfloat, |
| 595 | + iXLen); |
| 596 | + |
| 597 | +define <vscale x 8 x bfloat> @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind { |
| 598 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16: |
| 599 | +; CHECK: # %bb.0: # %entry |
| 600 | +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma |
| 601 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 602 | +; CHECK-NEXT: ret |
| 603 | +entry: |
| 604 | + %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16( |
| 605 | + <vscale x 8 x bfloat> undef, |
| 606 | + bfloat %0, |
| 607 | + iXLen %1) |
| 608 | + |
| 609 | + ret <vscale x 8 x bfloat> %a |
| 610 | +} |
| 611 | + |
; vfmv.v.f splat of a scalar bfloat across <vscale x 16 x bfloat> with an undef
; passthru (no tail merging). Autogenerated CHECK lines pin the expected
; codegen: vsetvli SEW=e16, LMUL=m4, then vfmv.v.f v8, fa0.
| 612 | +declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16( |
| 613 | + <vscale x 16 x bfloat>, |
| 614 | + bfloat, |
| 615 | + iXLen); |
| 616 | + |
| 617 | +define <vscale x 16 x bfloat> @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind { |
| 618 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16: |
| 619 | +; CHECK: # %bb.0: # %entry |
| 620 | +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma |
| 621 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 622 | +; CHECK-NEXT: ret |
| 623 | +entry: |
| 624 | + %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16( |
| 625 | + <vscale x 16 x bfloat> undef, |
| 626 | + bfloat %0, |
| 627 | + iXLen %1) |
| 628 | + |
| 629 | + ret <vscale x 16 x bfloat> %a |
| 630 | +} |
| 631 | + |
; vfmv.v.f splat of a scalar bfloat across <vscale x 32 x bfloat> (largest
; LMUL) with an undef passthru (no tail merging). Autogenerated CHECK lines pin
; the expected codegen: vsetvli SEW=e16, LMUL=m8, then vfmv.v.f v8, fa0.
| 632 | +declare <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16( |
| 633 | + <vscale x 32 x bfloat>, |
| 634 | + bfloat, |
| 635 | + iXLen); |
| 636 | + |
| 637 | +define <vscale x 32 x bfloat> @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind { |
| 638 | +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16: |
| 639 | +; CHECK: # %bb.0: # %entry |
| 640 | +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma |
| 641 | +; CHECK-NEXT: vfmv.v.f v8, fa0 |
| 642 | +; CHECK-NEXT: ret |
| 643 | +entry: |
| 644 | + %a = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16( |
| 645 | + <vscale x 32 x bfloat> undef, |
| 646 | + bfloat %0, |
| 647 | + iXLen %1) |
| 648 | + |
| 649 | + ret <vscale x 32 x bfloat> %a |
| 650 | +} |
0 commit comments