llvm/test/CodeGen/RISCV/rvv/commutable.ll
175 lines changed: 175 additions & 0 deletions
@@ -649,3 +649,178 @@ entry:
  ret <vscale x 1 x i64> %ret
}

; vsadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v10, v8, v9
; CHECK-NEXT:    vsadd.vv v8, v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vsaddu.vv
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v10, v8, v9
; CHECK-NEXT:    vsaddu.vv v8, v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsaddu.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vaadd.vv
declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaadd.vv v10, v8, v9
; CHECK-NEXT:    vaadd.vv v8, v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaadd_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaadd_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vaaddu.vv
declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vaaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaaddu.vv v10, v8, v9
; CHECK-NEXT:    vaaddu.vv v8, v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vaaddu_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vaaddu_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaaddu.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vaaddu.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

; vsmul.vv
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: commutable_vsmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsmul.vv v10, v8, v9
; CHECK-NEXT:    vsmul.vv v8, v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen 0, iXLen %2)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, iXLen 0, iXLen %2)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}

declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);
define <vscale x 1 x i64> @commutable_vsmul_vv_masked(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen %2) {
; CHECK-LABEL: commutable_vsmul_vv_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vsmul.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %b = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %0, <vscale x 1 x i1> %mask, iXLen 0, iXLen %2, iXLen 1)
  %ret = add <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i64> %ret
}