@@ -19,10 +19,9 @@ define <vscale x 4 x i32> @binop_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i3
 
 define <vscale x 4 x i32> @binop_intrinsic_reverse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: @binop_intrinsic_reverse(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
-; CHECK-NEXT:    [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> [[B_REV]])
-; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[A_REV:%.*]], <vscale x 4 x i32> [[B_REV:%.*]])
+; CHECK-NEXT:    [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD]])
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[SMAX]]
 ;
   %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
   %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %b)
@@ -49,10 +48,10 @@ define <vscale x 4 x i32> @binop_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x
 ; %a.rev has multiple uses
 define <vscale x 4 x i32> @binop_intrinsic_reverse_1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: @binop_intrinsic_reverse_1(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
 ; CHECK-NEXT:    [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
-; CHECK-NEXT:    call void @use_nxv4i32(<vscale x 4 x i32> [[A_REV]])
-; CHECK-NEXT:    [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> [[B_REV]])
+; CHECK-NEXT:    call void @use_nxv4i32(<vscale x 4 x i32> [[B_REV]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[B]], <vscale x 4 x i32> [[B1:%.*]])
+; CHECK-NEXT:    [[SMAX:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP1]])
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[SMAX]]
 ;
   %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
@@ -233,9 +232,9 @@ define <vscale x 4 x float> @unop_reverse_1(<vscale x 4 x float> %a) {
 
 define <vscale x 4 x float> @unop_intrinsic_reverse(<vscale x 4 x float> %a) {
 ; CHECK-LABEL: @unop_intrinsic_reverse(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
-; CHECK-NEXT:    [[NEG:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[NEG]]
+; CHECK-NEXT:    [[NEG:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]])
+; CHECK-NEXT:    [[ABS:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[NEG]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[ABS]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %abs = call <vscale x 4 x float> @llvm.fabs(<vscale x 4 x float> %a.rev)
@@ -689,11 +688,8 @@ define <vscale x 4 x float> @reverse_binop_reverse(<vscale x 4 x float> %a, <vsc
 
 define <vscale x 4 x float> @reverse_binop_intrinsic_reverse(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: @reverse_binop_intrinsic_reverse(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
-; CHECK-NEXT:    [[B_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[B:%.*]])
-; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV]], <vscale x 4 x float> [[B_REV]])
-; CHECK-NEXT:    [[MAXNUM_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[ADD]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[MAXNUM_REV]]
+; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]], <vscale x 4 x float> [[B_REV:%.*]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[ADD]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %b.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %b)
@@ -734,12 +730,10 @@ define <vscale x 4 x float> @reverse_binop_reverse_splat_LHS(<vscale x 4 x float
 
 define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_RHS(<vscale x 4 x float> %a, float %b) {
 ; CHECK-LABEL: @reverse_binop_reverse_intrinsic_splat_RHS(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
 ; CHECK-NEXT:    [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
 ; CHECK-NEXT:    [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT:    [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV]], <vscale x 4 x float> [[B_SPLAT]])
-; CHECK-NEXT:    [[MAXNUM_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[MAXNUM]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[MAXNUM_REV]]
+; CHECK-NEXT:    [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]], <vscale x 4 x float> [[B_SPLAT]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[MAXNUM]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
@@ -751,12 +745,10 @@ define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_RHS(<vscale x
 
 define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_LHS(<vscale x 4 x float> %a, float %b) {
 ; CHECK-LABEL: @reverse_binop_reverse_intrinsic_splat_LHS(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
 ; CHECK-NEXT:    [[B_INSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
 ; CHECK-NEXT:    [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[B_INSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT:    [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[B_SPLAT]], <vscale x 4 x float> [[A_REV]])
-; CHECK-NEXT:    [[MAXNUM_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[MAXNUM]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[MAXNUM_REV]]
+; CHECK-NEXT:    [[MAXNUM:%.*]] = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> [[B_SPLAT]], <vscale x 4 x float> [[A_REV:%.*]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[MAXNUM]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %b.insert = insertelement <vscale x 4 x float> poison, float %b, i32 0
@@ -768,8 +760,8 @@ define <vscale x 4 x float> @reverse_binop_reverse_intrinsic_splat_LHS(<vscale x
 
 define <4 x float> @reverse_binop_reverse_intrinsic_constant_RHS(<4 x float> %a) {
 ; CHECK-LABEL: @reverse_binop_reverse_intrinsic_constant_RHS(
-; CHECK-NEXT:    [[MAXNUM_REV1:%.*]] = tail call <4 x float> @llvm.vector.reverse.v4f32(<4 x float> [[MAXNUM1:%.*]])
-; CHECK-NEXT:    [[MAXNUM:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[MAXNUM_REV1]], <4 x float> <float 0.000000e+00, float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[A:%.*]], <4 x float> <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 0.000000e+00>)
+; CHECK-NEXT:    [[MAXNUM:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-NEXT:    [[MAXNUM_REV:%.*]] = tail call <4 x float> @llvm.vector.reverse.v4f32(<4 x float> [[MAXNUM]])
 ; CHECK-NEXT:    ret <4 x float> [[MAXNUM_REV]]
 ;
@@ -817,10 +809,8 @@ define <vscale x 4 x float> @reverse_unop_reverse(<vscale x 4 x float> %a) {
 
 define <vscale x 4 x float> @reverse_unop_intrinsic_reverse(<vscale x 4 x float> %a) {
 ; CHECK-LABEL: @reverse_unop_intrinsic_reverse(
-; CHECK-NEXT:    [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]])
-; CHECK-NEXT:    [[ABS:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV]])
-; CHECK-NEXT:    [[ABS_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[ABS]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[ABS_REV]]
+; CHECK-NEXT:    [[ABS:%.*]] = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> [[A_REV:%.*]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[ABS]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %abs = call <vscale x 4 x float> @llvm.fabs(<vscale x 4 x float> %a.rev)
@@ -830,10 +820,8 @@ define <vscale x 4 x float> @reverse_unop_intrinsic_reverse(<vscale x 4 x float>
 
 define <vscale x 4 x float> @reverse_unop_intrinsic_reverse_scalar_arg(<vscale x 4 x float> %a, i32 %power) {
 ; CHECK-LABEL: @reverse_unop_intrinsic_reverse_scalar_arg(
-; CHECK-NEXT:    [[A:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[A1:%.*]])
-; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.powi.nxv4f32.i32(<vscale x 4 x float> [[A]], i32 [[POWER:%.*]])
-; CHECK-NEXT:    [[POWI_REV:%.*]] = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP1]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[POWI_REV]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.powi.nxv4f32.i32(<vscale x 4 x float> [[A:%.*]], i32 [[POWER:%.*]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 ;
   %a.rev = tail call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
   %powi = call <vscale x 4 x float> @llvm.powi.nxv4f32(<vscale x 4 x float> %a.rev, i32 %power)