@@ -36,7 +36,7 @@ define <vscale x 8 x half> @famin_u_f16(<vscale x 8 x i1> %pg, <vscale x 8 x hal
; CHECK: // %bb.0:
; CHECK-NEXT: famin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
- %r = call <vscale x 8 x half> @llvm.aarch64.sve.famin.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+ %r = call <vscale x 8 x half> @llvm.aarch64.sve.famin.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %r
}
@@ -45,7 +45,7 @@ define <vscale x 4 x float> @famin_u_f32(<vscale x 4 x i1> %pg, <vscale x 4 x fl
; CHECK: // %bb.0:
; CHECK-NEXT: famin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
- %r = call <vscale x 4 x float> @llvm.aarch64.sve.famin.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+ %r = call <vscale x 4 x float> @llvm.aarch64.sve.famin.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %r
}
@@ -54,7 +54,7 @@ define <vscale x 2 x double> @famin_u_f64(<vscale x 2 x i1> %pg, <vscale x 2 x d
; CHECK: // %bb.0:
; CHECK-NEXT: famin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
- %r = call <vscale x 2 x double> @llvm.aarch64.sve.famin.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+ %r = call <vscale x 2 x double> @llvm.aarch64.sve.famin.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %r
}
@@ -90,7 +90,7 @@ define <vscale x 8 x half> @famax_u_f16(<vscale x 8 x i1> %pg, <vscale x 8 x hal
; CHECK: // %bb.0:
; CHECK-NEXT: famax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
- %r = call <vscale x 8 x half> @llvm.aarch64.sve.famax.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+ %r = call <vscale x 8 x half> @llvm.aarch64.sve.famax.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %r
}
@@ -99,7 +99,7 @@ define <vscale x 4 x float> @famax_u_f32(<vscale x 4 x i1> %pg, <vscale x 4 x fl
; CHECK: // %bb.0:
; CHECK-NEXT: famax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
- %r = call <vscale x 4 x float> @llvm.aarch64.sve.famax.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+ %r = call <vscale x 4 x float> @llvm.aarch64.sve.famax.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %r
}
@@ -108,7 +108,7 @@ define <vscale x 2 x double> @famax_u_f64(<vscale x 2 x i1> %pg, <vscale x 2 x d
; CHECK: // %bb.0:
; CHECK-NEXT: famax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
- %r = call <vscale x 2 x double> @llvm.aarch64.sve.famax.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+ %r = call <vscale x 2 x double> @llvm.aarch64.sve.famax.u.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %r
}