@@ -112,155 +112,4 @@ define <vscale x 2 x double> @famax_u_f64(<vscale x 2 x i1> %pg, <vscale x 2 x d
  ret <vscale x 2 x double> %r
}

- define <vscale x 8 x half> @select_famin_f16a(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
- ; CHECK-LABEL: select_famin_f16a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.h, p0/m, z0.h, z1.h
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %m = call <vscale x 8 x half> @llvm.aarch64.sve.famin.u.nxv8f16(<vscale x 8 x i1> %all.true, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
- %r = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %m, <vscale x 8 x half> %a
- ret <vscale x 8 x half> %r
- }
-
- define <vscale x 8 x half> @select_famin_f16b(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
- ; CHECK-LABEL: select_famin_f16b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.h, p0/m, z0.h, z1.h
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %m = call <vscale x 8 x half> @llvm.aarch64.sve.famin.u.nxv8f16(<vscale x 8 x i1> %all.true, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
- %r = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %m, <vscale x 8 x half> %a
- ret <vscale x 8 x half> %r
- }
-
- define <vscale x 4 x float> @select_famin_f32a(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
- ; CHECK-LABEL: select_famin_f32a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.s, p0/m, z0.s, z1.s
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %m = call <vscale x 4 x float> @llvm.aarch64.sve.famin.u.nxv4f32(<vscale x 4 x i1> %all.true, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
- %r = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %m, <vscale x 4 x float> %a
- ret <vscale x 4 x float> %r
- }
-
- define <vscale x 4 x float> @select_famin_f32b(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
- ; CHECK-LABEL: select_famin_f32b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.s, p0/m, z0.s, z1.s
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %m = call <vscale x 4 x float> @llvm.aarch64.sve.famin.u.nxv4f32(<vscale x 4 x i1> %all.true, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
- %r = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %m, <vscale x 4 x float> %a
- ret <vscale x 4 x float> %r
- }
-
- define <vscale x 2 x double> @select_famin_f64a(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
- ; CHECK-LABEL: select_famin_f64a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.d, p0/m, z0.d, z1.d
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %m = call <vscale x 2 x double> @llvm.aarch64.sve.famin.u.nxv2f64(<vscale x 2 x i1> %all.true, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
- %r = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %m, <vscale x 2 x double> %a
- ret <vscale x 2 x double> %r
- }
-
- define <vscale x 2 x double> @select_famin_f64b(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
- ; CHECK-LABEL: select_famin_f64b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famin z0.d, p0/m, z0.d, z1.d
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %m = call <vscale x 2 x double> @llvm.aarch64.sve.famin.u.nxv2f64(<vscale x 2 x i1> %all.true, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
- %r = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %m, <vscale x 2 x double> %a
- ret <vscale x 2 x double> %r
- }
-
-
- define <vscale x 8 x half> @select_famax_f16a(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
- ; CHECK-LABEL: select_famax_f16a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.h, p0/m, z0.h, z1.h
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %m = call <vscale x 8 x half> @llvm.aarch64.sve.famax.u.nxv8f16(<vscale x 8 x i1> %all.true, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
- %r = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %m, <vscale x 8 x half> %a
- ret <vscale x 8 x half> %r
- }
-
- define <vscale x 8 x half> @select_famax_f16b(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
- ; CHECK-LABEL: select_famax_f16b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.h, p0/m, z0.h, z1.h
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
- %m = call <vscale x 8 x half> @llvm.aarch64.sve.famax.u.nxv8f16(<vscale x 8 x i1> %all.true, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
- %r = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %m, <vscale x 8 x half> %a
- ret <vscale x 8 x half> %r
- }
-
- define <vscale x 4 x float> @select_famax_f32a(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
- ; CHECK-LABEL: select_famax_f32a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.s, p0/m, z0.s, z1.s
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %m = call <vscale x 4 x float> @llvm.aarch64.sve.famax.u.nxv4f32(<vscale x 4 x i1> %all.true, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
- %r = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %m, <vscale x 4 x float> %a
- ret <vscale x 4 x float> %r
- }
-
- define <vscale x 4 x float> @select_famax_f32b(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
- ; CHECK-LABEL: select_famax_f32b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.s, p0/m, z0.s, z1.s
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
- %m = call <vscale x 4 x float> @llvm.aarch64.sve.famax.u.nxv4f32(<vscale x 4 x i1> %all.true, <vscale x 4 x float> %b, <vscale x 4 x float> %a)
- %r = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %m, <vscale x 4 x float> %a
- ret <vscale x 4 x float> %r
- }
-
- define <vscale x 2 x double> @select_famax_f64a(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
- ; CHECK-LABEL: select_famax_f64a:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.d, p0/m, z0.d, z1.d
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %m = call <vscale x 2 x double> @llvm.aarch64.sve.famax.u.nxv2f64(<vscale x 2 x i1> %all.true, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
- %r = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %m, <vscale x 2 x double> %a
- ret <vscale x 2 x double> %r
- }
-
- define <vscale x 2 x double> @select_famax_f64b(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
- ; CHECK-LABEL: select_famax_f64b:
- ; CHECK: // %bb.0:
- ; CHECK-NEXT: famax z0.d, p0/m, z0.d, z1.d
- ; CHECK-NEXT: ret
- %all.true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
- %m = call <vscale x 2 x double> @llvm.aarch64.sve.famax.u.nxv2f64(<vscale x 2 x i1> %all.true, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
- %r = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %m, <vscale x 2 x double> %a
- ret <vscale x 2 x double> %r
- }
-
- declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
- declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
- declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
-
- declare <vscale x 8 x half> @llvm.aarch64.sve.famin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
- declare <vscale x 4 x float> @llvm.aarch64.sve.famin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
- declare <vscale x 2 x double> @llvm.aarch64.sve.famin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
- declare <vscale x 8 x half> @llvm.aarch64.sve.famin.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
- declare <vscale x 4 x float> @llvm.aarch64.sve.famin.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
- declare <vscale x 2 x double> @llvm.aarch64.sve.famin.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-
- declare <vscale x 8 x half> @llvm.aarch64.sve.famax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
- declare <vscale x 4 x float> @llvm.aarch64.sve.famax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
- declare <vscale x 2 x double> @llvm.aarch64.sve.famax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
- declare <vscale x 8 x half> @llvm.aarch64.sve.famax.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
- declare <vscale x 4 x float> @llvm.aarch64.sve.famax.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
- declare <vscale x 2 x double> @llvm.aarch64.sve.famax.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
-
attributes #0 = { nounwind "target-features"="+faminmax" }