
Commit 83fc48e

Committed by git apple-llvm automerger

Merge commit 'a60b9f1bf320' from llvm.org/release/11.x into apple/stable/20200714

2 parents 12b186d + a60b9f1, commit 83fc48e

4 files changed (+69 / -30 lines changed)


llvm/lib/Target/AArch64/AArch64CallingConvention.cpp

Lines changed: 9 additions & 0 deletions
@@ -35,6 +35,9 @@ static const MCPhysReg DRegList[] = {AArch64::D0, AArch64::D1, AArch64::D2,
 static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
                                      AArch64::Q3, AArch64::Q4, AArch64::Q5,
                                      AArch64::Q6, AArch64::Q7};
+static const MCPhysReg ZRegList[] = {AArch64::Z0, AArch64::Z1, AArch64::Z2,
+                                     AArch64::Z3, AArch64::Z4, AArch64::Z5,
+                                     AArch64::Z6, AArch64::Z7};
 
 static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
                              MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
@@ -97,6 +100,8 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
     RegList = DRegList;
   else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
     RegList = QRegList;
+  else if (LocVT.isScalableVector())
+    RegList = ZRegList;
   else {
     // Not an array we want to split up after all.
     return false;
@@ -141,6 +146,10 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
     return true;
   }
 
+  if (LocVT.isScalableVector())
+    report_fatal_error(
+        "Passing consecutive scalable vector registers unsupported");
+
   // Mark all regs in the class as unavailable
   for (auto Reg : RegList)
     State.AllocateReg(Reg);
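
Taken together, these hunks teach CC_AArch64_Custom_Block about scalable vectors: members of an argument block that must be placed in consecutive registers are now drawn from Z0-Z7 when their type is a scalable vector, and if such a block cannot be assigned to registers the compiler now reports a fatal error instead of continuing into the stack fall-back path that follows. As a hypothetical illustration (not part of this commit, and assuming the AArch64ISelLowering.cpp change below is also applied), a single wide scalable argument is split by type legalization and its pieces are expected to be assigned consecutive Z registers:

; Hypothetical example, not taken from this commit.
; The two <vscale x 4 x i32> halves of %a form a consecutive-register block
; and are assigned from the new ZRegList (e.g. Z0 and Z1).
define <vscale x 8 x i32> @pass_in_consecutive_z_regs(<vscale x 8 x i32> %a) {
  ret <vscale x 8 x i32> %a
}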

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 8 additions & 1 deletion
@@ -14923,7 +14923,14 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
 
 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
-  return Ty->isArrayTy();
+  if (Ty->isArrayTy())
+    return true;
+
+  const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
+  if (TySize.isScalable() && TySize.getKnownMinSize() > 128)
+    return true;
+
+  return false;
 }
 
 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
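
With this change the hook requests consecutive registers not only for array types but also for scalable types wider than a single SVE register, i.e. any type whose primitive size is scalable with a known minimum above 128 bits. A hypothetical illustration (these functions are not part of the commit):

; Hypothetical example, not taken from this commit.
; <vscale x 4 x i64> has a scalable size with a 256-bit known minimum, so the
; hook now returns true and its parts must land in consecutive registers.
define <vscale x 4 x i64> @wide_scalable_arg(<vscale x 4 x i64> %a) {
  ret <vscale x 4 x i64> %a
}

; <vscale x 2 x i64> has a 128-bit known minimum, which is not greater than
; 128, so the hook still returns false and the value is passed in a single
; Z register as before.
define <vscale x 2 x i64> @single_register_arg(<vscale x 2 x i64> %a) {
  ret <vscale x 2 x i64> %a
}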

llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll

Lines changed: 29 additions & 29 deletions
@@ -182,7 +182,7 @@ define <vscale x 2 x i64> @urem_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
 ; SMIN
 ;
 
-define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: smin_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.b
@@ -193,7 +193,7 @@ define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
   ret <vscale x 16 x i8> %min
 }
 
-define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: smin_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -204,7 +204,7 @@ define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
   ret <vscale x 8 x i16> %min
 }
 
-define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: smin_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -215,7 +215,7 @@ define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
   ret <vscale x 4 x i32> %min
 }
 
-define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: smin_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -226,7 +226,7 @@ define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
   ret <vscale x 2 x i64> %min
 }
 
-define <vscale x 32 x i8> @smin_split_i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c) {
+define <vscale x 32 x i8> @smin_split_i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
 ; CHECK-LABEL: smin_split_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.b
@@ -238,7 +238,7 @@ define <vscale x 32 x i8> @smin_split_i8(<vscale x 32 x i8> %a, <vscale x 32 x i
   ret <vscale x 32 x i8> %min
 }
 
-define <vscale x 32 x i16> @smin_split_i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c) {
+define <vscale x 32 x i16> @smin_split_i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
 ; CHECK-LABEL: smin_split_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -252,7 +252,7 @@ define <vscale x 32 x i16> @smin_split_i16(<vscale x 32 x i16> %a, <vscale x 32
   ret <vscale x 32 x i16> %min
 }
 
-define <vscale x 8 x i32> @smin_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c) {
+define <vscale x 8 x i32> @smin_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
 ; CHECK-LABEL: smin_split_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -264,7 +264,7 @@ define <vscale x 8 x i32> @smin_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i
   ret <vscale x 8 x i32> %min
 }
 
-define <vscale x 4 x i64> @smin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c) {
+define <vscale x 4 x i64> @smin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
 ; CHECK-LABEL: smin_split_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -276,7 +276,7 @@ define <vscale x 4 x i64> @smin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i
   ret <vscale x 4 x i64> %min
 }
 
-define <vscale x 8 x i8> @smin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) {
+define <vscale x 8 x i8> @smin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
 ; CHECK-LABEL: smin_promote_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -289,7 +289,7 @@ define <vscale x 8 x i8> @smin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8
   ret <vscale x 8 x i8> %min
 }
 
-define <vscale x 4 x i16> @smin_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) {
+define <vscale x 4 x i16> @smin_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
 ; CHECK-LABEL: smin_promote_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -302,7 +302,7 @@ define <vscale x 4 x i16> @smin_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x
   ret <vscale x 4 x i16> %min
 }
 
-define <vscale x 2 x i32> @smin_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) {
+define <vscale x 2 x i32> @smin_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: smin_promote_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -319,7 +319,7 @@ define <vscale x 2 x i32> @smin_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x
 ; UMIN
 ;
 
-define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: umin_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.b
@@ -330,7 +330,7 @@ define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
   ret <vscale x 16 x i8> %min
 }
 
-define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: umin_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -341,7 +341,7 @@ define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
   ret <vscale x 8 x i16> %min
 }
 
-define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: umin_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -352,7 +352,7 @@ define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
   ret <vscale x 4 x i32> %min
 }
 
-define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: umin_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -363,7 +363,7 @@ define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
   ret <vscale x 2 x i64> %min
 }
 
-define <vscale x 4 x i64> @umin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c) {
+define <vscale x 4 x i64> @umin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
 ; CHECK-LABEL: umin_split_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -375,7 +375,7 @@ define <vscale x 4 x i64> @umin_split_i64(<vscale x 4 x i64> %a, <vscale x 4 x i
   ret <vscale x 4 x i64> %min
 }
 
-define <vscale x 8 x i8> @umin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) {
+define <vscale x 8 x i8> @umin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
 ; CHECK-LABEL: umin_promote_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -392,7 +392,7 @@ define <vscale x 8 x i8> @umin_promote_i8(<vscale x 8 x i8> %a, <vscale x 8 x i8
 ; SMAX
 ;
 
-define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: smax_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.b
@@ -403,7 +403,7 @@ define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
   ret <vscale x 16 x i8> %max
 }
 
-define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: smax_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -414,7 +414,7 @@ define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
   ret <vscale x 8 x i16> %max
 }
 
-define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: smax_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -425,7 +425,7 @@ define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
   ret <vscale x 4 x i32> %max
 }
 
-define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: smax_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -436,7 +436,7 @@ define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
   ret <vscale x 2 x i64> %max
 }
 
-define <vscale x 8 x i32> @smax_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c) {
+define <vscale x 8 x i32> @smax_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
 ; CHECK-LABEL: smax_split_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -448,7 +448,7 @@ define <vscale x 8 x i32> @smax_split_i32(<vscale x 8 x i32> %a, <vscale x 8 x i
   ret <vscale x 8 x i32> %max
 }
 
-define <vscale x 4 x i16> @smax_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) {
+define <vscale x 4 x i16> @smax_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
 ; CHECK-LABEL: smax_promote_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -465,7 +465,7 @@ define <vscale x 4 x i16> @smax_promote_i16(<vscale x 4 x i16> %a, <vscale x 4 x
 ; UMAX
 ;
 
-define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
 ; CHECK-LABEL: umax_i8:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.b
@@ -476,7 +476,7 @@ define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
   ret <vscale x 16 x i8> %max
 }
 
-define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: umax_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -487,7 +487,7 @@ define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b
   ret <vscale x 8 x i16> %max
 }
 
-define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: umax_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
@@ -498,7 +498,7 @@ define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b
   ret <vscale x 4 x i32> %max
 }
 
-define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: umax_i64:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
@@ -509,7 +509,7 @@ define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
   ret <vscale x 2 x i64> %max
 }
 
-define <vscale x 16 x i16> @umax_split_i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c) {
+define <vscale x 16 x i16> @umax_split_i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
 ; CHECK-LABEL: umax_split_i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
@@ -521,7 +521,7 @@ define <vscale x 16 x i16> @umax_split_i16(<vscale x 16 x i16> %a, <vscale x 16
   ret <vscale x 16 x i16> %max
 }
 
-define <vscale x 2 x i32> @umax_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) {
+define <vscale x 2 x i32> @umax_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: umax_promote_i32:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.d
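
The only change in this test file is dropping the unused third operand %c from every function signature. This appears to be a knock-on effect of the two source changes above: with wide scalable arguments now required to occupy consecutive Z registers, a signature such as @smin_split_i16 taking three <vscale x 32 x i16> values would need twelve Z registers, exceed the Z0-Z7 pool, and trip the new fatal error, so the redundant parameter is removed to keep every test within the available registers.
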
New test file

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+; RUN: not --crash llc < %s -mtriple aarch64-linux-gnu -mattr=+sve >/dev/null 2>%t
+; RUN: FileCheck %s < %t
+
+; CHECK: Passing consecutive scalable vector registers unsupported
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+define float @foo(double* %x0, double* %x1) {
+entry:
+  %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
+  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
+  %3 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
+  %call = call float @callee(float 1.000000e+00, <vscale x 8 x double> %2, <vscale x 8 x double> %3)
+  ret float %call
+}
+
+declare float @callee(float, <vscale x 8 x double>, <vscale x 8 x double>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
+declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1>, double*)
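
This new regression test pins down the unsupported case: each <vscale x 8 x double> produced by llvm.aarch64.sve.ld4 needs four consecutive Z registers, and together with the leading float argument the call to @callee cannot be satisfied from Z0-Z7, so llc is expected to hit the report_fatal_error added in AArch64CallingConvention.cpp. The "not --crash" RUN line makes the test pass only when llc terminates abnormally, and FileCheck then matches the expected diagnostic against the captured stderr.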
