Skip to content

Commit 170e7a0

Browse files
committed
[AArch64][SME2] Add CodeGen support for target("aarch64.svcount").
This patch adds AArch64 CodeGen support such that the type can be passed and returned to/from functions, and also adds support to use this type in load/store operations and PHI nodes.

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D136862
1 parent b4d9ac8 commit 170e7a0

20 files changed

+318
-34
lines changed

llvm/include/llvm/CodeGen/ValueTypes.h

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ namespace llvm {
122122
/// Test if the given EVT has zero size, this will fail if called on a
123123
/// scalable type
124124
bool isZeroSized() const {
125-
return !isScalableVector() && getSizeInBits() == 0;
125+
return getSizeInBits().isZero();
126126
}
127127

128128
/// Test if the given EVT is simple (as opposed to being extended).
@@ -150,6 +150,12 @@ namespace llvm {
150150
return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
151151
}
152152

153+
/// Return true if this is a vector type where the runtime
154+
/// length is machine dependent
155+
bool isScalableTargetExtVT() const {
156+
return isSimple() && V.isScalableTargetExtVT();
157+
}
158+
153159
/// Return true if this is a vector value type.
154160
bool isVector() const {
155161
return isSimple() ? V.isVector() : isExtendedVector();
@@ -166,6 +172,11 @@ namespace llvm {
166172
: isExtendedFixedLengthVector();
167173
}
168174

175+
/// Return true if the type is a scalable type.
176+
bool isScalableVT() const {
177+
return isScalableVector() || isScalableTargetExtVT();
178+
}
179+
169180
/// Return true if this is a 16-bit vector type.
170181
bool is16BitVector() const {
171182
return isSimple() ? V.is16BitVector() : isExtended16BitVector();

llvm/include/llvm/CodeGen/ValueTypes.td

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,8 @@ def funcref : ValueType<0, 192>; // WebAssembly's funcref type
236236
def externref : ValueType<0, 193>; // WebAssembly's externref type
237237
def x86amx : ValueType<8192, 194>; // X86 AMX value
238238
def i64x8 : ValueType<512, 195>; // 8 Consecutive GPRs (AArch64)
239+
def aarch64svcount
240+
: ValueType<16, 196>; // AArch64 predicate-as-counter
239241

240242
def token : ValueType<0, 248>; // TokenTy
241243
def MetadataVT : ValueType<0, 249>; // Metadata

llvm/include/llvm/IR/Type.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,15 @@ class Type {
206206
/// Return true if this is a target extension type.
207207
bool isTargetExtTy() const { return getTypeID() == TargetExtTyID; }
208208

209+
/// Return true if this is a target extension type with a scalable layout.
210+
bool isScalableTargetExtTy() const;
211+
212+
/// Return true if this is a scalable vector type or a target extension type
213+
/// with a scalable layout.
214+
bool isScalableTy() const {
215+
return getTypeID() == ScalableVectorTyID || isScalableTargetExtTy();
216+
}
217+
209218
/// Return true if this is a FP type or a vector of FP.
210219
bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
211220

llvm/include/llvm/Support/MachineValueType.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -291,9 +291,10 @@ namespace llvm {
291291
externref = 193, // WebAssembly's externref type
292292
x86amx = 194, // This is an X86 AMX value
293293
i64x8 = 195, // 8 Consecutive GPRs (AArch64)
294+
aarch64svcount = 196, // AArch64 predicate-as-counter
294295

295296
FIRST_VALUETYPE = 1, // This is always the beginning of the list.
296-
LAST_VALUETYPE = i64x8, // This always remains at the end of the list.
297+
LAST_VALUETYPE = aarch64svcount, // This always remains at the end of the list.
297298
VALUETYPE_SIZE = LAST_VALUETYPE + 1,
298299

299300
// This is the current maximum for LAST_VALUETYPE.
@@ -401,6 +402,16 @@ namespace llvm {
401402
SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
402403
}
403404

405+
/// Return true if this is a custom target type that has a scalable size.
406+
bool isScalableTargetExtVT() const {
407+
return SimpleTy == MVT::aarch64svcount;
408+
}
409+
410+
/// Return true if the type is a scalable type.
411+
bool isScalableVT() const {
412+
return isScalableVector() || isScalableTargetExtVT();
413+
}
414+
404415
bool isFixedLengthVector() const {
405416
return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
406417
SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
@@ -962,6 +973,7 @@ namespace llvm {
962973
case v2i8:
963974
case v1i16:
964975
case v1f16: return TypeSize::Fixed(16);
976+
case aarch64svcount:
965977
case nxv16i1:
966978
case nxv2i8:
967979
case nxv1i16:

llvm/lib/Analysis/Loads.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,7 @@ bool llvm::isDereferenceableAndAlignedPointer(
204204
const TargetLibraryInfo *TLI) {
205205
// For unsized types or scalable vectors we don't know exactly how many bytes
206206
// are dereferenced, so bail out.
207-
if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
207+
if (!Ty->isSized() || Ty->isScalableTy())
208208
return false;
209209

210210
// When dereferenceability information is provided by a dereferenceable

llvm/lib/CodeGen/CodeGenPrepare.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7696,7 +7696,7 @@ static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
76967696
// whereas scalable vectors would have to be shifted by
76977697
// <2log(vscale) + number of bits> in order to store the
76987698
// low/high parts. Bailing out for now.
7699-
if (isa<ScalableVectorType>(StoreType))
7699+
if (StoreType->isScalableTy())
77007700
return false;
77017701

77027702
if (!DL.typeSizeEqualsStoreSize(StoreType) ||

llvm/lib/CodeGen/LowLevelType.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
3131
return LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
3232
}
3333

34-
if (Ty.isSized()) {
34+
if (Ty.isSized() && !Ty.isScalableTargetExtTy()) {
3535
// Aggregates are no different from real scalars as far as GlobalISel is
3636
// concerned.
3737
auto SizeInBits = DL.getTypeSizeInBits(&Ty);

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17769,8 +17769,8 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
1776917769
// 2. The store is scalable and the load is fixed width. We could
1777017770
// potentially support a limited number of cases here, but there has been
1777117771
// no cost-benefit analysis to prove it's worth it.
17772-
bool LdStScalable = LDMemType.isScalableVector();
17773-
if (LdStScalable != STMemType.isScalableVector())
17772+
bool LdStScalable = LDMemType.isScalableVT();
17773+
if (LdStScalable != STMemType.isScalableVT())
1777417774
return SDValue();
1777517775

1777617776
// If we are dealing with scalable vectors on a big endian platform the
@@ -19925,7 +19925,7 @@ bool DAGCombiner::mergeConsecutiveStores(StoreSDNode *St) {
1992519925
// store since we know <vscale x 16 x i8> is exactly twice as large as
1992619926
// <vscale x 8 x i8>). Until then, bail out for scalable vectors.
1992719927
EVT MemVT = St->getMemoryVT();
19928-
if (MemVT.isScalableVector())
19928+
if (MemVT.isScalableVT())
1992919929
return false;
1993019930
if (!MemVT.isSimple() || MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
1993119931
return false;
@@ -26807,7 +26807,7 @@ bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
2680726807
// BaseIndexOffset assumes that offsets are fixed-size, which
2680826808
// is not valid for scalable vectors where the offsets are
2680926809
// scaled by `vscale`, so bail out early.
26810-
if (St->getMemoryVT().isScalableVector())
26810+
if (St->getMemoryVT().isScalableVT())
2681126811
return false;
2681226812

2681326813
// Add ST's interval.

llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,6 @@ getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
496496
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
497497
CallConv);
498498

499-
unsigned PartBits = PartVT.getSizeInBits();
500499
unsigned OrigNumParts = NumParts;
501500
assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
502501
"Copying to an illegal type!");
@@ -512,6 +511,7 @@ getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
512511
return;
513512
}
514513

514+
unsigned PartBits = PartVT.getSizeInBits();
515515
if (NumParts * PartBits > ValueVT.getSizeInBits()) {
516516
// If the parts cover more bits than the value has, promote the value.
517517
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {

llvm/lib/CodeGen/ValueTypes.cpp

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,8 @@ std::string EVT::getEVTString() const {
174174
case MVT::Untyped: return "Untyped";
175175
case MVT::funcref: return "funcref";
176176
case MVT::externref: return "externref";
177+
case MVT::aarch64svcount:
178+
return "aarch64svcount";
177179
}
178180
}
179181

@@ -210,6 +212,8 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
210212
case MVT::f128: return Type::getFP128Ty(Context);
211213
case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
212214
case MVT::x86mmx: return Type::getX86_MMXTy(Context);
215+
case MVT::aarch64svcount:
216+
return TargetExtType::get(Context, "aarch64.svcount");
213217
case MVT::x86amx: return Type::getX86_AMXTy(Context);
214218
case MVT::i64x8: return IntegerType::get(Context, 512);
215219
case MVT::externref: return Type::getWasm_ExternrefTy(Context);
@@ -579,6 +583,12 @@ MVT MVT::getVT(Type *Ty, bool HandleUnknown){
579583
case Type::DoubleTyID: return MVT(MVT::f64);
580584
case Type::X86_FP80TyID: return MVT(MVT::f80);
581585
case Type::X86_MMXTyID: return MVT(MVT::x86mmx);
586+
case Type::TargetExtTyID:
587+
if (cast<TargetExtType>(Ty)->getName() == "aarch64.svcount")
588+
return MVT(MVT::aarch64svcount);
589+
if (HandleUnknown)
590+
return MVT(MVT::Other);
591+
llvm_unreachable("Unknown target ext type!");
582592
case Type::X86_AMXTyID: return MVT(MVT::x86amx);
583593
case Type::FP128TyID: return MVT(MVT::f128);
584594
case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
@@ -590,8 +600,6 @@ MVT MVT::getVT(Type *Ty, bool HandleUnknown){
590600
getVT(VTy->getElementType(), /*HandleUnknown=*/ false),
591601
VTy->getElementCount());
592602
}
593-
case Type::TargetExtTyID:
594-
return MVT(MVT::Other);
595603
}
596604
}
597605

llvm/lib/IR/Type.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,12 @@ bool Type::isIEEE() const {
8080
return APFloat::getZero(getFltSemantics()).isIEEE();
8181
}
8282

83+
bool Type::isScalableTargetExtTy() const {
84+
if (auto *TT = dyn_cast<TargetExtType>(this))
85+
return isa<ScalableVectorType>(TT->getLayoutType());
86+
return false;
87+
}
88+
8389
Type *Type::getFloatingPointTy(LLVMContext &C, const fltSemantics &S) {
8490
Type *Ty;
8591
if (&S == &APFloat::IEEEhalf())

llvm/lib/Support/LowLevelType.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ LLT::LLT(MVT VT) {
2121
init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
2222
VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
2323
/*AddressSpace=*/0);
24-
} else if (VT.isValid()) {
24+
} else if (VT.isValid() && !VT.isScalableTargetExtVT()) {
2525
// Aggregates are no different from real scalars as far as GlobalISel is
2626
// concerned.
2727
init(/*IsPointer=*/false, /*IsVector=*/false, /*IsScalar=*/true,

llvm/lib/Target/AArch64/AArch64CallingConvention.td

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -82,9 +82,9 @@ def CC_AArch64_AAPCS : CallingConv<[
8282
nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
8383
CCPassIndirect<i64>>,
8484

85-
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1],
85+
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
8686
CCAssignToReg<[P0, P1, P2, P3]>>,
87-
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1],
87+
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
8888
CCPassIndirect<i64>>,
8989

9090
// Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
@@ -149,7 +149,7 @@ def RetCC_AArch64_AAPCS : CallingConv<[
149149
nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
150150
CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
151151

152-
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1],
152+
CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
153153
CCAssignToReg<[P0, P1, P2, P3]>>
154154
]>;
155155

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 35 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -415,6 +415,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
415415
}
416416
}
417417

418+
if (Subtarget->hasSVE2p1() || Subtarget->hasSME2()) {
419+
addRegisterClass(MVT::aarch64svcount, &AArch64::PPRRegClass);
420+
setOperationPromotedToType(ISD::LOAD, MVT::aarch64svcount, MVT::nxv16i1);
421+
setOperationPromotedToType(ISD::STORE, MVT::aarch64svcount, MVT::nxv16i1);
422+
423+
setOperationAction(ISD::SELECT, MVT::aarch64svcount, Custom);
424+
setOperationAction(ISD::SELECT_CC, MVT::aarch64svcount, Expand);
425+
}
426+
418427
// Compute derived properties from the register classes
419428
computeRegisterProperties(Subtarget->getRegisterInfo());
420429

@@ -6429,6 +6438,9 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
64296438
RegVT.getVectorElementType() == MVT::i1) {
64306439
FuncInfo->setIsSVECC(true);
64316440
RC = &AArch64::PPRRegClass;
6441+
} else if (RegVT == MVT::aarch64svcount) {
6442+
FuncInfo->setIsSVECC(true);
6443+
RC = &AArch64::PPRRegClass;
64326444
} else if (RegVT.isScalableVector()) {
64336445
FuncInfo->setIsSVECC(true);
64346446
RC = &AArch64::ZPRRegClass;
@@ -6463,9 +6475,9 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
64636475
case CCValAssign::Full:
64646476
break;
64656477
case CCValAssign::Indirect:
6466-
assert((VA.getValVT().isScalableVector() ||
6467-
Subtarget->isWindowsArm64EC()) &&
6468-
"Indirect arguments should be scalable on most subtargets");
6478+
assert(
6479+
(VA.getValVT().isScalableVT() || Subtarget->isWindowsArm64EC()) &&
6480+
"Indirect arguments should be scalable on most subtargets");
64696481
break;
64706482
case CCValAssign::BCvt:
64716483
ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
@@ -6544,9 +6556,9 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
65446556
}
65456557

65466558
if (VA.getLocInfo() == CCValAssign::Indirect) {
6547-
assert(
6548-
(VA.getValVT().isScalableVector() || Subtarget->isWindowsArm64EC()) &&
6549-
"Indirect arguments should be scalable on most subtargets");
6559+
assert((VA.getValVT().isScalableVT() ||
6560+
Subtarget->isWindowsArm64EC()) &&
6561+
"Indirect arguments should be scalable on most subtargets");
65506562

65516563
uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinValue();
65526564
unsigned NumParts = 1;
@@ -7399,7 +7411,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
73997411
Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
74007412
break;
74017413
case CCValAssign::Indirect:
7402-
bool isScalable = VA.getValVT().isScalableVector();
7414+
bool isScalable = VA.getValVT().isScalableVT();
74037415
assert((isScalable || Subtarget->isWindowsArm64EC()) &&
74047416
"Indirect arguments should be scalable on most subtargets");
74057417

@@ -9288,10 +9300,17 @@ SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
92889300
SDLoc DL(Op);
92899301

92909302
EVT Ty = Op.getValueType();
9303+
if (Ty == MVT::aarch64svcount) {
9304+
TVal = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i1, TVal);
9305+
FVal = DAG.getNode(ISD::BITCAST, DL, MVT::nxv16i1, FVal);
9306+
SDValue Sel =
9307+
DAG.getNode(ISD::SELECT, DL, MVT::nxv16i1, CCVal, TVal, FVal);
9308+
return DAG.getNode(ISD::BITCAST, DL, Ty, Sel);
9309+
}
9310+
92919311
if (Ty.isScalableVector()) {
9292-
SDValue TruncCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, CCVal);
92939312
MVT PredVT = MVT::getVectorVT(MVT::i1, Ty.getVectorElementCount());
9294-
SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, TruncCC);
9313+
SDValue SplatPred = DAG.getNode(ISD::SPLAT_VECTOR, DL, PredVT, CCVal);
92959314
return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
92969315
}
92979316

@@ -14876,6 +14895,9 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
1487614895
return false;
1487714896

1487814897
// FIXME: Update this method to support scalable addressing modes.
14898+
if (Ty->isScalableTargetExtTy())
14899+
return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale;
14900+
1487914901
if (isa<ScalableVectorType>(Ty)) {
1488014902
uint64_t VecElemNumBytes =
1488114903
DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
@@ -20835,7 +20857,7 @@ static SDValue performSelectCombine(SDNode *N,
2083520857
if (N0.getOpcode() != ISD::SETCC)
2083620858
return SDValue();
2083720859

20838-
if (ResVT.isScalableVector())
20860+
if (ResVT.isScalableVT())
2083920861
return SDValue();
2084020862

2084120863
// Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
@@ -23224,15 +23246,15 @@ bool AArch64TargetLowering::shouldLocalize(
2322423246
}
2322523247

2322623248
bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
23227-
if (isa<ScalableVectorType>(Inst.getType()))
23249+
if (Inst.getType()->isScalableTy())
2322823250
return true;
2322923251

2323023252
for (unsigned i = 0; i < Inst.getNumOperands(); ++i)
23231-
if (isa<ScalableVectorType>(Inst.getOperand(i)->getType()))
23253+
if (Inst.getOperand(i)->getType()->isScalableTy())
2323223254
return true;
2323323255

2323423256
if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
23235-
if (isa<ScalableVectorType>(AI->getAllocatedType()))
23257+
if (AI->getAllocatedType()->isScalableTy())
2323623258
return true;
2323723259
}
2323823260

llvm/lib/Target/AArch64/AArch64RegisterInfo.td

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -891,7 +891,7 @@ class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
891891
// SVE predicate register classes.
892892
class PPRClass<int firstreg, int lastreg> : RegisterClass<
893893
"AArch64",
894-
[ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
894+
[ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1, aarch64svcount ], 16,
895895
(sequence "P%u", firstreg, lastreg)> {
896896
let Size = 16;
897897
}

llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2512,6 +2512,9 @@ let Predicates = [HasSVEorSME] in {
25122512
def : Pat<(nxv8f16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
25132513
def : Pat<(nxv4f32 (bitconvert (nxv8bf16 ZPR:$src))), (nxv4f32 ZPR:$src)>;
25142514
def : Pat<(nxv2f64 (bitconvert (nxv8bf16 ZPR:$src))), (nxv2f64 ZPR:$src)>;
2515+
2516+
def : Pat<(nxv16i1 (bitconvert (aarch64svcount PPR:$src))), (nxv16i1 PPR:$src)>;
2517+
def : Pat<(aarch64svcount (bitconvert (nxv16i1 PPR:$src))), (aarch64svcount PPR:$src)>;
25152518
}
25162519

25172520
// These allow casting from/to unpacked predicate types.

0 commit comments

Comments (0)