@@ -2038,7 +2038,8 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
   return false;
 }
 
-bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT, unsigned EltSize) const {
+bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(
+    EVT VT, EVT PtrVT, unsigned EltSize) const {
   if (!Subtarget->hasSVE2())
     return true;
 
@@ -2047,7 +2048,7 @@ bool AArch64TargetLowering::shouldExpandGetAliasLaneMask(EVT VT, EVT PtrVT, unsi
 
   if (VT == MVT::v2i1 || VT == MVT::nxv2i1)
     return EltSize != 8;
-  if( VT == MVT::v4i1 || VT == MVT::nxv4i1)
+  if (VT == MVT::v4i1 || VT == MVT::nxv4i1)
     return EltSize != 4;
   if (VT == MVT::v8i1 || VT == MVT::nxv8i1)
     return EltSize != 2;
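As a quick orientation for the hunk above: a minimal, self-contained sketch of the decision shouldExpandGetAliasLaneMask encodes. The enum and helper below are invented names for illustration, not LLVM API; the real logic is the member function shown in the diff. The intrinsic stays un-expanded only when the mask's lane count pairs with the one element size that an SVE2 WHILEWR/WHILERW comparison of that granularity produces.

// Illustration-only mirror of the check above; MaskKind and
// shouldExpandAliasMaskSketch do not exist in LLVM.
enum class MaskKind { V2I1, V4I1, V8I1 }; // 2-, 4- and 8-lane i1 mask types

// Return true when the generic expansion is wanted, i.e. when no SVE2
// WHILEWR/WHILERW form matches this (mask type, element size) pair.
static bool shouldExpandAliasMaskSketch(bool HasSVE2, MaskKind Mask,
                                        unsigned EltSize) {
  if (!HasSVE2)
    return true; // WHILEWR/WHILERW are SVE2-only
  switch (Mask) {
  case MaskKind::V2I1:
    return EltSize != 8; // 2-lane masks pair with 64-bit (.d) elements
  case MaskKind::V4I1:
    return EltSize != 4; // 4-lane masks pair with 32-bit (.s) elements
  case MaskKind::V8I1:
    return EltSize != 2; // 8-lane masks pair with 16-bit (.h) elements
  }
  return true; // anything else gets the generic expansion
}

The same pairing shows up again below, where the lowering picks the whilewr/whilerw intrinsic variant from the element size.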
@@ -6040,12 +6041,14 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_whilewr_h:
   case Intrinsic::aarch64_sve_whilewr_s:
   case Intrinsic::aarch64_sve_whilewr_d:
-    return DAG.getNode(AArch64ISD::WHILEWR, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2));
+    return DAG.getNode(AArch64ISD::WHILEWR, dl, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2));
   case Intrinsic::aarch64_sve_whilerw_b:
   case Intrinsic::aarch64_sve_whilerw_h:
   case Intrinsic::aarch64_sve_whilerw_s:
   case Intrinsic::aarch64_sve_whilerw_d:
-    return DAG.getNode(AArch64ISD::WHILERW, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2));
+    return DAG.getNode(AArch64ISD::WHILERW, dl, Op.getValueType(),
+                       Op.getOperand(1), Op.getOperand(2));
   case Intrinsic::aarch64_neon_abs: {
     EVT Ty = Op.getValueType();
     if (Ty == MVT::i64) {
@@ -6512,34 +6515,38 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
       uint64_t EltSize = Op.getOperand(3)->getAsZExtVal();
       bool IsWriteAfterRead = Op.getOperand(4)->getAsZExtVal() == 1;
       switch (EltSize) {
-        case 1:
-          IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b : Intrinsic::aarch64_sve_whilerw_b;
-          break;
-        case 2:
-          IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h : Intrinsic::aarch64_sve_whilerw_h;
-          break;
-        case 4:
-          IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s : Intrinsic::aarch64_sve_whilerw_s;
-          break;
-        case 8:
-          IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d : Intrinsic::aarch64_sve_whilerw_d;
-          break;
-        default:
-          llvm_unreachable("Unexpected element size for get.alias.lane.mask");
-          break;
+      case 1:
+        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_b
+                                       : Intrinsic::aarch64_sve_whilerw_b;
+        break;
+      case 2:
+        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_h
+                                       : Intrinsic::aarch64_sve_whilerw_h;
+        break;
+      case 4:
+        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_s
+                                       : Intrinsic::aarch64_sve_whilerw_s;
+        break;
+      case 8:
+        IntrinsicID = IsWriteAfterRead ? Intrinsic::aarch64_sve_whilewr_d
+                                       : Intrinsic::aarch64_sve_whilerw_d;
+        break;
+      default:
+        llvm_unreachable("Unexpected element size for get.alias.lane.mask");
+        break;
       }
     }
-    SDValue ID =
-        DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
+    SDValue ID = DAG.getTargetConstant(IntrinsicID, dl, MVT::i64);
 
     EVT VT = Op.getValueType();
     if (VT.isScalableVector())
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, ID, Op.getOperand(1),
                          Op.getOperand(2));
 
-    // We can use the SVE whilelo/whilewr/whilerw instruction to lower this intrinsic by
-    // creating the appropriate sequence of scalable vector operations and
-    // then extracting a fixed-width subvector from the scalable vector.
+    // We can use the SVE whilelo/whilewr/whilerw instruction to lower this
+    // intrinsic by creating the appropriate sequence of scalable vector
+    // operations and then extracting a fixed-width subvector from the scalable
+    // vector.
 
     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
     EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
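The switch over EltSize in the hunk above decides which SVE2 WHILE instruction ultimately computes the alias mask, and the comment notes that fixed-width result types are handled by first computing the scalable predicate and then taking its low lanes. A small illustration of that element-size-to-instruction pairing, with an invented helper name that only restates the selection the switch performs:

#include <cassert>
#include <string>

// Illustration only: map the alias-mask element size and hazard direction to
// the SVE2 instruction the selected intrinsic lowers to. pickWhileAliasInstr
// is not part of the patch.
static std::string pickWhileAliasInstr(unsigned EltSize, bool IsWriteAfterRead) {
  std::string Base = IsWriteAfterRead ? "whilewr" : "whilerw";
  switch (EltSize) {
  case 1:
    return Base + ".b"; // byte elements
  case 2:
    return Base + ".h"; // halfword elements
  case 4:
    return Base + ".s"; // word elements
  case 8:
    return Base + ".d"; // doubleword elements
  default:
    assert(false && "unexpected element size for get.alias.lane.mask");
    return "";
  }
}

On the fixed-length path the predicate still comes from one of these instructions at scalable width; the ContainerVT and WhileVT types set up at the end of the hunk are what the subsequent fixed-width subvector extraction operates on.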
@@ -19927,7 +19934,8 @@ static bool isPredicateCCSettingOp(SDValue N) {
         // get_active_lane_mask is lowered to a whilelo instruction.
         N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask ||
         // get_alias_lane_mask is lowered to a whilewr/rw instruction.
-        N.getConstantOperandVal(0) == Intrinsic::experimental_get_alias_lane_mask)))
+        N.getConstantOperandVal(0) ==
+            Intrinsic::experimental_get_alias_lane_mask)))
     return true;
 
   return false;