@@ -357,7 +357,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
   }
 
-  if (Subtarget->hasNEON()) {
+  if (Subtarget->isNeonAvailable()) {
     addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
     addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
     // Someone set us up the NEON.
@@ -378,6 +378,28 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     addQRTypeForNEON(MVT::v2i64);
     addQRTypeForNEON(MVT::v8f16);
     addQRTypeForNEON(MVT::v8bf16);
+  } else if (Subtarget->hasNEON() ||
+             Subtarget->useSVEForFixedLengthVectors()) {
+    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
+    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
+
+    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v4f16, &AArch64::FPR64RegClass);
+    addRegisterClass(MVT::v4bf16, &AArch64::FPR64RegClass);
+
+    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v8f16, &AArch64::FPR128RegClass);
+    addRegisterClass(MVT::v8bf16, &AArch64::FPR128RegClass);
   }
 
   if (Subtarget->hasSVEorSME()) {
@@ -1125,7 +1147,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 
-  if (Subtarget->hasNEON()) {
+  if (Subtarget->isNeonAvailable()) {
     // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
     // silliness like this:
     for (auto Op :
@@ -1328,6 +1350,24 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     // FADDP custom lowering
     for (MVT VT : { MVT::v16f16, MVT::v8f32, MVT::v4f64 })
       setOperationAction(ISD::FADD, VT, Custom);
+  } else {
+    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
+      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
+        setOperationAction(Op, VT, Expand);
+
+      if (VT.is128BitVector() || VT.is64BitVector()) {
+        setOperationAction(ISD::LOAD, VT, Legal);
+        setOperationAction(ISD::STORE, VT, Legal);
+        setOperationAction(ISD::BITCAST, VT,
+                           Subtarget->isLittleEndian() ? Legal : Expand);
+      }
+      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
+        setTruncStoreAction(VT, InnerVT, Expand);
+        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
+      }
+    }
   }
 
   if (Subtarget->hasSME()) {
@@ -9377,7 +9417,8 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
 
 SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
                                               SelectionDAG &DAG) const {
-  if (!Subtarget->hasNEON())
+  if (!Subtarget->isNeonAvailable() &&
+      !Subtarget->useSVEForFixedLengthVectors())
     return SDValue();
 
   EVT VT = Op.getValueType();
@@ -14110,6 +14151,13 @@ SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
 }
 
+bool AArch64TargetLowering::shouldExpandBuildVectorWithShuffles(
+    EVT VT, unsigned DefinedValues) const {
+  if (!Subtarget->isNeonAvailable())
+    return false;
+  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
+}
+
 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
   // Currently no fixed length shuffles that require SVE are legal.
   if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))