@@ -1124,6 +1124,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
   setOperationAction(ISD::SMAX, VT, Custom);
   setOperationAction(ISD::SMIN, VT, Custom);
+  setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
   setOperationAction(ISD::SRA, VT, Custom);
   setOperationAction(ISD::SRL, VT, Custom);
   setOperationAction(ISD::STORE, VT, Custom);
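
Marking ISD::SPLAT_VECTOR as Custom for these fixed-length vector types routes such nodes through AArch64TargetLowering::LowerOperation during legalization, which already dispatches splats to the handler updated in the next hunk. A minimal sketch of that pre-existing dispatch, for orientation only (not part of this diff):

  case ISD::SPLAT_VECTOR:
    return LowerSPLAT_VECTOR(Op, DAG);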
@@ -7978,9 +7979,11 @@ SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
   SDLoc dl(Op);
   EVT VT = Op.getValueType();
   EVT ElemVT = VT.getScalarType();
-
   SDValue SplatVal = Op.getOperand(0);
 
+  if (useSVEForFixedLengthVectorVT(VT))
+    return LowerToScalableOp(Op, DAG);
+
   // Extend input splat value where needed to fit into a GPR (32b or 64b only)
   // FPRs don't have this restriction.
   switch (ElemVT.getSimpleVT().SimpleTy) {
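
For reference, the switch that follows (unchanged by this patch) widens narrow integer splat values so they fit a GPR, as the comment says. A sketch of its likely shape, reconstructed from that comment rather than quoted from the file:

  switch (ElemVT.getSimpleVT().SimpleTy) {
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // Narrow integers are any-extended to a 32-bit GPR value.
    SplatVal = DAG.getAnyExtOrTrunc(SplatVal, dl, MVT::i32);
    break;
  case MVT::i64:
    SplatVal = DAG.getAnyExtOrTrunc(SplatVal, dl, MVT::i64);
    break;
  case MVT::f16:
  case MVT::f32:
  case MVT::f64:
    // FPRs don't have this restriction; nothing to do.
    break;
  default:
    report_fatal_error("Unsupported SPLAT_VECTOR input operand type");
  }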
@@ -15485,6 +15488,15 @@ SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
   // Create list of operands by converting existing ones to scalable types.
   SmallVector<SDValue, 4> Ops;
   for (const SDValue &V : Op->op_values()) {
+    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
+
+    // Pass through non-vector operands.
+    if (!V.getValueType().isVector()) {
+      Ops.push_back(V);
+      continue;
+    }
+
+    // "cast" fixed length vector to a scalable vector.
     assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
            "Only fixed length vectors are supported!");
     Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
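
The third hunk cuts off before the end of LowerToScalableOp. To make the round trip explicit, here is a sketch of how the function plausibly completes, assuming the getContainerForFixedLengthVector and convertFromScalableVector helpers used elsewhere in AArch64ISelLowering.cpp: each fixed-length operand is "cast" into its scalable container, the node is re-created with the container result type, and the scalable result is cast back to the original fixed-length type.

  // Sketch only; not part of this diff.
  EVT VT = Op.getValueType();
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
  // ... the operand-conversion loop shown in the hunk above ...
  SDValue ScalableRes = DAG.getNode(Op.getOpcode(), SDLoc(Op), ContainerVT, Ops);
  return convertFromScalableVector(DAG, VT, ScalableRes);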