@@ -411,6 +411,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
 
+    static unsigned IntegerVPOps[] = {
+        ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
+        ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
+        ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
+
     if (!Subtarget.is64Bit()) {
       // We must custom-lower certain vXi64 operations on RV32 due to the vector
       // element type being illegal.
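For context (my note, not text from the patch): each of these ISD::VP_* opcodes is built from the corresponding llvm.vp.* intrinsic, and a binary VP node carries its two data operands plus a mask and an explicit vector length (EVL), which the intrinsics define as i32. A minimal C++ sketch of that node shape, assuming the usual SelectionDAG headers are in scope:

SDValue buildVPAdd(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue LHS,
                   SDValue RHS, SDValue Mask, SDValue EVL) {
  // Operand order matches the intrinsic: data operands, then mask, then EVL.
  return DAG.getNode(ISD::VP_ADD, DL, VT, LHS, RHS, Mask, EVL);
}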
@@ -496,6 +501,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
 
+      for (unsigned VPOpc : IntegerVPOps) {
+        setOperationAction(VPOpc, VT, Custom);
+        // RV64 must custom-legalize the i32 EVL parameter.
+        if (Subtarget.is64Bit())
+          setOperationAction(VPOpc, MVT::i32, Custom);
+      }
+
       setOperationAction(ISD::MLOAD, VT, Custom);
       setOperationAction(ISD::MSTORE, VT, Custom);
       setOperationAction(ISD::MGATHER, VT, Custom);
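A note on the extra MVT::i32 action (my reading of the type legalizer, not patch text): on RV64 a VP node's vector result type is legal but its i32 EVL operand is not, and the legalizer consults the action registered for the pair (opcode, operand type) before promoting an operand. The Custom entry therefore routes the node into LowerOperation during operand legalization, where lowerVPOp widens the EVL itself, roughly:

// Sketch of the effect (hypothetical helper name); getZExtOrTrunc is a no-op
// on RV32, where XLenVT is already i32.
SDValue widenEVL(SelectionDAG &DAG, const SDLoc &DL, SDValue EVL, MVT XLenVT) {
  return DAG.getZExtOrTrunc(EVL, DL, XLenVT);
}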
@@ -695,6 +707,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
+
+      for (unsigned VPOpc : IntegerVPOps) {
+        setOperationAction(VPOpc, VT, Custom);
+        // RV64 must custom-legalize the i32 EVL parameter.
+        if (Subtarget.is64Bit())
+          setOperationAction(VPOpc, MVT::i32, Custom);
+      }
     }
 
     for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
@@ -2367,6 +2386,32 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerGET_ROUNDING(Op, DAG);
   case ISD::SET_ROUNDING:
     return lowerSET_ROUNDING(Op, DAG);
+  case ISD::VP_ADD:
+    return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
+  case ISD::VP_SUB:
+    return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
+  case ISD::VP_MUL:
+    return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
+  case ISD::VP_SDIV:
+    return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
+  case ISD::VP_UDIV:
+    return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
+  case ISD::VP_SREM:
+    return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
+  case ISD::VP_UREM:
+    return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
+  case ISD::VP_AND:
+    return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
+  case ISD::VP_OR:
+    return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
+  case ISD::VP_XOR:
+    return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
+  case ISD::VP_ASHR:
+    return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
+  case ISD::VP_LSHR:
+    return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
+  case ISD::VP_SHL:
+    return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
   }
 }
 
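The switch is a mechanical one-to-one translation; the only naming wrinkle is that the generic shift opcodes VP_ASHR and VP_LSHR map to the RVV-style SRA_VL and SRL_VL. A design note, not part of the patch: the mapping could equally live in a small helper, sketched here with hypothetical names:

static unsigned getVLOpcodeForVP(unsigned VPOpc) {
  switch (VPOpc) {
  case ISD::VP_ADD:  return RISCVISD::ADD_VL;
  case ISD::VP_ASHR: return RISCVISD::SRA_VL; // ASHR maps to SRA
  case ISD::VP_LSHR: return RISCVISD::SRL_VL; // LSHR maps to SRL
  // ... remaining opcodes elided ...
  default:           llvm_unreachable("unhandled VP opcode");
  }
}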
@@ -2828,12 +2873,18 @@ SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
 // legal equivalently-sized i8 type, so we can use that as a go-between.
 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
                                                   SelectionDAG &DAG) const {
-  SDValue SplatVal = Op.getOperand(0);
-  // All-zeros or all-ones splats are handled specially.
-  if (isa<ConstantSDNode>(SplatVal))
-    return Op;
   SDLoc DL(Op);
   MVT VT = Op.getSimpleValueType();
+  SDValue SplatVal = Op.getOperand(0);
+  // All-zeros or all-ones splats are handled specially.
+  if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
+    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
+    return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
+  }
+  if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
+    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
+    return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
+  }
   MVT XLenVT = Subtarget.getXLenVT();
   assert(SplatVal.getValueType() == XLenVT &&
          "Unexpected type for i1 splat value");
@@ -4215,6 +4266,50 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
 }
 
+// Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
+// * Operands of each node are assumed to be in the same order.
+// * The EVL operand is promoted from i32 to i64 on RV64.
+// * Fixed-length vectors are converted to their scalable-vector container
+//   types.
+SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
+                                       unsigned RISCVISDOpc) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  Optional<unsigned> EVLIdx = ISD::getVPExplicitVectorLengthIdx(Op.getOpcode());
+
+  SmallVector<SDValue, 4> Ops;
+  MVT XLenVT = Subtarget.getXLenVT();
+
+  for (const auto &OpIdx : enumerate(Op->ops())) {
+    SDValue V = OpIdx.value();
+    if ((unsigned)OpIdx.index() == EVLIdx) {
+      Ops.push_back(DAG.getZExtOrTrunc(V, DL, XLenVT));
+      continue;
+    }
+    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
+    // Pass through operands which aren't fixed-length vectors.
+    if (!V.getValueType().isFixedLengthVector()) {
+      Ops.push_back(V);
+      continue;
+    }
+    // "cast" fixed length vector to a scalable vector.
+    MVT OpVT = V.getSimpleValueType();
+    MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
+    assert(useRVVForFixedLengthVectorVT(OpVT) &&
+           "Only fixed length vectors are supported!");
+    Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
+  }
+
+  if (!VT.isFixedLengthVector())
+    return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
+
+  MVT ContainerVT = getContainerForFixedLengthVector(VT);
+
+  SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
+
+  return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
+}
+
 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
 // a RVV indexed load. The RVV indexed load instructions only support the
 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or