@@ -1621,12 +1621,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                  MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::riscv_masked_strided_load:
-    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
-                               /*IsUnitStrided*/ false);
-  case Intrinsic::riscv_masked_strided_store:
-    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
-                               /*IsUnitStrided*/ false);
   case Intrinsic::riscv_seg2_load:
   case Intrinsic::riscv_seg3_load:
   case Intrinsic::riscv_seg4_load:
@@ -9401,81 +9395,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_masked_strided_load: {
-    SDLoc DL(Op);
-    MVT XLenVT = Subtarget.getXLenVT();
-
-    // If the mask is known to be all ones, optimize to an unmasked intrinsic;
-    // the selection of the masked intrinsics doesn't do this for us.
-    SDValue Mask = Op.getOperand(5);
-    bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
-
-    MVT VT = Op->getSimpleValueType(0);
-    MVT ContainerVT = VT;
-    if (VT.isFixedLengthVector())
-      ContainerVT = getContainerForFixedLengthVector(VT);
-
-    SDValue PassThru = Op.getOperand(2);
-    if (!IsUnmasked) {
-      MVT MaskVT = getMaskTypeFor(ContainerVT);
-      if (VT.isFixedLengthVector()) {
-        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
-        PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
-      }
-    }
-
-    auto *Load = cast<MemIntrinsicSDNode>(Op);
-    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
-    SDValue Ptr = Op.getOperand(3);
-    SDValue Stride = Op.getOperand(4);
-    SDValue Result, Chain;
-
-    // TODO: We restrict this to unmasked loads currently in consideration of
-    // the complexity of handling all falses masks.
-    MVT ScalarVT = ContainerVT.getVectorElementType();
-    if (IsUnmasked && isNullConstant(Stride) && ContainerVT.isInteger()) {
-      SDValue ScalarLoad =
-          DAG.getExtLoad(ISD::EXTLOAD, DL, XLenVT, Load->getChain(), Ptr,
-                         ScalarVT, Load->getMemOperand());
-      Chain = ScalarLoad.getValue(1);
-      Result = lowerScalarSplat(SDValue(), ScalarLoad, VL, ContainerVT, DL, DAG,
-                                Subtarget);
-    } else if (IsUnmasked && isNullConstant(Stride) && isTypeLegal(ScalarVT)) {
-      SDValue ScalarLoad = DAG.getLoad(ScalarVT, DL, Load->getChain(), Ptr,
-                                       Load->getMemOperand());
-      Chain = ScalarLoad.getValue(1);
-      Result = DAG.getSplat(ContainerVT, DL, ScalarLoad);
-    } else {
-      SDValue IntID = DAG.getTargetConstant(
-          IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
-          XLenVT);
-
-      SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
-      if (IsUnmasked)
-        Ops.push_back(DAG.getUNDEF(ContainerVT));
-      else
-        Ops.push_back(PassThru);
-      Ops.push_back(Ptr);
-      Ops.push_back(Stride);
-      if (!IsUnmasked)
-        Ops.push_back(Mask);
-      Ops.push_back(VL);
-      if (!IsUnmasked) {
-        SDValue Policy =
-            DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
-        Ops.push_back(Policy);
-      }
-
-      SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
-      Result =
-          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
-                                  Load->getMemoryVT(), Load->getMemOperand());
-      Chain = Result.getValue(1);
-    }
-    if (VT.isFixedLengthVector())
-      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
-    return DAG.getMergeValues({Result, Chain}, DL);
-  }
   case Intrinsic::riscv_seg2_load:
   case Intrinsic::riscv_seg3_load:
   case Intrinsic::riscv_seg4_load:
@@ -9555,47 +9474,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
   switch (IntNo) {
   default:
     break;
-  case Intrinsic::riscv_masked_strided_store: {
-    SDLoc DL(Op);
-    MVT XLenVT = Subtarget.getXLenVT();
-
-    // If the mask is known to be all ones, optimize to an unmasked intrinsic;
-    // the selection of the masked intrinsics doesn't do this for us.
-    SDValue Mask = Op.getOperand(5);
-    bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
-
-    SDValue Val = Op.getOperand(2);
-    MVT VT = Val.getSimpleValueType();
-    MVT ContainerVT = VT;
-    if (VT.isFixedLengthVector()) {
-      ContainerVT = getContainerForFixedLengthVector(VT);
-      Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
-    }
-    if (!IsUnmasked) {
-      MVT MaskVT = getMaskTypeFor(ContainerVT);
-      if (VT.isFixedLengthVector())
-        Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
-    }
-
-    SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
-
-    SDValue IntID = DAG.getTargetConstant(
-        IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
-        XLenVT);
-
-    auto *Store = cast<MemIntrinsicSDNode>(Op);
-    SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
-    Ops.push_back(Val);
-    Ops.push_back(Op.getOperand(3)); // Ptr
-    Ops.push_back(Op.getOperand(4)); // Stride
-    if (!IsUnmasked)
-      Ops.push_back(Mask);
-    Ops.push_back(VL);
-
-    return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
-                                   Ops, Store->getMemoryVT(),
-                                   Store->getMemOperand());
-  }
   case Intrinsic::riscv_seg2_store:
   case Intrinsic::riscv_seg3_store:
   case Intrinsic::riscv_seg4_store:
@@ -17509,43 +17387,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       // By default we do not combine any intrinsic.
     default:
       return SDValue();
-    case Intrinsic::riscv_masked_strided_load: {
-      MVT VT = N->getSimpleValueType(0);
-      auto *Load = cast<MemIntrinsicSDNode>(N);
-      SDValue PassThru = N->getOperand(2);
-      SDValue Base = N->getOperand(3);
-      SDValue Stride = N->getOperand(4);
-      SDValue Mask = N->getOperand(5);
-
-      // If the stride is equal to the element size in bytes, we can use
-      // a masked.load.
-      const unsigned ElementSize = VT.getScalarStoreSize();
-      if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
-          StrideC && StrideC->getZExtValue() == ElementSize)
-        return DAG.getMaskedLoad(VT, DL, Load->getChain(), Base,
-                                 DAG.getUNDEF(XLenVT), Mask, PassThru,
-                                 Load->getMemoryVT(), Load->getMemOperand(),
-                                 ISD::UNINDEXED, ISD::NON_EXTLOAD);
-      return SDValue();
-    }
-    case Intrinsic::riscv_masked_strided_store: {
-      auto *Store = cast<MemIntrinsicSDNode>(N);
-      SDValue Value = N->getOperand(2);
-      SDValue Base = N->getOperand(3);
-      SDValue Stride = N->getOperand(4);
-      SDValue Mask = N->getOperand(5);
-
-      // If the stride is equal to the element size in bytes, we can use
-      // a masked.store.
-      const unsigned ElementSize = Value.getValueType().getScalarStoreSize();
-      if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
-          StrideC && StrideC->getZExtValue() == ElementSize)
-        return DAG.getMaskedStore(Store->getChain(), DL, Value, Base,
-                                  DAG.getUNDEF(XLenVT), Mask,
-                                  Value.getValueType(), Store->getMemOperand(),
-                                  ISD::UNINDEXED, false);
-      return SDValue();
-    }
    case Intrinsic::riscv_vcpop:
    case Intrinsic::riscv_vcpop_mask:
    case Intrinsic::riscv_vfirst: