@@ -8564,6 +8564,8 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
 
 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                   DAGCombinerInfo &DCI) const {
+  // FIXME: getBasePtr does not work correctly for intrinsic nodes and will find
+  // the intrinsic ID, not the pointer.
   SDValue Ptr = N->getBasePtr();
   SelectionDAG &DAG = DCI.DAG;
   SDLoc SL(N);
@@ -10477,8 +10479,6 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
     return SDValue();
   switch (N->getOpcode()) {
-  default:
-    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
   case ISD::ADD:
     return performAddCombine(N, DCI);
   case ISD::SUB:
@@ -10505,35 +10505,6 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
     return performMinMaxCombine(N, DCI);
   case ISD::FMA:
     return performFMACombine(N, DCI);
-  case ISD::LOAD: {
-    if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
-      return Widended;
-    LLVM_FALLTHROUGH;
-  }
-  case ISD::STORE:
-  case ISD::ATOMIC_LOAD:
-  case ISD::ATOMIC_STORE:
-  case ISD::ATOMIC_CMP_SWAP:
-  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
-  case ISD::ATOMIC_SWAP:
-  case ISD::ATOMIC_LOAD_ADD:
-  case ISD::ATOMIC_LOAD_SUB:
-  case ISD::ATOMIC_LOAD_AND:
-  case ISD::ATOMIC_LOAD_OR:
-  case ISD::ATOMIC_LOAD_XOR:
-  case ISD::ATOMIC_LOAD_NAND:
-  case ISD::ATOMIC_LOAD_MIN:
-  case ISD::ATOMIC_LOAD_MAX:
-  case ISD::ATOMIC_LOAD_UMIN:
-  case ISD::ATOMIC_LOAD_UMAX:
-  case ISD::ATOMIC_LOAD_FADD:
-  case AMDGPUISD::ATOMIC_INC:
-  case AMDGPUISD::ATOMIC_DEC:
-  case AMDGPUISD::ATOMIC_LOAD_FMIN:
-  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
-    if (DCI.isBeforeLegalize())
-      break;
-    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
   case ISD::AND:
     return performAndCombine(N, DCI);
   case ISD::OR:
@@ -10598,7 +10569,21 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
     return performExtractVectorEltCombine(N, DCI);
   case ISD::INSERT_VECTOR_ELT:
     return performInsertVectorEltCombine(N, DCI);
+  case ISD::LOAD: {
+    if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
+      return Widended;
+    LLVM_FALLTHROUGH;
   }
+  default: {
+    if (!DCI.isBeforeLegalize()) {
+      if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N))
+        return performMemSDNodeCombine(MemNode, DCI);
+    }
+
+    break;
+  }
+  }
+
   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
 }
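Why the new FIXME matters: MemSDNode::getBasePtr() selects an operand by index, and for target memory intrinsic nodes (MemIntrinsicSDNode, i.e. ISD::INTRINSIC_W_CHAIN / ISD::INTRINSIC_VOID) that slot holds the intrinsic ID, since their operand list is {chain, intrinsic ID, args...}. Because the new default case forwards any MemSDNode to performMemSDNodeCombine once legalization has begun, such nodes can now reach it. Below is a minimal sketch of a guard one could add under that assumption about operand layout; hasUsableBasePtr is a hypothetical helper, not part of this patch.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical guard (not in this patch): for intrinsic memory nodes, the
// operand an index-based getBasePtr() returns is the intrinsic ID constant
// rather than an address, so a pointer combine should skip such nodes.
static bool hasUsableBasePtr(const MemSDNode *N) {
  switch (N->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    return false; // Operand 1 is the intrinsic ID here, not the pointer.
  default:
    return true;
  }
}

The restructuring itself trades the long explicit opcode list (which, per the removed "TODO: Target mem intrinsics" comment, still missed target memory intrinsics) for a single dyn_cast<MemSDNode> in the default case, so every memory-like node shares one combine path.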