@@ -2544,6 +2544,14 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
       if (!MemR || !isa<VPWidenLoadRecipe>(MemR) || !MemR->isReverse())
         continue;
 
+      auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
+      VPValue *Ptr = VecEndPtr->getPtr();
+      Value *PtrUV = Ptr->getUnderlyingValue();
+      // Memory cost model requires the pointer operand of memory access
+      // instruction.
+      if (!PtrUV)
+        continue;
+
       Instruction &Ingredient = MemR->getIngredient();
       Type *ElementTy = getLoadStoreType(&Ingredient);
 
@@ -2554,10 +2562,9 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
           return false;
         const InstructionCost CurrentCost = MemR->computeCost(VF, Ctx);
         const InstructionCost StridedLoadStoreCost =
-            Ctx.TTI.getStridedMemoryOpCost(
-                Instruction::Load, DataTy,
-                getLoadStorePointerOperand(&Ingredient), MemR->isMasked(),
-                Alignment, Ctx.CostKind, &Ingredient);
+            Ctx.TTI.getStridedMemoryOpCost(Instruction::Load, DataTy, PtrUV,
+                                           MemR->isMasked(), Alignment,
+                                           Ctx.CostKind, &Ingredient);
         return StridedLoadStoreCost < CurrentCost;
       };
 
@@ -2567,10 +2574,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
 
     // The stride of consecutive reverse access must be -1.
     int64_t Stride = -1;
-    auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
-    VPValue *Ptr = VecEndPtr->getPtr();
-    auto *GEP = dyn_cast<GetElementPtrInst>(
-        Ptr->getUnderlyingValue()->stripPointerCasts());
+    auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
     // Create a new vector pointer for strided access.
     auto *NewPtr = new VPVectorPointerRecipe(Ptr, ElementTy, /*Stride=*/true,
                                              GEP ? GEP->getNoWrapFlags()