@@ -2536,6 +2536,14 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
    if (!MemR || !isa<VPWidenLoadRecipe>(MemR) || !MemR->isReverse())
      continue;
 
+    auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
+    VPValue *Ptr = VecEndPtr->getPtr();
+    Value *PtrUV = Ptr->getUnderlyingValue();
+    // Memory cost model requires the pointer operand of memory access
+    // instruction.
+    if (!PtrUV)
+      continue;
+
    Instruction &Ingredient = MemR->getIngredient();
    Type *ElementTy = getLoadStoreType(&Ingredient);
 
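This hunk hoists the base-pointer lookup ahead of the profitability check and skips the candidate entirely when the address has no underlying IR value, because the strided-memory cost query needs a concrete pointer operand. A minimal sketch of the guard, using only the calls visible in this patch (the free-standing helper and its name are illustrative, not part of the source):

// Sketch only: fetch the IR pointer feeding a reverse widened load. The
// result may be nullptr when the address is defined purely inside VPlan,
// in which case there is no pointer operand to hand to the cost model.
static Value *getCostablePointer(VPWidenMemoryRecipe *MemR) {
  auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
  VPValue *Ptr = VecEndPtr->getPtr();
  return Ptr->getUnderlyingValue(); // caller must skip on nullptr
}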
@@ -2546,10 +2554,9 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
        return false;
      const InstructionCost CurrentCost = MemR->computeCost(VF, Ctx);
      const InstructionCost StridedLoadStoreCost =
-          Ctx.TTI.getStridedMemoryOpCost(
-              Instruction::Load, DataTy,
-              getLoadStorePointerOperand(&Ingredient), MemR->isMasked(),
-              Alignment, Ctx.CostKind, &Ingredient);
+          Ctx.TTI.getStridedMemoryOpCost(Instruction::Load, DataTy, PtrUV,
+                                         MemR->isMasked(), Alignment,
+                                         Ctx.CostKind, &Ingredient);
      return StridedLoadStoreCost < CurrentCost;
    };
 
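The call-site change above passes the cached PtrUV instead of re-deriving the pointer from the scalar ingredient via getLoadStorePointerOperand(&Ingredient); both spell an IR pointer for the same access, but the new form reuses the value that the early bail-out has already proven non-null. For reference, the TTI hook being called has roughly this shape (paraphrased from TargetTransformInfo.h; the header remains authoritative):

// Estimated cost of a strided load/store; `Ptr` lets targets inspect the
// pointer operand of the access when refining the estimate.
InstructionCost getStridedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                       const Value *Ptr, bool VariableMask,
                                       Align Alignment,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I = nullptr) const;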
@@ -2559,10 +2566,7 @@ void VPlanTransforms::convertToStridedAccesses(VPlan &Plan, VPCostContext &Ctx,
 
    // The stride of consecutive reverse access must be -1.
    int64_t Stride = -1;
-    auto *VecEndPtr = cast<VPVectorEndPointerRecipe>(MemR->getAddr());
-    VPValue *Ptr = VecEndPtr->getPtr();
-    auto *GEP = dyn_cast<GetElementPtrInst>(
-        Ptr->getUnderlyingValue()->stripPointerCasts());
+    auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());
    // Create a new vector pointer for strided access.
    auto *NewPtr = new VPVectorPointerRecipe(Ptr, ElementTy, /*Stride=*/true,
                                             GEP ? GEP->getNoWrapFlags()
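Besides removing the duplicated recipe and pointer lookup, reusing PtrUV here makes the stripPointerCasts() call unconditionally safe: the removed form dereferenced Ptr->getUnderlyingValue() with no null check, whereas control now only reaches this point after the early `if (!PtrUV) continue;`. Condensed control flow after the patch (illustrative, not verbatim):

// Computed once per candidate, near the top of the loop:
Value *PtrUV = Ptr->getUnderlyingValue();
if (!PtrUV)
  continue; // guards both the cost query and the GEP inspection

// ...profitability check via Ctx.TTI.getStridedMemoryOpCost(..., PtrUV, ...)

// Safe: PtrUV is known non-null here.
auto *GEP = dyn_cast<GetElementPtrInst>(PtrUV->stripPointerCasts());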