@@ -249,15 +249,15 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   Type *LoadTy = Load->getType();
   unsigned AS = Load->getPointerAddressSpace();
   InstructionCost OldCost =
-      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
+      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS, CostKind);
   APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
   OldCost +=
       TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                    /* Insert */ true, HasExtract, CostKind);
 
   // New pattern: load VecPtr
   InstructionCost NewCost =
-      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
+      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS, CostKind);
   // Optionally, we are shuffling the loaded vector element(s) into place.
   // For the mask set everything but element 0 to undef to prevent poison from
   // propagating from the extra loaded memory. This will also optionally
@@ -1341,7 +1341,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
 
   InstructionCost OriginalCost =
       TTI.getMemoryOpCost(Instruction::Load, VecTy, LI->getAlign(),
-                          LI->getPointerAddressSpace());
+                          LI->getPointerAddressSpace(), CostKind);
   InstructionCost ScalarizedCost = 0;
 
   Instruction *LastCheckedInst = LI;
@@ -1389,7 +1389,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
                                Index ? Index->getZExtValue() : -1);
     ScalarizedCost +=
         TTI.getMemoryOpCost(Instruction::Load, VecTy->getElementType(),
-                            Align(1), LI->getPointerAddressSpace());
+                            Align(1), LI->getPointerAddressSpace(), CostKind);
     ScalarizedCost += TTI.getAddressComputationCost(VecTy->getElementType());
   }
 
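The change is mechanical but worth spelling out: `TargetTransformInfo::getMemoryOpCost` takes a `TTI::TargetCostKind` parameter that defaults to `TCK_RecipThroughput`, so call sites that omit it always get a throughput cost, even when the pass is costing for size or latency. Explicitly threading the pass's `CostKind` through every query keeps the `OldCost`/`NewCost` (and `OriginalCost`/`ScalarizedCost`) comparisons in a single cost dimension. The sketch below is a minimal standalone illustration of that pitfall, not LLVM's implementation; the enum values, the toy costs, and the `getMemoryOpCost` shape are assumptions that merely mirror the TTI API.

```cpp
#include <cstdio>

// Hypothetical mirror of TTI::TargetCostKind; values are illustrative.
enum TargetCostKind { TCK_RecipThroughput, TCK_Latency, TCK_CodeSize };

// Toy cost model: loads are cheap by throughput but a wide vector load
// is assumed to cost more by code size (e.g. longer encoding).
int getMemoryOpCost(unsigned NumElts,
                    TargetCostKind CostKind = TCK_RecipThroughput) {
  switch (CostKind) {
  case TCK_RecipThroughput: return 1;                   // one load per cycle
  case TCK_Latency:         return 4;                   // load-to-use latency
  case TCK_CodeSize:        return NumElts > 4 ? 2 : 1; // encoding size
  }
  return 1;
}

int main() {
  TargetCostKind CostKind = TCK_CodeSize; // the pass is costing for size

  // Omitting the argument falls back to the default throughput kind,
  // so this silently mixes a throughput cost into a code-size decision.
  int Defaulted = getMemoryOpCost(8);           // 1 (wrong kind)

  // Passing CostKind explicitly keeps every query consistent with the
  // kind the pass is actually optimizing for.
  int Explicit = getMemoryOpCost(8, CostKind);  // 2 (consistent)

  std::printf("defaulted=%d explicit=%d\n", Defaulted, Explicit);
  return 0;
}
```

With the defaulted call the two sides of a cost comparison can disagree by kind rather than by merit, which is why the patch adds `CostKind` at each `getMemoryOpCost` call site instead of relying on the default.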