Skip to content

Commit b752542

Browse files
committed
[SCEVExpander] Remove GEP add rec splitting code (NFCI)
I don't believe this is relevant anymore with opaque pointers, where we always expand the entire offset without splitting it into parts.
1 parent ab73bd3 commit b752542

File tree

2 files changed

+3
-82
lines changed

2 files changed

+3
-82
lines changed

llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -440,8 +440,6 @@ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
440440

441441
/// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
442442
/// ptrtoint+arithmetic+inttoptr.
443-
Value *expandAddToGEP(const SCEV *const *op_begin, const SCEV *const *op_end,
444-
Type *Ty, Value *V);
445443
Value *expandAddToGEP(const SCEV *Op, Type *Ty, Value *V);
446444

447445
/// Find a previous Value in ExprValueMap for expand.

llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp

Lines changed: 3 additions & 80 deletions
Original file line numberDiff line numberDiff line change
@@ -286,69 +286,6 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
286286
return BO;
287287
}
288288

289-
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
290-
/// is the number of SCEVAddRecExprs present, which are kept at the end of
291-
/// the list.
292-
///
293-
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
294-
Type *Ty,
295-
ScalarEvolution &SE) {
296-
unsigned NumAddRecs = 0;
297-
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
298-
++NumAddRecs;
299-
// Group Ops into non-addrecs and addrecs.
300-
SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
301-
SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
302-
// Let ScalarEvolution sort and simplify the non-addrecs list.
303-
const SCEV *Sum = NoAddRecs.empty() ?
304-
SE.getConstant(Ty, 0) :
305-
SE.getAddExpr(NoAddRecs);
306-
// If it returned an add, use the operands. Otherwise it simplified
307-
// the sum into a single value, so just use that.
308-
Ops.clear();
309-
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
310-
append_range(Ops, Add->operands());
311-
else if (!Sum->isZero())
312-
Ops.push_back(Sum);
313-
// Then append the addrecs.
314-
Ops.append(AddRecs.begin(), AddRecs.end());
315-
}
316-
317-
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
318-
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,d}.
319-
/// This helps expose more opportunities for folding parts of the expressions
320-
/// into GEP indices.
321-
///
322-
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
323-
Type *Ty,
324-
ScalarEvolution &SE) {
325-
// Find the addrecs.
326-
SmallVector<const SCEV *, 8> AddRecs;
327-
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
328-
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
329-
const SCEV *Start = A->getStart();
330-
if (Start->isZero()) break;
331-
const SCEV *Zero = SE.getConstant(Ty, 0);
332-
AddRecs.push_back(SE.getAddRecExpr(Zero,
333-
A->getStepRecurrence(SE),
334-
A->getLoop(),
335-
A->getNoWrapFlags(SCEV::FlagNW)));
336-
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
337-
Ops[i] = Zero;
338-
append_range(Ops, Add->operands());
339-
e += Add->getNumOperands();
340-
} else {
341-
Ops[i] = Start;
342-
}
343-
}
344-
if (!AddRecs.empty()) {
345-
// Add the addrecs onto the end of the list.
346-
Ops.append(AddRecs.begin(), AddRecs.end());
347-
// Resort the operand list, moving any constants to the front.
348-
SimplifyAddOperands(Ops, Ty, SE);
349-
}
350-
}
351-
352289
/// expandAddToGEP - Expand an addition expression with a pointer type into
353290
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
354291
/// BasicAliasAnalysis and other passes analyze the result. See the rules
@@ -376,20 +313,11 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
376313
/// loop-invariant portions of expressions, after considering what
377314
/// can be folded using target addressing modes.
378315
///
379-
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
380-
const SCEV *const *op_end, Type *Ty,
381-
Value *V) {
382-
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
383-
384-
// Split AddRecs up into parts as either of the parts may be usable
385-
// without the other.
386-
SplitAddRecs(Ops, Ty, SE);
387-
316+
Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Type *Ty, Value *V) {
388317
assert(!isa<Instruction>(V) ||
389318
SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
390319

391-
// Expand the operands for a plain byte offset.
392-
Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty);
320+
Value *Idx = expandCodeForImpl(Offset, Ty);
393321

394322
// Fold a GEP with constant operands.
395323
if (Constant *CLHS = dyn_cast<Constant>(V))
@@ -434,11 +362,6 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
434362
return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "scevgep");
435363
}
436364

437-
Value *SCEVExpander::expandAddToGEP(const SCEV *Op, Type *Ty, Value *V) {
438-
const SCEV *const Ops[1] = {Op};
439-
return expandAddToGEP(Ops, Ops + 1, Ty, V);
440-
}
441-
442365
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
443366
/// SCEV expansion. If they are nested, this is the most nested. If they are
444367
/// neighboring, pick the later.
@@ -575,7 +498,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
575498
X = SE.getSCEV(U->getValue());
576499
NewOps.push_back(X);
577500
}
578-
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), Ty, Sum);
501+
Sum = expandAddToGEP(SE.getAddExpr(NewOps), Ty, Sum);
579502
} else if (Op->isNonConstantNegative()) {
580503
// Instead of doing a negate and add, just do a subtract.
581504
Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty);

0 commit comments

Comments (0)