@@ -122,15 +122,15 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
   //    question (in this case rewind to p), or
   //  - just give up. It is up to caller to make sure the pointer is pointing
   //    to the base address of the object.
-  //
+  //
   // We go for 2nd option for simplicity.
   if (!isIdentifiedObject(V))
     return false;
 
   // This function needs to use the aligned object size because we allow
   // reads a bit past the end given sufficient alignment.
   uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
-
+
   return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
 }
 
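The comments in the hunk above lean on the rounded ("aligned") object size: a sufficiently aligned object may be read slightly past its end, so isObjectSmallerThan compares the access size against the size rounded up to the alignment. A minimal standalone sketch of that rounding, not LLVM code; alignedSize and the sample values are made up for illustration:

#include <cstdint>
#include <iostream>

// Round Size up to the next multiple of Align (a power of two), mirroring
// the RoundToAlign behaviour the comment above relies on.
static uint64_t alignedSize(uint64_t Size, uint64_t Align) {
  return (Size + Align - 1) & ~(Align - 1);
}

int main() {
  // A 10-byte object with 8-byte alignment occupies a 16-byte "safe" region,
  // so a 12-byte access does not prove a different object, whereas a
  // 24-byte access would.
  uint64_t ObjectSize = alignedSize(10, 8);   // 16
  std::cout << (ObjectSize < 12) << "\n";     // 0: not smaller than the access
  std::cout << (ObjectSize < 24) << "\n";     // 1: object smaller than the access
}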
@@ -163,7 +163,7 @@ namespace {
     EK_SignExt,
     EK_ZeroExt
   };
-
+
   struct VariableGEPIndex {
     const Value *V;
     ExtensionKind Extension;
@@ -200,7 +200,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
     Offset = 0;
     return V;
   }
-
+
   if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
       switch (BOp->getOpcode()) {
@@ -231,7 +231,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
       }
     }
   }
-
+
   // Since GEP indices are sign extended anyway, we don't care about the high
   // bits of a sign or zero extended value - just scales and offsets. The
   // extensions have to be consistent though.
@@ -248,10 +248,10 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                         TD, Depth+1);
     Scale = Scale.zext(OldWidth);
     Offset = Offset.zext(OldWidth);
-
+
     return Result;
   }
-
+
   Scale = 1;
   Offset = 0;
   return V;
@@ -276,7 +276,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                        const DataLayout *TD) {
   // Limit recursion depth to limit compile time in crazy cases.
   unsigned MaxLookup = 6;
-
+
   BaseOffs = 0;
   do {
     // See if this is a bitcast or GEP.
@@ -291,7 +291,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
       }
       return V;
     }
-
+
     if (Op->getOpcode() == Instruction::BitCast) {
       V = Op->getOperand(0);
       continue;
@@ -308,15 +308,15 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
           V = Simplified;
           continue;
         }
-
+
       return V;
     }
-
+
     // Don't attempt to analyze GEPs over unsized objects.
     if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
           ->getElementType()->isSized())
       return V;
-
+
     // If we are lacking DataLayout information, we can't compute the offsets of
     // elements computed by GEPs. However, we can handle bitcast equivalent
     // GEPs.
@@ -326,7 +326,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
       V = GEPOp->getOperand(0);
       continue;
     }
-
+
     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
     gep_type_iterator GTI = gep_type_begin(GEPOp);
     for (User::const_op_iterator I = GEPOp->op_begin()+1,
@@ -337,38 +337,37 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
         // For a struct, add the member offset.
         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
         if (FieldNo == 0) continue;
-
+
         BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
         continue;
       }
-
+
       // For an array/pointer, add the element offset, explicitly scaled.
       if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
         if (CIdx->isZero()) continue;
         BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
         continue;
       }
-
+
       uint64_t Scale = TD->getTypeAllocSize(*GTI);
       ExtensionKind Extension = EK_NotExtended;
-
+
       // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits() > Width)
        Extension = EK_SignExt;
-
+
      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);
-
+
      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();
-
-
+
      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
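The A[x][x] example in the comment above is worth working through: each GEP index is decomposed into C1*V + C2, scaled by the element size of its dimension, and scales that refer to the same variable are merged. A toy standalone sketch of that accumulation, using made-up values rather than the LLVM data structures:

#include <cstdint>
#include <iostream>

// Toy accumulation of a GEP offset of the form sum(Scale_i * V_i) + BaseOffs,
// analogous to what DecomposeGEPExpression builds (simplified: one variable,
// no extensions, plain 64-bit arithmetic).
int main() {
  int64_t BaseOffs = 0;
  int64_t XScale = 0;  // accumulated scale for the single variable x

  // A[x][x] on int A[N][4]: the outer index contributes x*16 (four 4-byte
  // ints per row), the inner index contributes x*4, and the scales merge
  // into a single x*20 term.
  XScale += 16;  // outer dimension: element size 16
  XScale += 4;   // inner dimension: element size 4

  // With x = 3, the symbolic form x*20 + 0 matches the concrete offset.
  int64_t x = 3;
  std::cout << XScale * x + BaseOffs << "\n";  // 60
}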
@@ -381,33 +380,33 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
           break;
         }
       }
-
+
       // Make sure that we have a scale that makes sense for this target's
       // pointer size.
       if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
         Scale <<= ShiftBits;
         Scale = (int64_t)Scale >> ShiftBits;
       }
-
+
       if (Scale) {
         VariableGEPIndex Entry = {Index, Extension,
                                   static_cast<int64_t>(Scale)};
         VarIndices.push_back(Entry);
       }
     }
-
+
     // Analyze the base pointer next.
     V = GEPOp->getOperand(0);
   } while (--MaxLookup);
-
+
   // If the chain of expressions is too deep, just return early.
   return V;
 }
 
 /// GetIndexDifference - Dest and Src are the variable indices from two
 /// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
 /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
-/// difference between the two pointers.
+/// difference between the two pointers.
 static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                                const SmallVectorImpl<VariableGEPIndex> &Src) {
   if (Src.empty()) return;
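The shift pair in the hunk above (Scale <<= ShiftBits; Scale = (int64_t)Scale >> ShiftBits;) sign-extends the low PointerSize bits of the accumulated scale into the full 64-bit value, so a scale that is negative at pointer width stays negative. A small standalone sketch of the same trick; the function name and sample widths are illustrative only:

#include <cstdint>
#include <iostream>

// Sign-extend the low PointerBits bits of Scale to 64 bits using the same
// shift pair as the code above.
static int64_t truncateToPointerWidth(uint64_t Scale, unsigned PointerBits) {
  unsigned ShiftBits = 64 - PointerBits;
  if (ShiftBits == 0)
    return (int64_t)Scale;
  Scale <<= ShiftBits;
  return (int64_t)Scale >> ShiftBits;
}

int main() {
  // On a 32-bit target, a scale of 0xFFFFFFFF really means -1.
  std::cout << truncateToPointerWidth(0xFFFFFFFFULL, 32) << "\n";  // -1
  // A value that already fits in 32 bits is unchanged.
  std::cout << truncateToPointerWidth(20, 32) << "\n";             // 20
}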
@@ -416,12 +415,12 @@ static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
     const Value *V = Src[i].V;
     ExtensionKind Extension = Src[i].Extension;
     int64_t Scale = Src[i].Scale;
-
+
     // Find V in Dest. This is N^2, but pointer indices almost never have more
     // than a few variable indexes.
     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
       if (Dest[j].V != V || Dest[j].Extension != Extension) continue;
-
+
       // If we found it, subtract off Scale V's from the entry in Dest. If it
       // goes to zero, remove the entry.
       if (Dest[j].Scale != Scale)
@@ -431,7 +430,7 @@ static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
       Scale = 0;
       break;
     }
-
+
     // If we didn't consume this entry, add it to the end of the Dest list.
     if (Scale) {
       VariableGEPIndex Entry = { V, Extension, -Scale };
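GetIndexDifference, as described by the doc comment and the loop above, subtracts GEP2's variable indices from GEP1's: a matching variable either cancels exactly or leaves the difference of the two scales, and unmatched source entries are carried over negated. A simplified standalone model of that subtraction; VarIndex and the string-keyed variables are illustrative stand-ins for the LLVM types:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct VarIndex { std::string V; int64_t Scale; };

static void subtractIndices(std::vector<VarIndex> &Dest,
                            const std::vector<VarIndex> &Src) {
  for (const VarIndex &S : Src) {
    int64_t Scale = S.Scale;
    for (size_t j = 0; j != Dest.size(); ++j) {
      if (Dest[j].V != S.V) continue;
      // Matching variable: either the terms cancel exactly or we keep the
      // difference of the two scales.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }
    // Unmatched Src entries are carried over negated.
    if (Scale)
      Dest.push_back({S.V, -Scale});
  }
}

int main() {
  std::vector<VarIndex> GEP1 = {{"x", 20}, {"y", 4}};
  std::vector<VarIndex> GEP2 = {{"x", 20}};
  subtractIndices(GEP1, GEP2);  // the x terms cancel, y*4 remains
  for (const VarIndex &E : GEP1)
    std::cout << E.V << "*" << E.Scale << "\n";  // prints: y*4
}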
@@ -526,7 +525,7 @@ namespace {
         return (AliasAnalysis*)this;
       return this;
     }
-
+
   private:
     // AliasCache - Track alias queries to guard against recursion.
     typedef std::pair<Location, Location> LocPair;
@@ -696,7 +695,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
          "AliasAnalysis query involving multiple functions!");
 
   const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
-
+
   // If this is a tail call and Loc.Ptr points to a stack location, we know that
   // the tail call cannot access or modify the local stack.
   // We cannot exclude byval arguments here; these belong to the caller of
@@ -706,7 +705,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
     if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
       if (CI->isTailCall())
         return NoModRef;
-
+
   // If the pointer is to a locally allocated object that does not escape,
   // then the call can not mod/ref the pointer unless the call takes the pointer
   // as an argument, and itself doesn't capture it.
@@ -722,7 +721,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
       if (!(*CI)->getType()->isPointerTy() ||
           (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
         continue;
-
+
       // If this is a no-capture pointer argument, see if we can tell that it
       // is impossible to alias the pointer we're checking. If not, we have to
       // assume that the call could touch the pointer, even though it doesn't
@@ -732,7 +731,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
         break;
       }
     }
-
+
     if (!PassedAsArg)
       return NoModRef;
   }
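The argument scan above captures a simple rule: a call cannot read or write a non-escaping local allocation unless that allocation is (possibly) reachable through one of the call's pointer arguments. A toy standalone sketch of that scan; the Arg record and the sample values are hypothetical, not the ImmutableCallSite API:

#include <iostream>
#include <vector>

struct Arg { bool IsPointer; bool MayAliasObject; };

int main() {
  // Three call arguments: an integer and two pointers that provably do not
  // alias the local object in question.
  std::vector<Arg> Args = {{false, false}, {true, false}, {true, false}};

  bool PassedAsArg = false;
  for (const Arg &A : Args)
    if (A.IsPointer && A.MayAliasObject) { PassedAsArg = true; break; }

  std::cout << (PassedAsArg ? "may ModRef" : "NoModRef") << "\n";  // NoModRef
}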
@@ -821,7 +820,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
   }
 
   // We can bound the aliasing properties of memset_pattern16 just as we can
-  // for memcpy/memset. This is particularly important because the
+  // for memcpy/memset. This is particularly important because the
   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
   // whenever possible.
   else if (TLI.has(LibFunc::memset_pattern16) &&
@@ -925,35 +924,35 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
         GEP1VariableIndices.clear();
       }
     }
-
+
     // If we get a No or May, then return it immediately, no amount of analysis
     // will improve this situation.
     if (BaseAlias != MustAlias) return BaseAlias;
-
+
     // Otherwise, we have a MustAlias. Since the base pointers alias each other
     // exactly, see if the computed offset from the common pointer tells us
     // about the relation of the resulting pointer.
     const Value *GEP1BasePtr =
       DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
-
+
     int64_t GEP2BaseOffset;
     SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
     const Value *GEP2BasePtr =
       DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
-
+
     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
       assert(TD == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
-
+
     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
     // symbolic difference.
     GEP1BaseOffset -= GEP2BaseOffset;
     GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
-
+
   } else {
     // Check to see if these two pointers are related by the getelementptr
     // instruction. If one pointer is a GEP with a non-zero index of the other
@@ -975,7 +974,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
 
     const Value *GEP1BasePtr =
       DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
-
+
     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1) {
@@ -984,7 +983,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
       return MayAlias;
     }
   }
-
+
   // In the two GEP Case, if there is no difference in the offsets of the
   // computed pointers, the resultant pointers are a must alias. This
   // happens when we have two lexically identical GEP's (for example).
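The must-alias observation in the comment above can be stated compactly: once both GEPs are decomposed against a common base, only a constant offset difference and the residual variable indices remain, and if both are zero the pointers must alias (two lexically identical GEPs being the obvious case). A minimal sketch with made-up values:

#include <cstdint>
#include <iostream>

int main() {
  // GEP1BaseOffset already has GEP2's constant offset subtracted, and
  // GetIndexDifference has cancelled every matching variable term.
  int64_t GEP1BaseOffset = 0;
  bool HaveVariableIndices = false;

  if (GEP1BaseOffset == 0 && !HaveVariableIndices)
    std::cout << "MustAlias\n";  // e.g. two lexically identical GEPs
  else
    std::cout << "needs further offset/size reasoning\n";
}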
@@ -1226,7 +1225,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
   if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
       (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
     return NoAlias;
-
+
   // If one pointer is the result of a call/invoke or load and the other is a
   // non-escaping local object within the same function, then we know the
   // object couldn't escape to a point where the call could return it.
@@ -1248,7 +1247,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
   if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
       (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
     return NoAlias;
-
+
   // Check the cache before climbing up use-def chains. This also terminates
   // otherwise infinitely recursive queries.
   LocPair Locs(Location(V1, V1Size, V1TBAAInfo),