@@ -15,6 +15,7 @@
 #include "PPC.h"
 #include "PPCRegisterInfo.h"
 #include "PPCTargetMachine.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineScheduler.h"
 #include "llvm/IR/Attributes.h"
@@ -146,18 +147,21 @@ void PPCSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
   IsLittleEndian = (TargetTriple.getArch() == Triple::ppc64le);
 }
 
-/// hasLazyResolverStub - Return true if accesses to the specified global have
-/// to go through a dyld lazy resolution stub. This means that an extra load
-/// is required to get the address of the global.
+/// Return true if accesses to the specified global have to go through a dyld
+/// lazy resolution stub. This means that an extra load is required to get the
+/// address of the global.
 bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV) const {
-  // We never have stubs if HasLazyResolverStubs=false or if in static mode.
-  if (!HasLazyResolverStubs || TM.getRelocationModel() == Reloc::Static)
+  if (!HasLazyResolverStubs)
     return false;
-  bool isDecl = GV->isDeclaration();
-  if (GV->hasHiddenVisibility() && !isDecl && !GV->hasCommonLinkage())
-    return false;
-  return GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
-         GV->hasCommonLinkage() || isDecl;
+  if (!shouldAssumeDSOLocal(TM.getRelocationModel(), TM.getTargetTriple(),
+                            *GV->getParent(), GV))
+    return true;
+  // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
+  // the section that is being relocated. This means we have to use a load even
+  // for GVs that are known to be local to the dso.
+  if (GV->isDeclarationForLinker() || GV->hasCommonLinkage())
+    return true;
+  return false;
 }
 
 // Embedded cores need aggressive scheduling (and some others also benefit).
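For readers skimming the diff: the change drops the old ad-hoc relocation-model, visibility, and linkage checks and delegates the "is this symbol local to the DSO?" question to shouldAssumeDSOLocal(), keeping only the 32-bit Mach-O special case for declarations and common symbols. Below is a minimal standalone sketch of that decision flow, not the LLVM API: GlobalDesc, its flags, and needsLazyResolverStub are hypothetical stand-ins for llvm::GlobalValue queries, the shouldAssumeDSOLocal() result, and PPCSubtarget::hasLazyResolverStub.

// Standalone sketch (hypothetical types, not LLVM): mirrors the control flow
// of the rewritten hasLazyResolverStub in the hunk above.
#include <cstdio>

struct GlobalDesc {
  bool DSOLocal;              // stand-in for shouldAssumeDSOLocal(...)
  bool DeclarationForLinker;  // stand-in for GV->isDeclarationForLinker()
  bool CommonLinkage;         // stand-in for GV->hasCommonLinkage()
};

static bool needsLazyResolverStub(const GlobalDesc &GV,
                                  bool HasLazyResolverStubs) {
  if (!HasLazyResolverStubs)
    return false;
  // A global that may resolve outside this DSO needs the stub / extra load.
  if (!GV.DSOLocal)
    return true;
  // 32-bit Mach-O case: declarations and common symbols still need a load
  // even when known to be DSO-local, since there is no a-b relocation when
  // a is undefined.
  if (GV.DeclarationForLinker || GV.CommonLinkage)
    return true;
  return false;
}

int main() {
  GlobalDesc External{/*DSOLocal=*/false, false, false};
  GlobalDesc LocalDef{/*DSOLocal=*/true, false, false};
  GlobalDesc LocalDecl{/*DSOLocal=*/true, true, false};
  std::printf("external symbol   -> %d\n", needsLazyResolverStub(External, true));  // 1
  std::printf("local definition  -> %d\n", needsLazyResolverStub(LocalDef, true));  // 0
  std::printf("local declaration -> %d\n", needsLazyResolverStub(LocalDecl, true)); // 1
}

The practical difference from the old body is that Reloc::Static and hidden visibility are no longer checked here directly; both are covered by the shouldAssumeDSOLocal() query.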