diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 0d708b58d5c8..9ba4c040b7d7 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -199,7 +199,7 @@ void InputSectionBase<ELFT>::relocate(uint8_t *Buf, uint8_t *BufEnd,
       SymVA = Out<ELFT>::Got->getEntryAddr(Body);
       if (Body.isTls())
         Type = Target->getTlsGotReloc();
-    } else if (!Target->relocNeedsCopy(Type, Body) &&
+    } else if (!Target->needsCopyRel(Type, Body) &&
                isa<SharedSymbol<ELFT>>(Body)) {
       continue;
     } else if (Target->isTlsDynReloc(Type) ||
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index bb5e0e16111e..723da267e6ff 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -258,7 +258,7 @@ template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *Buf) {
     uint32_t Type = RI.getType(Config->Mips64EL);
     if (applyTlsDynamicReloc(Body, Type, P, reinterpret_cast<Elf_Rel *>(Buf)))
       continue;
-    bool NeedsCopy = Body && Target->relocNeedsCopy(Type, *Body);
+    bool NeedsCopy = Body && Target->needsCopyRel(Type, *Body);
     bool NeedsGot = Body && Target->relocNeedsGot(Type, *Body);
     bool CanBePreempted = canBePreempted(Body, NeedsGot);
     bool LazyReloc = Body && Target->supportsLazyRelocations() &&
diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp
index e5568ddfabf9..ab2d80864e16 100644
--- a/lld/ELF/Target.cpp
+++ b/lld/ELF/Target.cpp
@@ -83,7 +83,7 @@ public:
   void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                      uint64_t PltEntryAddr, int32_t Index,
                      unsigned RelOff) const override;
-  bool relocNeedsCopy(uint32_t Type, const SymbolBody &S) const override;
+  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
   void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
@@ -117,7 +117,7 @@ public:
   void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                      uint64_t PltEntryAddr, int32_t Index,
                      unsigned RelOff) const override;
-  bool relocNeedsCopy(uint32_t Type, const SymbolBody &S) const override;
+  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
   void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
@@ -169,7 +169,7 @@ public:
   void writePltEntry(uint8_t *Buf, uint64_t GotAddr, uint64_t GotEntryAddr,
                      uint64_t PltEntryAddr, int32_t Index,
                      unsigned RelOff) const override;
-  bool relocNeedsCopy(uint32_t Type, const SymbolBody &S) const override;
+  bool needsCopyRel(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
   bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
   void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P,
@@ -226,7 +226,7 @@ bool TargetInfo::isTlsOptimized(unsigned Type, const SymbolBody *S) const {
 
 uint64_t TargetInfo::getVAStart() const { return Config->Shared ? 0 : VAStart; }
 
-bool TargetInfo::relocNeedsCopy(uint32_t Type, const SymbolBody &S) const {
+bool TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
   return false;
 }
 
@@ -327,7 +327,7 @@ void X86TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
   write32le(Buf + 12, -Index * PltEntrySize - PltZeroEntrySize - 16);
 }
 
-bool X86TargetInfo::relocNeedsCopy(uint32_t Type, const SymbolBody &S) const {
+bool X86TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
   if (Type == R_386_32 || Type == R_386_16 || Type == R_386_8)
     if (auto *SS = dyn_cast<SharedSymbol<ELF32LE>>(&S))
       return SS->Sym.getType() == STT_OBJECT;
@@ -555,8 +555,7 @@ void X86_64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
   write32le(Buf + 12, -Index * PltEntrySize - PltZeroEntrySize - 16);
 }
 
-bool X86_64TargetInfo::relocNeedsCopy(uint32_t Type,
-                                      const SymbolBody &S) const {
+bool X86_64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
   if (Type == R_X86_64_32S || Type == R_X86_64_32 || Type == R_X86_64_PC32 ||
       Type == R_X86_64_64)
     if (auto *SS = dyn_cast<SharedSymbol<ELF64LE>>(&S))
@@ -583,7 +582,7 @@ unsigned X86_64TargetInfo::getPltRefReloc(unsigned Type) const {
 }
 
 bool X86_64TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
-  if (relocNeedsCopy(Type, S))
+  if (needsCopyRel(Type, S))
     return false;
 
   switch (Type) {
@@ -1122,8 +1121,7 @@ void AArch64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotAddr,
               GotEntryAddr);
 }
 
-bool AArch64TargetInfo::relocNeedsCopy(uint32_t Type,
-                                       const SymbolBody &S) const {
+bool AArch64TargetInfo::needsCopyRel(uint32_t Type, const SymbolBody &S) const {
   if (Config->Shared)
     return false;
   switch (Type) {
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index 816a2c127572..6bf23db825ec 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -54,13 +54,13 @@ public:
                              int32_t Index, unsigned RelOff) const = 0;
   virtual bool isRelRelative(uint32_t Type) const;
   virtual bool isSizeDynReloc(uint32_t Type, const SymbolBody &S) const;
-  virtual bool relocNeedsCopy(uint32_t Type, const SymbolBody &S) const;
   virtual bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const = 0;
   virtual bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const = 0;
   virtual void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type,
                            uint64_t P, uint64_t SA, uint64_t ZA = 0,
                            uint8_t *PairedLoc = nullptr) const = 0;
   virtual bool isTlsOptimized(unsigned Type, const SymbolBody *S) const;
+  virtual bool needsCopyRel(uint32_t Type, const SymbolBody &S) const;
   virtual unsigned relocateTlsOptimize(uint8_t *Loc, uint8_t *BufEnd,
                                        uint32_t Type, uint64_t P, uint64_t SA,
                                        const SymbolBody &S) const;
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index cae1f20278d1..522eb6261c12 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -245,7 +245,7 @@ void Writer<ELFT>::scanRelocs(
     if (auto *E = dyn_cast<SharedSymbol<ELFT>>(Body)) {
       if (E->NeedsCopy)
         continue;
-      if (Target->relocNeedsCopy(Type, *Body))
+      if (Target->needsCopyRel(Type, *Body))
         E->NeedsCopy = true;
     }
     NeedsPlt = Target->relocNeedsPlt(Type, *Body);