/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
/*
* amd64 machine dependent and ELF file class dependent functions.
* Contains routines for performing function binding and symbol relocations.
*/
#include <stdio.h>
#include <sys/elf_amd64.h>
#include <dlfcn.h>
#include <synch.h>
#include <string.h>
#include <debug.h>
#include <reloc.h>
#include <conv.h>
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "_inline_gen.h"
#include "_inline_reloc.h"
#include "msg.h"
int
{
/*
* Check machine type and flags.
*/
return (0);
}
return (1);
}
void
{
/*
* There is no need to analyze ld.so because we don't map in any of
 * its dependencies. However, we may map these dependencies in later
* (as if ld.so had dlopened them), so initialize the plt and the
* permission information.
*/
}
static uchar_t	dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */ 0x48, 0x89, 0xe5, /* movq %rsp, %rbp */
/* 0x04 */ 0x48, 0x83, 0xec, 0x10, /* subq $0x10, %rsp */
/* 0x08 */ 0x4c, 0x8d, 0x1d, 0x00, /* leaq trace_fields(%rip), %r11 */
0x00, 0x00, 0x00,
/* 0x0f */ 0x4c, 0x89, 0x5d, 0xf8, /* movq %r11, -0x8(%rbp) */
/* 0x13 */ 0x49, 0xbb, 0x00, 0x00, /* movq $elf_plt_trace, %r11 */
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
/* 0x1d */ 0x41, 0xff, 0xe3 /* jmp *%r11 */
/* 0x20 */
};
/*
 * And the virtual outstanding relocations against the
* above block are:
*
* reloc offset Addend symbol
* R_AMD64_PC32 0x0b -4 trace_fields
* R_AMD64_64 0x15 0 elf_plt_trace
*/
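/*
 * A minimal sketch (not part of the original file; the helper name and
 * arguments are hypothetical) of how those two relocations are applied
 * once the template has been copied into place.  The R_AMD64_PC32 value
 * is S + A - P, where P is the address of the 4-byte field at offset
 * 0x0b; the -4 addend accounts for the distance from that field to the
 * end of the leaq instruction.  The R_AMD64_64 value is simply the
 * absolute 64-bit address of elf_plt_trace().
 */
static void
dyn_plt_reloc_sketch(uchar_t *dyn_plt, uintptr_t trace_fields_addr,
    uintptr_t elf_plt_trace_addr)
{
	uint32_t	disp;

	/* R_AMD64_PC32, offset 0x0b, addend -4:  S + A - P */
	disp = (uint32_t)(trace_fields_addr - 4 -
	    (uintptr_t)&dyn_plt[0x0b]);
	(void) memcpy(&dyn_plt[0x0b], &disp, sizeof (disp));

	/* R_AMD64_64, offset 0x15, addend 0:  absolute address */
	(void) memcpy(&dyn_plt[0x15], &elf_plt_trace_addr,
	    sizeof (elf_plt_trace_addr));
}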
/*
* the dynamic plt entry is:
*
* pushq %rbp
* movq %rsp, %rbp
* subq $0x10, %rsp
* leaq trace_fields(%rip), %r11
* movq %r11, -0x8(%rbp)
* movq $elf_plt_trace, %r11
* jmp *%r11
* dyn_data:
* .align 8
* uintptr_t reflmp
* uintptr_t deflmp
* uint_t symndx
* uint_t sb_flags
* Sym symdef
*/
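/*
 * For illustration only:  the dyn_data block described above, expressed
 * as a C struct.  The real code addresses these fields by offset; the
 * struct and member names here are hypothetical.
 */
typedef struct {
	uintptr_t	dd_reflmp;	/* link-map of referencing object */
	uintptr_t	dd_deflmp;	/* link-map of defining object */
	uint_t		dd_symndx;	/* plt symbol table index */
	uint_t		dd_sb_flags;	/* audit symbol binding flags */
	Sym		dd_symdef;	/* copy of the symbol definition */
} dyn_data_sketch_t;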
static caddr_t
{
extern int elf_plt_trace();
/*
* We only need to add the glue code if there is an auditing
* library that is interested in this binding.
*/
(pltndx * dyn_plt_ent_size));
/*
 * Have we initialized this dynamic plt entry yet? If we haven't, do it
 * now. Otherwise this function has been called before, but from a
 * different plt (i.e., from another shared object). In that case
* we just set the plt to point to the new dyn_plt.
*/
if (*dyn_plt == 0) {
sizeof (dyn_plt_template));
/*
* relocate:
* leaq trace_fields(%rip), %r11
* R_AMD64_PC32 0x0b -4 trace_fields
*/
*fail = 1;
return (0);
}
/*
* relocating:
* movq $elf_plt_trace, %r11
* R_AMD64_64 0x15 0 elf_plt_trace
*/
*fail = 1;
return (0);
}
dyndata++;
}
}
/*
* Function binding routine - invoked on the first call to a function through
* the procedure linkage table;
* passes first through an assembly language interface.
*
* Takes the offset into the relocation table of the associated
* relocation entry and the address of the link map (rt_private_map struct)
* for the entry.
*
* Returns the address of the function referenced after re-writing the PLT
* entry to invoke the function directly.
*
* On error, causes process to terminate with a signal.
*/
{
char *name;
/*
* For compatibility with libthread (TI_VERSION 1) we track the entry
* value. A zero value indicates we have recursed into ld.so.1 to
* further process a locking request. Under this recursion we disable
* tsort and cleanup activities.
*/
}
/*
* Perform some basic sanity checks. If we didn't get a load map or
 * the relocation offset is invalid then it's possible someone has walked
* over the .got entries or jumped to plt0 out of the blue.
*/
}
/*
* Use relocation entry to get symbol table entry and symbol name.
*/
/*
 * Determine the last link-map of this list; this will be the starting
* point for any tsort() processing.
*/
/*
* Find definition for symbol. Initialize the symbol lookup, and
* symbol result, data structures.
*/
}
/*
* Record that this new link map is now bound to the caller.
*/
}
&sb_flags);
}
if (!(rtld_flags & RT_FL_NOBIND)) {
int fail = 0;
&fail);
if (fail)
} else {
/*
* Write standard PLT entry to jump directly
* to newly bound function.
*/
}
}
/*
 * Print binding information and rebuild the PLT entry (a sketch of the
 * got rewrite follows this function).
*/
/*
* Complete any processing for newly loaded objects. Note we don't
* know exactly where any new objects are loaded (we know the object
* that supplied the symbol, but others may have been loaded lazily as
* we searched for the symbol), so sorting starts from the last
 * link-map known on entry to this routine.
*/
if (entry)
/*
* Some operations like dldump() or dlopen()'ing a relocatable object
 * result in objects being loaded on rtld's link-map; make sure these
* objects are initialized also.
*/
/*
 * Make sure the object to which we've bound has had its .init fired.
 * Clean up before returning to user code.
*/
if (entry) {
}
if (lmflags & LML_FLG_RTLDLM)
return (symval);
}
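/*
 * A minimal sketch of what rebuilding an amd64 plt entry amounts to.
 * Each plt entry is an indirect jmp through its got slot, so binding is
 * completed simply by storing the resolved address into that slot;
 * subsequent calls then bypass the binder entirely.  (Hypothetical
 * helper; the real work is done via the plt writing interface further
 * below.)
 */
static void
plt_rebind_sketch(uintptr_t *got_slot, uintptr_t symval)
{
	*got_slot = symval;
}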
/*
 * Read and process the relocations for one link object; we assume all
* relocation sections for loadable segments are stored contiguously in
* the file.
*/
int
{
/*
* Although only necessary for lazy binding, initialize the first
* global offset entry to go to elf_rtbndr(). dbx(1) seems
* to find this useful.
*/
/*
* Make sure the segment is writable.
*/
if ((((mpp =
return (0);
}
/*
* Initialize the plt start and end addresses.
*/
/*
* If we've been called upon to promote an RTLD_LAZY object to an
 * RTLD_NOW, then we're only interested in scanning the .plt table.
* An uninitialized .plt is the case where the associated got entry
* points back to the plt itself. Determine the range of the real .plt
* entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
*/
if (plt) {
return (1);
/*
* Initialize the symbol lookup, and symbol result, data
* structures.
*/
return (1);
} else {
/*
* The relocation sections appear to the run-time linker as a
* single table. Determine the address of the beginning and end
* of this table. There are two different interpretations of
* the ABI at this point:
*
* o The REL table and its associated RELSZ indicate the
* concatenation of *all* relocation sections (this is the
* model our link-editor constructs).
*
* o The REL table and its associated RELSZ indicate the
* concatenation of all *but* the .plt relocations. These
* relocations are specified individually by the JMPREL and
* PLTRELSZ entries.
*
 * Determine, from our knowledge of the relocation range and
 * .plt range, the range of the total relocation table. Note
 * that one other ABI assumption seems to be that the .plt
 * relocations always follow any other relocations; the
 * following range checking drops that assumption.
*/
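		/*
		 * Illustrative statements only (these variable names are
		 * hypothetical):  take the union of the REL/RELSZ range
		 * and the JMPREL/PLTRELSZ range, so that both ABI
		 * interpretations above are covered.
		 */
		if (pltbgn && (pltbgn < relbgn))
			relbgn = pltbgn;
		if (pltend && (pltend > relend))
			relend = pltend;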
if (pltbgn) {
}
}
return (1);
}
/*
* If we're processing a dynamic executable in lazy mode there is no
 * need to scan the .rel.plt table; however, if we're processing a shared
 * object in lazy mode, the .got addresses associated with each .plt must
* be relocated to reflect the location of the shared object.
*/
noplt = 1;
/*
* Loop through relocations.
*/
/*
* If this is a RELATIVE relocation in a shared object (the
* common case), and if we are not debugging, then jump into a
* tighter relocation loop (elf_reloc_relative).
*/
if ((rtype == R_AMD64_RELATIVE) &&
if (relacount) {
textrel, 0);
relacount = 0;
} else {
}
break;
}
/*
* If this is a shared object, add the base address to offset.
*/
/*
* If we're processing lazy bindings, we have to step
* through the plt entries and add the base address
* to the corresponding got entry.
*/
(rtype == R_AMD64_JUMP_SLOT) &&
plthint = 0;
continue;
}
}
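		/*
		 * Illustratively (hypothetical names), the lazy fixup
		 * described above just adds the object's base address to
		 * the got entry that the plt entry jumps through:
		 */
		*(Addr *)roffset += (Addr)basebgn;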
/*
* Optimizations.
*/
if (rtype == R_AMD64_NONE)
continue;
continue;
}
/*
* If we're promoting plts, determine if this one has already
* been written.
*/
continue;
/*
* If this relocation is not against part of the image
 * mapped into memory, we skip it.
*/
rsymndx);
continue;
}
binfo = 0;
/*
* If a symbol index is specified then get the symbol table
* entry, locate the symbol definition, and determine its
* address.
*/
if (rsymndx) {
/*
* If a Syminfo section is provided, determine if this
* symbol is deferred, and if so, skip this relocation.
*/
continue;
/*
* Get the local symbol table entry.
*/
/*
* If this is a local symbol, just use the base address.
* (we should have no local relocations in the
* executable).
*/
/*
* Special case TLS relocations.
*/
if (rtype == R_AMD64_DTPMOD64) {
/*
* Use the TLS modid.
*/
} else if ((rtype == R_AMD64_TPOFF64) ||
(rtype == R_AMD64_TPOFF32)) {
ret = 0;
break;
}
}
} else {
/*
 * If the symbol index is equal to that of the
 * previous relocation we processed, then
 * reuse the previous values. (Note that there
 * have been cases where a relocation exists
 * against a copy relocation symbol; our ld(1)
* should optimize this away, but make sure we
* don't use the same symbol information should
* this case exist).
*/
(rtype != R_AMD64_COPY)) {
/* LINTED */
if (psymdef == 0) {
continue;
}
/* LINTED */
/* LINTED */
/* LINTED */
/* LINTED */
/* LINTED */
/* LINTED */
/* LINTED */
&sb_flags);
}
} else {
/*
 * Look up the symbol definition.
 * Initialize the symbol lookup, and
 * symbol result, data structures.
*/
in_nfavl)) {
}
/*
* If the symbol is not found and the
* reference was not to a weak symbol,
* report an error. Weak references
* may be unresolved.
*/
/* BEGIN CSTYLED */
if (symdef == 0) {
continue;
ret = 0;
break;
} else {
psymdef = 0;
continue;
}
}
/* END CSTYLED */
/*
 * If the symbol was found in an object
 * other than the referencing object,
 * then record the binding.
*/
FL1_RT_NOINIFIN) == 0)) {
AL_CNT_RELBIND) == 0) {
ret = 0;
break;
}
}
/*
* Calculate the location of definition;
* symbol value plus base address of
* containing shared object.
*/
else
STT_TLS))
/*
* Retain this symbol index and the
* value in case it can be used for the
* subsequent relocations.
*/
if (rtype != R_AMD64_COPY) {
}
&sb_flags);
}
}
/*
* If relocation is PC-relative, subtract
 * offset address (see the value-computation
 * sketch following this function).
*/
if (IS_PC_RELATIVE(rtype))
/*
* Special case TLS relocations.
*/
if (rtype == R_AMD64_DTPMOD64) {
/*
* Relocation value is the TLS modid.
*/
} else if ((rtype == R_AMD64_TPOFF64) ||
(rtype == R_AMD64_TPOFF32)) {
value)) == 0) {
ret = 0;
break;
}
}
}
} else {
/*
* Special cases.
*/
if (rtype == R_AMD64_DTPMOD64) {
/*
* TLS relocation value is the TLS modid.
*/
} else
}
/*
* Make sure the segment is writable.
*/
ret = 0;
break;
}
/*
* Call relocation routine to perform required relocation.
*/
switch (rtype) {
case R_AMD64_COPY:
ret = 0;
break;
case R_AMD64_JUMP_SLOT:
int fail = 0;
if (fail)
ret = 0;
} else {
/*
* Write standard PLT entry to jump directly
* to newly bound function.
*/
}
break;
default:
/*
* Write the relocation out.
*/
ret = 0;
}
if ((ret == 0) &&
break;
if (binfo) {
}
}
}
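/*
 * A condensed, illustrative restatement of the value computation carried
 * out above (hypothetical helper; the real loop also handles auditing,
 * copy relocations, deferred symbols, and error reporting):  the value
 * is S + A, PC-relative types subtract the relocation address P, and
 * R_AMD64_DTPMOD64 yields a TLS module id rather than an address.
 */
static Xword
reloc_value_sketch(uint_t rtype, Xword symval, Xword addend, Xword roffset,
    Xword tlsmodid)
{
	Xword	value = symval + addend;	/* S + A */

	if (IS_PC_RELATIVE(rtype))
		value -= roffset;		/* S + A - P */

	if (rtype == R_AMD64_DTPMOD64)
		value = tlsmodid;	/* module id, not an address */

	return (value);
}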
/*
* Initialize the first few got entries so that function calls go to
* elf_rtbndr:
*
* GOT[GOT_XLINKMAP] = the address of the link map
* GOT[GOT_XRTLD] = the address of rtbinder
*/
void
{
/* LINTED */
}
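/*
 * A sketch of the initialization just described, using the reserved got
 * indexes named above (the function and parameter names here are
 * hypothetical):
 */
static void
got_init_sketch(Addr *got, Rt_map *lmp, void (*rtbinder)(void))
{
	got[GOT_XLINKMAP] = (Addr)lmp;		/* back-pointer for binder */
	got[GOT_XRTLD] = (Addr)rtbinder;	/* plt0 transfers here */
}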
/*
* Plt writing interface to allow debugging initialization to be generic.
*/
/* ARGSUSED1 */
{
DBG_CALL(pltcntfull++);
return (PLT_T_FULL);
}
/*
* Provide a machine specific interface to the conversion routine. By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't
 * dragged into ld.so.1.
*/
const char *
{
}
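/*
 * For reference, a minimal sketch of such a machine specific wrapper
 * (the static buffer handling is illustrative):
 */
static const char *
conv_reloc_type_sketch(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}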