elf.c revision 41072f3cdbe3949252c173e744eb182ef4cc525f
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 1988 AT&T
* All Rights Reserved
*
*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Object file dependent support for ELF objects.
*/
#include "_synonyms.h"
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <dlfcn.h>
#include "conv.h"
#include "_rtld.h"
#include "_audit.h"
#include "_elf.h"
#include "msg.h"
#include "debug.h"
/*
* Default and secure dependency search paths.
*/
static Pnode elf_dflt_dirs[] = {
#if defined(_ELF64)
#ifndef SGS_PRE_UNIFIED_PROCESS
#endif
LA_SER_DEFAULT, 0, 0 }
#else
#ifndef SGS_PRE_UNIFIED_PROCESS
#endif
LA_SER_DEFAULT, 0, 0 }
#endif
};
static Pnode elf_secure_dirs[] = {
#if defined(_ELF64)
#ifndef SGS_PRE_UNIFIED_PROCESS
#endif
{ MSG_ORIG(MSG_PTH_USRLIBSE_64), 0,
LA_SER_SECURE, 0, 0 }
#else
#ifndef SGS_PRE_UNIFIED_PROCESS
#endif
LA_SER_SECURE, 0, 0 }
#endif
};
/*
* Defines for local functions.
*/
static ulong_t elf_entry_pt(void);
static char *elf_get_so(const char *, const char *);
static void elf_unmap_so(Rt_map *);
/*
* Functions and data accessed through indirect pointers.
*/
};
/*
* Redefine NEEDED name if necessary.
*/
static Pnode *
{
/*
* For ABI compliance, if we are asked for ld.so.1, then really give
*/
if (((*name == '/') &&
#if defined(_ELF64)
#else
#endif
if (pnp)
return (0);
}
return (pnp);
}
}
/*
* Determine if we have been given an ELF file and if so determine if the file
* is compatible. Returns 1 if true, else 0 and sets the reject descriptor
* with associated error information.
*/
static int
{
/*
* Determine if we're an elf file. If not simply return, we don't set
* any rejection information as this test allows us to scroll through
* the objects we support (ELF, AOUT).
*/
return (0);
}
/*
* Check class and encoding.
*/
/* LINTED */
return (0);
}
return (0);
}
return (0);
}
/*
* Verify machine specific flags, and hardware capability requirements.
*/
return (0);
/*
* Verify ELF version. ??? is this too restrictive ???
*/
return (0);
}
return (1);
}
/*
* The runtime linker employs lazy loading to provide the libraries needed for
* debugging, preloading .o's and dldump(). As these are seldom used, the
* standard startup of ld.so.1 doesn't initialize all the information necessary
* to perform plt relocation on ld.so.1's link-map. The first time lazy loading
* is called we get here to perform these initializations:
*
* o elf_needed() is called to set up the DYNINFO() indexes for each lazy
* dependency. Typically, for all other objects, this is called during
* analyze_so(), but as ld.so.1 is self-contained we skip this processing.
*
* o For intel, ld.so.1's JMPSLOT relocations need relative updates. These
* are by default skipped thus delaying all relative relocation processing
* on every invocation of ld.so.1.
*/
int
{
return (1);
/*
* As we need to refer to the DYNINFO() information, insure that it has
* been initialized.
*/
return (0);
#if defined(i386)
/*
* This is a kludge to give ld.so.1 a performance benefit on i386.
* It's based around two factors.
*
* o JMPSLOT relocations (PLT's) actually need a relative relocation
* applied to the GOT entry so that they can find PLT0.
*
* o ld.so.1 does not exercise *any* PLT's before it has made a call
* to elf_lazy_load(). This is because all dynamic dependencies
* are recorded as lazy dependencies.
*/
#endif
return (1);
}
/*
* Lazy load an object.
*/
Rt_map *
{
const char *name;
/*
* If this dependency has already been processed, we're done.
*/
return (nlmp);
/*
* Determine the initial dependency name, and indicate that this
* dependencies processing has initiated.
*/
/*
* Expand the requested name if necessary.
*/
return (0);
/*
* Provided the object on the head of the link-map has completed its
* relocation, create a new link-map control list for this request.
*/
AL_CNT_LMLISTS)) == 0) {
return (0);
}
} else {
lmc = 0;
}
/*
* Load the associated object.
*/
/*
* Remove any expanded pathname infrastructure. Reduce the pending lazy
* dependency count of the caller, together with the link-map lists
* count of objects that still have lazy dependencies pending.
*/
/*
* Finish processing the objects associated with this request.
*/
/*
 * If the dependency has been successfully processed, and it is part of
 * a link-map control list that is equivalent, or less, than the callers
 * control list, create an association between the caller and this
 * dependency. If this dependency isn't yet a part of the callers
 * link-map control list, then it is still a part of a list that is being
 * relocated. As the relocation of an object on this list might still
 * fail, we can't yet bind the caller to this object. To do so, would
 * be locking the object so that it couldn't be deleted. Mark this
 * object as free, and it will be reprocessed when this dependency is
 * next referenced.
 */
if (nlmp) {
} else {
}
}
/*
* After a successful load, any objects collected on the new link-map
* control list will have been moved to the callers link-map control
* list. This control list can now be deleted.
*/
if (lmc) {
if (nlmp == 0)
}
return (nlmp);
}
/*
* Return the entry point of the ELF executable.
*/
static ulong_t
elf_entry_pt(void)
{
/*
 * NOTE(review): this function body appears to have been lost during
 * extraction — as written, the function falls off the end without a
 * return statement, so using its result is undefined behavior.
 * Presumably the upstream implementation returns the recorded entry
 * point of the executable (per the header comment above) — restore the
 * original body before use.
 */
}
/*
* Unmap a given ELF shared object from the address space.
*/
static void
{
/*
* If this link map represents a relocatable object concatenation, then
* the image was simply generated in allocated memory. Free the memory.
*
* Note: the memory was originally allocated in the libelf:_elf_outmap
* routine and would normally have been free'd in elf_outsync(), but
* because we 'interpose' on that routine the memory wasn't free'd at
* that time.
*/
return;
}
/*
* If padding was enabled via rtld_db, then we have at least one page
* in front of the image - and possibly a trailing page.
* Unmap the front page first:
*/
}
/*
* Unmap any trailing padding.
*/
}
/*
* Unmmap all mapped segments.
*/
}
/*
* Determine if a dependency requires a particular version and if so verify
* that the version exists in the dependency.
*/
static int
{
/*
* Traverse the callers version needed information and determine if any
* specific versions are required from the dependency.
*/
/*
* Determine if a needed entry matches this dependency.
*/
continue;
/*
* Validate that each version required actually exists in the
* dependency.
*/
char *version, *define;
int found = 0;
continue;
continue;
found++;
break;
}
/*
* If we're being traced print out any matched version
* when the verbose (-v) option is in effect. Always
* print any unmatched versions.
*/
if (found) {
continue;
} else {
if (rtld_flags & RT_FL_SILENCERR)
continue;
}
continue;
}
/*
* If the version hasn't been found then this is a
* candidate for a fatal error condition. Weak
* version definition requirements are silently
* ignored. Also, if the image inspected for a version
* definition has no versioning recorded at all then
* silently ignore this (this provides better backward
* compatibility to old images created prior to
* versioning being available). Both of these skipped
* diagnostics are available under tracing (see above).
*/
return (0);
}
}
return (1);
}
return (1);
}
/*
* Search through the dynamic section for DT_NEEDED entries and perform one
* of two functions. If only the first argument is specified then load the
* defined shared object, otherwise add the link map representing the defined
* link map to the dlopen list.
*/
static int
{
/*
* Process each shared object on needed list.
*/
return (1);
char *name;
int silent = 0;
case DT_POSFLAG_1:
!(lmtflags & LML_TFLG_NOLAZYLD))
lazy = 1;
continue;
case DT_NEEDED:
case DT_USED:
if (flags)
/*
* NOTE, libc.so.1 can't be lazy loaded. Although a
* lazy position flag won't be produced when a RTLDINFO
* .dynamic entry is found (introduced with the UPM in
* Solaris 10), it was possible to mark libc for lazy
* loading on previous releases. To reduce the overhead
* of testing for this occurrence, only carry out this
* check for the first object on the link-map list
* (there aren't many applications built without libc).
*/
lazy = 0;
/*
* Don't bring in lazy loaded objects yet unless we've
* been asked to attempt to load all available objects
* (crle(1) sets LD_FLAGS=loadavail). Even under
* RTLD_NOW we don't process this - RTLD_NOW will cause
* relocation processing which in turn might trigger
* lazy loading, but its possible that the object has a
* lazy loaded file with no bindings (i.e., it should
* never have been a dependency in the first place).
*/
if (lazy) {
if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
continue;
}
/*
* Silence any error messages - see description
* under elf_lookup_filtee().
*/
if ((rtld_flags & RT_FL_SILENCERR) == 0) {
silent = 1;
}
}
break;
case DT_AUXILIARY:
continue;
case DT_SUNW_AUXILIARY:
continue;
case DT_FILTER:
continue;
case DT_SUNW_FILTER:
continue;
default:
continue;
}
/*
* Establish the objects name, load it and establish a binding
* with the caller.
*/
nlmp = 0;
/*
* Clean up any infrastructure, including the removal of the
* error suppression state, if it had been previously set in
* this routine.
*/
if (pnp)
if (silent)
/*
* If the object could not be mapped, continue if error
* suppression is established or we're here with ldd(1).
*/
continue;
else
return (0);
}
}
return (1);
}
static int
{
int err;
/*
* If memory reservations have been established for alternative objects
* determine if this object falls within the reservation, if it does no
* further checking is required.
*/
if (rtld_flags & RT_FL_MEMRESV) {
return (0);
}
/*
* Determine the mappings presently in use by this process.
*/
return (1);
return (1);
}
return (1);
return (1);
}
/*
* Determine if the supplied address clashes with any of the present
* process mappings.
*/
const char *str;
continue;
/*
* We have a memory clash. See if one of the known dynamic
* dependency mappings represents this space so as to provide
* the user a more meaningful message.
*/
else
return (1);
}
return (0);
}
/*
* Obtain a memory reservation. On newer systems, both MAP_ANON and MAP_ALIGN
* are used to obtain an aligned reservation from anonymous memory. If
* MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
* reservation using the file as backing.
*/
static Am_ret
{
#if defined(MAP_ALIGN)
if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
}
#endif
return (amret);
return (AM_OK);
/*
* If an anonymous memory request failed (which should only be the
* case if it is unsupported on the system we're running on), establish
* the initial mapping directly from the file.
*/
*maddr = 0;
fd, 0)) == MAP_FAILED) {
return (AM_ERROR);
}
return (AM_NOSUP);
}
static void *
{
#if defined(MAP_TEXT) && defined(MAP_INITDATA)
static int notd = 0;
/*
* If MAP_TEXT and MAP_INITDATA are available, select the appropriate
* flag.
*/
if (notd == 0) {
else
mflag |= MAP_INITDATA;
}
#endif
return (0);
#if defined(MAP_TEXT) && defined(MAP_INITDATA)
/*
* MAP_TEXT and MAP_INITDATA may not be supported on this
* platform, try again without.
*/
notd = 1;
}
#endif
return (MAP_FAILED);
}
/*
* Map in a file.
*/
static caddr_t
const char *name, /* actual name stored for pathname */
int fixed, /* image is resolved to a fixed addr */
int fd, /* images file descriptor */
{
int skipfseg; /* skip mapping first segment */
int mperm; /* segment permissions */
/*
* If padding is required extend both the front and rear of the image.
* To insure the image itself is mapped at the correct alignment the
* initial padding is rounded up to the nearest page. Once the image is
* mapped the excess can be pruned to the nearest page required for the
* actual padding itself.
*/
if (fixed)
else
}
/*
* Determine the initial permissions used to map in the first segment.
* If this segments memsz is greater that its filesz then the difference
* must be zeroed. Make sure this segment is writable.
*/
mperm = 0;
mperm |= PROT_WRITE;
/*
* Determine whether or not to let system reserve address space based on
* whether this is a dynamic executable (addresses in object are fixed)
* or a shared object (addresses in object are relative to the objects'
* base).
*/
if (fixed) {
/*
* Determine the reservation address and size, and insure that
* this reservation isn't already in use.
*/
return (0);
}
/*
* As this is a fixed image, all segments must be individually
* mapped.
*/
skipfseg = 0;
} else {
/*
* If this isn't a fixed image, reserve enough address space for
* the entire image to be mapped. The amount of reservation is
* the range between the beginning of the first, and end of the
* last loadable segment, together with any padding, plus the
* alignment of the first segment.
*
* The optimal reservation is made as a no-reserve mapping from
* anonymous memory. Each segment is then mapped into this
* reservation. If the anonymous mapping capability isn't
* available, the reservation is obtained from the file itself.
* In this case the first segment of the image is mapped as part
* of the reservation, thus only the following segments need to
* be remapped.
*/
return (0);
/*
* If this reservation has been obtained from anonymous memory,
* then all segments must be individually mapped. Otherwise,
* the first segment heads the reservation.
*/
skipfseg = 0;
else
skipfseg = 1;
/*
* For backward compatibility (where MAP_ALIGN isn't available),
* insure the alignment of the reservation is adequate for this
* object, and if not remap the object to obtain the correct
* alignment.
*/
return (0);
/*
* As ths image has been realigned, the first segment
* of the file needs to be remapped to its correct
* location.
*/
skipfseg = 0;
} else
/*
* If this reservation included padding, remove any excess for
* the start of the image (the padding was adjusted to insure
* the image was aligned appropriately).
*/
if (esize) {
}
}
/*
* At this point we know the initial location of the image, and its
* size. Pass these back to the caller for inclusion in the link-map
* that will eventually be created.
*/
/*
* The first loadable segment is now pointed to by maddr. This segment
* will eventually contain the elf header and program headers, so reset
* the program header. Pass this back to the caller for inclusion in
* the link-map so it can be used for later unmapping operations.
*/
/* LINTED */
/*
* If padding is required at the front of the image, obtain that now.
* Note, if we've already obtained a reservation from anonymous memory
* then this reservation will already include suitable padding.
* Otherwise this reservation is backed by the file, or in the case of
* a fixed image, doesn't yet exist. Map the padding so that it is
* suitably protected (PROT_NONE), and insure the first segment of the
* file is mapped to its correct location.
*/
if (padsize) {
return (0);
skipfseg = 0;
}
}
/*
* Map individual segments. For a fixed image, these will each be
* unique mappings. For a reservation these will fill in the
* reservation.
*/
/*
* Skip non-loadable segments or segments that don't occupy
* any memory.
*/
continue;
/*
* Establish this segments address relative to our base.
*/
/*
* Determine the mapping protection from the segment attributes.
* Also determine the etext address from the last loadable
* segment which has permissions but no write access.
*/
mperm = 0;
mperm |= PROT_WRITE;
else
}
/*
* Determine the type of mapping required.
*/
/*
* Potentially, we can defer the loading of any SUNWBSS
* segment, depending on whether the symbols it provides
* have been bound to. In this manner, large segments
* that are interposed upon between shared libraries
* may not require mapping. Note, that the mapping
* information is recorded in our mapping descriptor at
* this time.
*/
flen = 0;
/*
* If this segment has no backing file and no flags
* specified, then it defines a reservation. At this
* point all standard loadable segments will have been
* processed. The segment reservation is mapped
*/
return (0);
flen = 0;
/*
* If this segment has no backing file then it defines a
*/
return (0);
flen = 0;
} else {
/*
* This mapping originates from the file. Determine the
* file offset to which the mapping will be directed
* (must be aligned) and how much to map (might be more
* than the file in the case of .bss).
*/
/*
* If this is a non-fixed, non-anonymous mapping, and no
* padding is involved, then the first loadable segment
* is already part of the initial reservation. In this
* case there is no need to remap this segment.
*/
int phdr_mperm = mperm;
/*
* If this segments memsz is greater that its
* filesz then the difference must be zeroed.
* Make sure this segment is writable.
*/
mperm |= PROT_WRITE;
MAP_FAILED) {
return (0);
}
}
/*
* If the memory occupancy of the segment overflows the
* definition in the file, we need to "zero out" the end
* of the mapping we've established, and if necessary,
* memory must end on a double word boundary to satisfy
* zero().
*/
/*
* Determine whether the number of bytes that
* must be zero'ed overflow to the next page.
* If not, simply clear the exact bytes
* (filesz to memsz) from this page. Otherwise,
* clear the remaining bytes of this page, and
*/
else {
MAP_FIXED | MAP_PRIVATE) ==
return (0);
}
}
}
}
/*
* Unmap anything from the last mapping address to this one and
* update the mapping claim pointer.
*/
}
/*
* Retain this segments mapping information.
*/
(*mmapcnt)++;
}
/*
* If padding is required at the end of the image, obtain that now.
* Note, if we've already obtained a reservation from anonymous memory
* then this reservation will already include suitable padding.
*/
if (padsize) {
/*
* maddr is currently page aligned from the last segment
* mapping.
*/
return (0);
}
}
/*
* Unmap any final reservation.
*/
return (faddr);
}
/*
* A null symbol interpretor. Used if a filter has no associated filtees.
*/
/* ARGSUSED0 */
static Sym *
{
return ((Sym *)0);
}
/*
* Disable filtee use.
*/
static void
{
/*
* If this is an object filter, free the filtee's duplication.
*/
/*
* Indicate that this filtee is no longer available.
*/
}
/*
* Indicate that this standard filtee is no longer available.
*/
if (SYMSFLTRCNT(lmp))
SYMSFLTRCNT(lmp)--;
} else {
/*
* Indicate that this auxiliary filtee is no longer available.
*/
if (SYMAFLTRCNT(lmp))
SYMAFLTRCNT(lmp)--;
}
}
/*
* Find symbol interpreter - filters.
* This function is called when the symbols from a shared object should
* be resolved from the shared objects filtees instead of from within itself.
*
* A symbol name of 0 is used to trigger filtee loading.
*/
static Sym *
{
int any;
/*
* Indicate that the filter has been used. If a binding already exists
* to the caller, indicate that this object is referenced. This insures
* we don't generate false unreferenced diagnostics from ldd -u/U or
* debugging. Don't create a binding regardless, as this filter may
* have been dlopen()'ed.
*/
break;
}
}
}
}
}
/*
* If this is the first call to process this filter, establish the
* filtee list. If a configuration file exists, determine if any
* filtee associations for this filter, and its filtee reference, are
* defined. Otherwise, process the filtee reference. Any token
* expansion is also completed at this point (i.e., $PLATFORM).
*/
if (rtld_flags2 & RT_FL2_FLTCFG)
filtees, PN_SER_FILTEE, 0)) == 0) {
return ((Sym *)0);
}
}
}
/*
* Traverse the filtee list, dlopen()'ing any objects specified and
* using their group handle to lookup the symbol.
*/
int mode;
continue;
/*
* Establish the mode of the filtee from the filter. As filtees
* are loaded via a dlopen(), make sure that RTLD_GROUP is set
* and the filtees aren't global. It would be nice to have
* RTLD_FIRST used here also, but as filters got out long before
* RTLD_FIRST was introduced it's a little too late now.
*/
mode &= ~RTLD_GLOBAL;
/*
* Insure that any auxiliary filter can locate symbols from its
* caller.
*/
mode |= RTLD_PARENT;
/*
* Process any hardware capability directory. Establish a new
* link-map control list from which to analyze any newly added
* objects. Note that an lmc may already be allocated from a
* previous filtee dlopen() that failed.
*/
if ((lmc == 0) &&
sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0))
return ((Sym *)0);
if (lmc)
else
}
continue;
/*
* Process an individual filtee.
*/
int audit = 0;
ghp = 0;
/*
* Determine if the reference link map is already
* loaded. As an optimization compare the filtee with
* our interpretor. The most common filter is
*/
#if defined(_ELF64)
#else
#endif
/*
* Create an association between ld.so.1 and
* the filter.
*/
nlmp = 0;
/*
* Establish the filter handle to prevent any
* recursion.
*/
/*
* any return from the auditor, as we can't
* allow ignore filtering to ld.so.1, otherwise
* nothing is going to work.
*/
nlmp, 0);
} else {
/*
* Establish a new link-map control list from
* which to analyze any newly added objects.
* Note that an lmc may already be allocated
* from a previous filtee dlopen() that failed.
*/
if ((lmc == 0) &&
0, sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0))
return ((Sym *)0);
if (lmc)
else
/*
* Load the filtee.
*/
FLG_RT_HANDLE, &rej);
remove_rej(&rej);
}
/*
* Establish the filter handle to prevent any
* recursion.
*/
}
/*
* return of 0 indicates the auditor wishes to
* ignore this filtee.
*/
nlmp, 0) == 0) {
audit = 1;
nlmp = 0;
}
}
/*
* Finish processing the objects associated with
* this request. Create an association between
* this object and the originating filter to
* provide sufficient information to tear down
* this filtee if necessary.
*/
nlmp = 0;
/*
* If the filtee has been successfully
* processed, and it is part of a link-map
* control list that is equivalent, or less,
* than the filter control list, create an
* association between the filter and filtee.
* This association provides sufficient
* information to tear down the filter and
* filtee if necessary.
*/
nlmp = 0;
}
/*
* Generate a diagnostic if the filtee couldn't be
* loaded, null out the pnode entry, and continue
* the search. Otherwise, retain this group handle
* for future symbol searches.
*/
if (nlmp == 0) {
if (ghp)
if (lmc) {
}
continue;
}
}
/*
* If we're just here to trigger filtee loading skip the symbol
* lookup so we'll continue looking for additional filtees.
*/
if (name) {
any++;
/*
* Look for the symbol in the handles dependencies.
*/
continue;
/*
* If our parent is a dependency don't look at
* it (otherwise we are in a recursive loop).
* This situation can occur with auxiliary
* filters if the filtee has a dependency on the
* filter. This dependency isn't necessary as
* auxiliary filters are opened RTLD_PARENT, but
* users may still unknowingly add an explicit
* dependency to the parent.
*/
continue;
binfo)) != 0) ||
break;
}
/*
* If this filtee has just been loaded (nlmp != 0),
* determine whether the filtee was triggered by a
* relocation from an object that is still being
* relocated on a leaf link-map control list. As the
* relocation of an object on this list might still
* fail, we can't yet bind the filter to the filtee.
* To do so, would be locking the filtee so that it
* couldn't be deleted, and the filtee itself could have
* bound to an object that must be torn down. Insure
* the caller isn't bound to the handle at this time.
* Any association will be reestablished when the filter
* is later referenced and the filtee has propagated to
* the same link-map control list.
*/
}
if (sym) {
if (lmc)
*binfo |= DBG_BINFO_FILTEE;
return (sym);
}
}
/*
* If this object is tagged to terminate filtee processing we're
* done.
*/
break;
}
if (lmc)
/*
* If we're just here to trigger filtee loading then we're done.
*/
if (name == 0)
return ((Sym *)0);
/*
* If no filtees have been found for a filter, clean up any Pnode
* structures and disable their search completely. For auxiliary
* filters we can reselect the symbol search function so that we never
* enter this routine again for this object. For standard filters we
* use the null symbol routine.
*/
if (any == 0) {
return ((Sym *)0);
}
return ((Sym *)0);
}
/*
* Focal point for disabling error messages for auxiliary filters. As an
* auxiliary filter allows for filtee use, but provides a fallback should a
* filtee not exist (or fail to load), any errors generated as a consequence of
* trying to load the filtees are typically suppressed. Setting RT_FL_SILENCERR
* suppresses errors generated by eprint(), but insures a debug diagnostic is
* produced. ldd(1) employs printf(), and here, the selection of whether to
* print a diagnostic in regards to auxiliary filters is a little more complex.
*
* . The determination of whether to produce an ldd message, or a fatal
* error message is driven by LML_FLG_TRC_ENABLE.
* . More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
* (ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
* and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
*
* . If the calling object is lddstub, then several classes of message are
* suppressed. The user isn't trying to diagnose lddstub, this is simply
* a stub executable employed to preload a user specified library against.
*
* . If RT_FL_SILENCERR is in effect then any generic ldd() messages should
* be suppressed. All detailed ldd messages should still be produced.
*/
Sym *
{
int silent = 0;
/*
* Make sure this entry is still acting as a filter. We may have tried
* to process this previously, and disabled it if the filtee couldn't
* be processed. However, other entries may provide different filtees
* that are yet to be completed.
*/
return ((Sym *)0);
/*
* Indicate whether an error message is required should this filtee not
* be found, based on the type of filter.
*/
silent = 1;
}
if (silent)
return (sym);
}
/*
* Compute the elf hash value (as defined in the ELF access library).
* The form of the hash table is:
*
* |--------------|
* | # of buckets |
* |--------------|
* | # of chains |
* |--------------|
* | bucket[] |
* |--------------|
* | chain[] |
* |--------------|
*/
{
while (*name) {
uint_t g;
if ((g = (hval & 0xf0000000)) != 0)
hval ^= g >> 24;
hval &= ~g;
}
}
/*
* If flag argument has LKUP_SPEC set, we treat undefined symbols of type
* function specially in the executable - if they have a value, even though
* undefined, we use that value. This allows us to associate all references
* to a function's address to a single place in the process: the plt entry
* for that function in the executable. Calls to lookup from plt binding
* routines do NOT set LKUP_SPEC in the flag.
*/
Sym *
{
char *strtabptr, *strtabname;
return ((Sym *)0);
/* LINTED */
/*
* Get the first symbol on hash chain and initialize the string
* and symbol table pointers.
*/
return ((Sym *)0);
while (ndx) {
/*
* Compare the symbol found with the name required. If the
* names don't match continue with the next hash entry.
*/
continue;
return ((Sym *)0);
}
/*
* If we find a match and the symbol is defined, return the
* symbol pointer and the link map in which it was found.
*/
*binfo |= DBG_BINFO_FOUND;
*binfo |= DBG_BINFO_INTERPOSE;
return (sym);
break;
/*
* If we find a match and the symbol is undefined, the
* symbol type is a function, and the value of the symbol
* is non zero, then this is a special case. This allows
* the resolution of a function address to the plt[] entry.
* See SPARC ABI, Dynamic Linking, Function Addresses for
* more details.
*/
*binfo |= DBG_BINFO_INTERPOSE;
return (sym);
}
/*
* Undefined symbol.
*/
return (sym);
return ((Sym *)0);
}
/*
* We've found a match. Determine if the defining object contains
* symbol binding information.
*/
/* LINTED */
/*
* If this is a direct binding request, but the symbol definition has
* disabled directly binding to it (presumably because the symbol
* definition has been changed since the referring object was built),
* indicate this failure so that the caller can fall back to a standard
* symbol search. Clear any debug binding information for cleanliness.
*/
*binfo |= BINFO_DIRECTDIS;
*binfo &= ~DBG_BINFO_MSK;
return ((Sym *)0);
}
/*
* Determine whether this object is acting as a filter.
*/
return (sym);
/*
* Determine if this object offers per-symbol filtering, and if so,
* whether this symbol references a filtee.
*/
/*
* If this is a standard filter reference, and no standard
* filtees remain to be inspected, we're done. If this is an
* auxiliary filter reference, and no auxiliary filtees remain,
* we'll fall through in case any object filtering is available.
*/
(SYMSFLTRCNT(ilmp) == 0))
return ((Sym *)0);
SYMAFLTRCNT(ilmp))) {
/*
* This symbol has an associated filtee. Lookup the
* symbol in the filtee, and if it is found return it.
* If the symbol doesn't exist, and this is a standard
* filter, return an error, otherwise fall through to
* catch any object filtering that may be available.
*/
sip->si_boundto)) != 0)
return (fsym);
return ((Sym *)0);
}
}
/*
* Determine if this object provides global filtering.
*/
/*
* This object has an associated filtee. Lookup the
* symbol in the filtee, and if it is found return it.
* If the symbol doesn't exist, and this is a standard
* filter, return and error, otherwise return the symbol
* within the filter itself.
*/
OBJFLTRNDX(ilmp))) != 0)
return (fsym);
}
if (flags1 & FL1_RT_OBJSFLTR)
return ((Sym *)0);
}
return (sym);
}
/*
* Create a new Rt_map structure for an ELF object and initialize
* all values.
*/
Rt_map *
{
/*
* Allocate space for the link-map and private elf information. Once
* these are allocated and initialized, we can use remove_so(0, lmp) to
* tear down the link-map should any failures occur.
*/
return (0);
return (0);
}
/*
* All fields not filled in were set to 0 by calloc.
*/
/*
* If this is a shared object, add the base address to each address.
* if this is an executable, use address as is.
*/
base = 0;
} else
/*
* Fill in rest of the link map entries with information from the file's
* dynamic structure.
*/
if (ld) {
void *rtldinfo;
/* CSTYLED */
case DT_SYMTAB:
break;
case DT_STRTAB:
break;
case DT_SYMENT:
break;
case DT_FEATURE_1:
crle = 1;
break;
case DT_MOVESZ:
break;
case DT_MOVEENT:
break;
case DT_MOVETAB:
break;
case DT_REL:
case DT_RELA:
/*
* At this time we can only handle 1 type of
* relocation per object.
*/
break;
case DT_RELSZ:
case DT_RELASZ:
break;
case DT_RELENT:
case DT_RELAENT:
break;
case DT_RELCOUNT:
case DT_RELACOUNT:
break;
case DT_TEXTREL:
break;
case DT_HASH:
break;
case DT_PLTGOT:
break;
case DT_PLTRELSZ:
break;
case DT_JMPREL:
break;
case DT_INIT:
break;
case DT_FINI:
break;
case DT_INIT_ARRAY:
base);
break;
case DT_INIT_ARRAYSZ:
break;
case DT_FINI_ARRAY:
base);
break;
case DT_FINI_ARRAYSZ:
break;
case DT_PREINIT_ARRAY:
base);
break;
case DT_PREINIT_ARRAYSZ:
break;
case DT_RPATH:
case DT_RUNPATH:
break;
case DT_FILTER:
break;
case DT_AUXILIARY:
if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
}
break;
case DT_SUNW_FILTER:
SYMSFLTRCNT(lmp)++;
break;
case DT_SUNW_AUXILIARY:
if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
SYMAFLTRCNT(lmp)++;
}
break;
case DT_DEPAUDIT:
if (!(rtld_flags & RT_FL_NOAUDIT))
break;
case DT_CONFIG:
break;
case DT_DEBUG:
/*
* DT_DEBUG entries are only created in
* dynamic objects that require an interpretor
* (ie. all dynamic executables and some shared
* objects), and provide for a hand-shake with
* debuggers. This entry is initialized to
* zero by the link-editor. If a debugger has
* us and updated this entry set the debugger
* flag, and finish initializing the debugging
* structure (see setup() also). Switch off any
* configuration object use as most debuggers
* can't handle fixed dynamic executables as
* dependencies, and we can't handle requests
* like object padding for alternative objects.
*/
rtld_flags |=
break;
case DT_VERNEED:
base);
break;
case DT_VERNEEDNUM:
/* LINTED */
break;
case DT_VERDEF:
break;
case DT_VERDEFNUM:
/* LINTED */
break;
case DT_BIND_NOW:
}
break;
case DT_FLAGS:
}
break;
case DT_FLAGS_1:
}
crle = 1;
#ifndef EXPAND_RELATIVE
#endif
/*
* If this object identifies itself as an
* interposer, but relocation processing has
* already started, then demote it. It's too
* late to guarantee complete interposition.
*/
else {
(void) printf(
}
}
break;
case DT_SYMINFO:
base);
break;
case DT_SYMINENT:
break;
case DT_PLTPAD:
break;
case DT_PLTPADSZ:
break;
case DT_SUNW_RTLDINF:
if ((lml->lm_info_lmp != 0) &&
break;
}
/*
* We maintain a list of DT_SUNW_RTLDINFO
* structures for a given object. This permits
* the RTLDINFO structures to be grouped
* functionly inside of a shared object.
*
* For example, we could have one for
* thread_init, and another for atexit
* reservations.
*/
sizeof (void *), AL_CNT_RTLDINFO) == 0) {
return (0);
}
break;
case M_DT_REGISTER:
break;
case M_DT_PLTRESERVE:
base);
break;
}
}
else
pltpadsz);
}
/*
* Allocate Dynamic Info structure
*/
sizeof (Dyninfo))) == 0) {
return (0);
}
}
/*
* If configuration file use hasn't been disabled, and a configuration
* file hasn't already been set via an environment variable, see if any
* application specific configuration file is specified. An LD_CONFIG
* setting is used first, but if this image was generated via crle(1)
* then a default configuration file is a fall-back.
*/
if (cfile)
else if (crle) {
#ifndef EXPAND_RELATIVE
#endif
}
}
if (rpath)
if (fltr) {
/*
* If this object is a global filter, duplicate the filtee
* string name(s) so that REFNAME() is available in core files.
* This cludge was useful for debuggers at one point, but only
* when the filtee name was an individual full path.
*/
return (0);
}
}
if (rtld_flags & RT_FL_RELATIVE)
/*
* For Intel ABI compatibility. It's possible that a JMPREL can be
* specified without any other relocations (e.g. a dynamic executable
* normally only contains .plt relocations). If this is the case then
* no REL, RELSZ or RELENT will have been created. For us to be able
* to traverse the .plt relocations under LD_BIND_NOW we need to know
* the RELENT for these relocations. Refer to elf_reloc() for more
* details.
*/
/*
* Establish any per-object auditing. If we're establishing `main's
* link-map its too early to go searching for audit objects so just
* hold the object name for later (see setup()).
*/
if (audit) {
if (*cp) {
return (0);
}
return (0);
}
}
}
}
return (0);
}
return (0);
}
/*
* Add the mapped object to the end of the link map list.
*/
return (lmp);
}
/*
* Process hardware/software capability entries (CA_SUNW_HW_1 and
* CA_SUNW_SF_1 tags), advancing through the capability array.
*
* NOTE(review): this fragment is incomplete -- the function name/parameter
* line, the enclosing loop and switch headers, and the case bodies are
* missing from this extraction.  Only the case labels and the `cap++'
* advance survive.  Recover the full function from the original elf.c
* before making any code change here.
*/
void
{
case CA_SUNW_HW_1:
break;
case CA_SUNW_SF_1:
}
cap++;
}
}
/*
* Map in an ELF object.
* Takes an open file descriptor for the object to map and its pathname; returns
* a pointer to a Rt_map structure for this object, or 0 on error.
*
* NOTE(review): this copy of the function is incomplete -- the function
* name/parameter lines and the bulk of the executable statements are missing
* from the extraction; mostly comments, a few statements and block delimiters
* survive.  Recover the complete function from the original elf.c before
* making code changes; the comments below document the intended flow only.
*/
static Rt_map *
int fd)
{
int i; /* general temporary */
int fixed;
/* LINTED */
/*
* If this is a relocatable object then special processing is required.
*/
/*
* If this isn't a dynamic executable or shared object we can't process
* it. If this is a dynamic executable then all addresses are fixed.
*/
fixed = 1;
fixed = 0;
else {
return (0);
}
/*
* If our original mapped page was not large enough to hold all the
* program headers remap them.
*/
return (0);
}
fmap_setup();
return (0);
}
/* LINTED */
}
/* LINTED */
/*
* Get entry point.
*/
/*
* Point at program headers and perform some basic validation.
*/
if (fph == 0) {
/* LINTED argument lph is initialized in first pass */
pname);
return (0);
}
}
#if defined(MAP_ALIGN)
/*
* Make sure the maximum page alignment is a power of 2 >= the system
* page size, for use with MAP_ALIGN.
*/
#endif
/*
* We'd better have at least one loadable segment, together with some
* specified file and memory size.
*/
return (0);
}
/*
* Check that the file's size accounts for the loadable sections
* we're going to map in (failure to do this may cause spurious
* bus errors if we're given a truncated file).
*/
return (0);
}
/*
* Memsize must be page rounded so that if we add object padding
* at the end it will start at the beginning of a page.
*/
/*
* Determine if an existing mapping is acceptable.
*/
/*
* If this is the interpreter then it has already been mapped
* and we have the address so don't map it again. Note that
* the common occurrence of a reference to the interpreter
* (libdl -> ld.so.1) will have been caught during filter
* initialization (see elf_lookup_filtee()). However, some
* ELF implementations are known to record libc.so.1 as the
* interpreter, and thus this test catches this behavior.
*/
/*
* If the mapping required has already been established from
* the initial page we don't need to do anything more. Reset
* the fmap address so then any later files start a new fmap.
* This is really an optimization for filters, such as libdl.so,
* which should only require one page.
*/
fmap_setup();
}
/*
* Allocate a mapping array to retain mapped segment information.
*/
return (0);
/*
* If we're reusing an existing mapping determine the object's etext
* address. Otherwise map the file (which will calculate the etext
* address as part of the mapping process).
*/
if (faddr) {
if (fixed)
base = 0;
else
/* LINTED */
continue;
mmapcnt++;
}
}
} else {
/*
* Map the file.
*/
return (0);
}
/*
* Calculate absolute base addresses and entry points.
*/
if (!fixed) {
if (mld)
/* LINTED */
if (cap)
/* LINTED */
}
/*
* Create new link map structure for newly mapped shared object.
*/
mmapcnt))) {
return (0);
}
/*
* Start the system loading in the ELF information we'll be processing.
*/
}
/*
* If this shared object contains any special segments, record them.
*/
if (swph) {
}
if (tlph) {
}
if (unwindph)
if (cap)
return (lmp);
}
/*
* Function to correct protection settings. Segments are all mapped initially
* with permissions as given in the segment header. We need to turn on write
* permissions on a text segment if there are any relocations against that
* segment, and then turn write permission back off again before returning
* control to the user. This function turns the permission on or off depending
* on the value of the argument.
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* the mprotect() loop body are missing from this extraction.  Returns 1 on
* success and 0 on failure, per the surviving return statements; confirm
* against the full source.
*/
int
{
/*
* If this is an allocated image (ie. a relocatable object) we can't
* mprotect() anything.
*/
return (1);
continue;
return (0);
}
}
return (1);
}
/*
* Build full pathname of shared object from given directory name and filename.
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* the path-construction statements are missing; only the `return (pname)'
* survives.  Presumably this is the elf_get_so() declared in the file
* header -- TODO confirm against the full source.
*/
static char *
{
return (pname);
}
/*
* The copy relocation is recorded in a copy structure which will be applied
* after all other relocations are carried out. This provides for copying data
* that must be relocated itself (ie. pointers in shared objects). This
* structure also provides a means of binding RTLD_GROUP dependencies to any
* copy relocations that have been taken from any group members.
*
* If the size of the .bss area available for the copy information is not the
* same as the source of the data inform the user if we're under ldd(1) control
* (this checking was only established in 5.3, so by only issuing an error via
* ldd(1) we maintain the standard set by previous releases).
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* most statements are missing; only alist-append failure paths (returning 0)
* and the success returns (1) survive.  Recover the full function before
* editing.
*/
int
{
else
AL_CNT_COPYREL) == 0) {
return (0);
else
return (1);
}
sizeof (Rt_map *), AL_CNT_COPYREL) == 0) {
return (0);
else
return (1);
}
}
/*
* If we are tracing (ldd), warn the user if
* 1) the size from the reference symbol differs from the
* copy definition. We can only copy as much data as the
* reference (dynamic executables) entry allows.
* 2) the copy definition has STV_PROTECTED visibility.
*/
else
}
}
}
return (1);
}
/*
* Determine the symbol location of an address within a link-map. Look for
* the nearest symbol (whose value is less than or equal to the required
* address). This is the object specific part of dladdr().
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* the symbol-table traversal are missing; only the guard comments, a few
* `continue' filters and the final `_sym' fixup survive.  Recover the full
* function before editing.
*/
static void
{
const char *str;
/*
* If we don't have a .hash table there are no symbols to look at.
*/
return;
base = 0;
else
continue;
continue;
continue;
/*
* Note, because we accept local and global symbols we could
* find a section symbol that matches the associated address,
* which means that the symbol name will be null. In this
* case continue the search in case we can find a global
* symbol of the same value.
*/
break;
}
if (_sym) {
if (_flags == RTLD_DL_SYMENT)
else if (_flags == RTLD_DL_LINKMAP)
}
}
static void
/*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* the loop/free statements are missing from this extraction; only the
* purpose comment survives.  Recover the full function before editing.
*/
{
/*
* Cleanup any link-maps added to this dynamic list and free it.
*/
}
/*
* This routine is called upon to search for a symbol from the dependencies of
* the initial link-map. To maintain lazy loading's goal of reducing the number
* of objects mapped, any symbol search is first carried out using the objects
* that already exist in the process (either on a link-map list or handle).
* If a symbol can't be found, and lazy dependencies are still pending, this
* routine loads the dependencies in an attempt to locate the symbol.
*
* Only new objects are inspected as we will have already inspected presently
* loaded objects before calling this routine. However, a new object may not
* be new - although the di_lmp might be zero, the object may have been mapped
* as someone else's dependency. Thus there's a possibility of some symbol
* search duplication.
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* most of the traversal/lookup statements are missing; only the explanatory
* comments, a few `continue'/`break' statements and the alist-append failure
* path survive.  Recover the full function before editing.
*/
Sym *
{
return (0);
/*
* Loop through the DT_NEEDED entries examining each object for
* the symbol. If the symbol is not found the object is in turn
* added to the alist, so that its DT_NEEDED entries may be
* examined.
*/
continue;
/*
* If this entry defines a lazy dependency try loading
* it. If the file can't be loaded, consider this
* non-fatal and continue the search (lazy loaded
* dependencies need not exist and their loading should
* only be fatal if called from a relocation).
*
* If the file is already loaded and relocated we must
* still inspect it for symbols, even though it might
* have already been searched. This lazy load operation
* might have promoted the permissions of the object,
* and thus made the object applicable for this symbol
* search, whereas before the object might have been
* skipped.
*/
continue;
/*
* If this object isn't yet a part of the dynamic list
* then inspect it for the symbol. If the symbol isn't
* found add the object to the dynamic list so that we
* can inspect its dependencies.
*/
continue;
break;
/*
* Some dlsym() operations are already traversing a
* link-map (dlopen(0)), and thus there's no need to
* build our own dynamic dependency list.
*/
sizeof (Rt_map *), AL_CNT_LAZYFIND) == 0) {
return (0);
}
}
}
if (sym)
break;
}
return (sym);
}
/*
* Warning message for bad r_offset.
*
* NOTE(review): incomplete fragment -- the function name/parameter line and
* the message-formatting calls are missing; only the trace-flag selection
* and the name/symbol-index guards survive.  Recover the full function
* before editing.
*/
void
{
const char *name = (char *)0;
int trace;
(((rtld_flags & RT_FL_SILENCERR) == 0) ||
trace = 1;
else
trace = 0;
return;
if (rsymndx) {
}
if (name == 0)
if (trace) {
const char *rstr;
return;
}
}