elf.c revision 75e7992ad4e186443b61dab39f79c9d79802f295
2N/A/*
2N/A * CDDL HEADER START
2N/A *
2N/A * The contents of this file are subject to the terms of the
2N/A * Common Development and Distribution License (the "License").
2N/A * You may not use this file except in compliance with the License.
2N/A *
2N/A * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
2N/A * or http://www.opensolaris.org/os/licensing.
2N/A * See the License for the specific language governing permissions
2N/A * and limitations under the License.
2N/A *
2N/A * When distributing Covered Code, include this CDDL HEADER in each
2N/A * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
2N/A * If applicable, add the following below this CDDL HEADER, with the
2N/A * fields enclosed by brackets "[]" replaced with your own identifying
2N/A * information: Portions Copyright [yyyy] [name of copyright owner]
2N/A *
2N/A * CDDL HEADER END
2N/A */
2N/A
2N/A/*
2N/A * Copyright (c) 1988 AT&T
2N/A * All Rights Reserved
2N/A *
2N/A * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
2N/A * Use is subject to license terms.
2N/A */
2N/A#pragma ident "%Z%%M% %I% %E% SMI"
2N/A
2N/A/*
2N/A * Object file dependent support for ELF objects.
2N/A */
2N/A#include "_synonyms.h"
2N/A
2N/A#include <stdio.h>
2N/A#include <sys/procfs.h>
2N/A#include <sys/mman.h>
2N/A#include <sys/debug.h>
2N/A#include <string.h>
2N/A#include <limits.h>
2N/A#include <dlfcn.h>
2N/A#include <debug.h>
2N/A#include <conv.h>
2N/A#include "_rtld.h"
2N/A#include "_audit.h"
2N/A#include "_elf.h"
2N/A#include "msg.h"
2N/A
2N/A/*
2N/A * Default and secure dependency search paths.
2N/A */
/*
 * Default dependency search path, expressed as a chain of Pnode's.
 * Unless SGS_PRE_UNIFIED_PROCESS is defined, the MSG_PTH_LIB[_64] entry
 * is present and explicitly chains to the MSG_PTH_USRLIB[_64] entry via
 * &elf_dflt_dirs[1]; otherwise only the usr/lib entry exists (with a
 * null next pointer terminating the list).
 */
static Pnode	elf_dflt_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB_64),	0,	MSG_PTH_LIB_64_SIZE,
		LA_SER_DEFAULT,		0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB_64),	0,	MSG_PTH_USRLIB_64_SIZE,
		LA_SER_DEFAULT,		0, 0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIB),	0,	MSG_PTH_LIB_SIZE,
		LA_SER_DEFAULT,		0, &elf_dflt_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIB),	0,	MSG_PTH_USRLIB_SIZE,
		LA_SER_DEFAULT,		0, 0 }
#endif
};
2N/A
/*
 * Secure-application dependency search path.  Same layout as
 * elf_dflt_dirs above, but flagged LA_SER_SECURE and naming the
 * */secure library directories (MSG_PTH_LIBSE*, MSG_PTH_USRLIBSE*).
 */
static Pnode	elf_secure_dirs[] = {
#if	defined(_ELF64)
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE_64),	0,	MSG_PTH_LIBSE_64_SIZE,
		LA_SER_SECURE,		0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE_64),	0,
		MSG_PTH_USRLIBSE_64_SIZE,
		LA_SER_SECURE,		0, 0 }
#else
#ifndef	SGS_PRE_UNIFIED_PROCESS
	{ MSG_ORIG(MSG_PTH_LIBSE),	0,	MSG_PTH_LIBSE_SIZE,
		LA_SER_SECURE,		0, &elf_secure_dirs[1] },
#endif
	{ MSG_ORIG(MSG_PTH_USRLIBSE),	0,	MSG_PTH_USRLIBSE_SIZE,
		LA_SER_SECURE,		0, 0 }
#endif
};
2N/A
2N/A/*
2N/A * Defines for local functions.
2N/A */
/* Forward declarations; most are exported indirectly through elf_fct below. */
static Pnode	*elf_fix_name(const char *, Rt_map *, uint_t);
static int	elf_are_u(Rej_desc *);
static void	elf_dladdr(ulong_t, Rt_map *, Dl_info *, void **, int);
static ulong_t	elf_entry_pt(void);
static char	*elf_get_so(const char *, const char *);
static Rt_map	*elf_map_so(Lm_list *, Aliste, const char *, const char *, int);
static int	elf_needed(Lm_list *, Aliste, Rt_map *);
static void	elf_unmap_so(Rt_map *);
static int	elf_verify_vers(const char *, Rt_map *, Rt_map *);
2N/A
2N/A/*
2N/A * Functions and data accessed through indirect pointers.
2N/A */
Fct elf_fct = {
	elf_are_u,		/* identify/verify an ELF image */
	elf_entry_pt,		/* entry point of the main object */
	elf_map_so,		/* map an object into the address space */
	elf_unmap_so,		/* unmap an object */
	elf_needed,		/* process DT_NEEDED dependencies */
	lookup_sym,		/* symbol lookup (defined elsewhere) */
	elf_reloc,		/* relocation processing (defined elsewhere) */
	elf_dflt_dirs,		/* default search path (above) */
	elf_secure_dirs,	/* secure search path (above) */
	elf_fix_name,		/* NEEDED name redefinition/expansion */
	elf_get_so,		/* shared object name lookup */
	elf_dladdr,		/* dladdr() support */
	dlsym_handle,		/* dlsym() support (defined elsewhere) */
	elf_verify_vers,	/* version dependency verification */
	elf_set_prot		/* segment protection (defined elsewhere) */
};
2N/A
2N/A
2N/A/*
2N/A * Redefine NEEDED name if necessary.
2N/A */
2N/Astatic Pnode *
2N/Aelf_fix_name(const char *name, Rt_map *clmp, uint_t orig)
2N/A{
2N/A /*
2N/A * For ABI compliance, if we are asked for ld.so.1, then really give
2N/A * them libsys.so.1 (the SONAME of libsys.so.1 is ld.so.1).
2N/A */
2N/A if (((*name == '/') &&
2N/A /* BEGIN CSTYLED */
2N/A#if defined(_ELF64)
2N/A (strcmp(name, MSG_ORIG(MSG_PTH_RTLD_64)) == 0)) ||
2N/A#else
2N/A (strcmp(name, MSG_ORIG(MSG_PTH_RTLD)) == 0)) ||
2N/A#endif
2N/A (strcmp(name, MSG_ORIG(MSG_FIL_RTLD)) == 0)) {
2N/A /* END CSTYLED */
2N/A Pnode *pnp;
2N/A
2N/A DBG_CALL(Dbg_file_fixname(LIST(clmp), name,
2N/A MSG_ORIG(MSG_PTH_LIBSYS)));
2N/A if (((pnp = calloc(sizeof (Pnode), 1)) == 0) ||
2N/A ((pnp->p_name = strdup(MSG_ORIG(MSG_PTH_LIBSYS))) == 0)) {
2N/A if (pnp)
2N/A free(pnp);
2N/A return (0);
2N/A }
2N/A pnp->p_len = MSG_PTH_LIBSYS_SIZE;
2N/A pnp->p_orig = (orig & PN_SER_MASK);
2N/A return (pnp);
2N/A }
2N/A
2N/A return (expand_paths(clmp, name, orig, 0));
2N/A}
2N/A
2N/A/*
2N/A * Determine if we have been given an ELF file and if so determine if the file
2N/A * is compatible. Returns 1 if true, else 0 and sets the reject descriptor
2N/A * with associated error information.
2N/A */
2N/Astatic int
2N/Aelf_are_u(Rej_desc *rej)
2N/A{
2N/A Ehdr *ehdr;
2N/A
2N/A /*
2N/A * Determine if we're an elf file. If not simply return, we don't set
2N/A * any rejection information as this test allows use to scroll through
2N/A * the objects we support (ELF, AOUT).
2N/A */
2N/A if (fmap->fm_fsize < sizeof (Ehdr) ||
2N/A fmap->fm_maddr[EI_MAG0] != ELFMAG0 ||
2N/A fmap->fm_maddr[EI_MAG1] != ELFMAG1 ||
2N/A fmap->fm_maddr[EI_MAG2] != ELFMAG2 ||
2N/A fmap->fm_maddr[EI_MAG3] != ELFMAG3) {
2N/A return (0);
2N/A }
2N/A
2N/A /*
2N/A * Check class and encoding.
2N/A */
2N/A /* LINTED */
2N/A ehdr = (Ehdr *)fmap->fm_maddr;
2N/A if (ehdr->e_ident[EI_CLASS] != M_CLASS) {
2N/A rej->rej_type = SGS_REJ_CLASS;
2N/A rej->rej_info = (uint_t)ehdr->e_ident[EI_CLASS];
2N/A return (0);
2N/A }
2N/A if (ehdr->e_ident[EI_DATA] != M_DATA) {
2N/A rej->rej_type = SGS_REJ_DATA;
2N/A rej->rej_info = (uint_t)ehdr->e_ident[EI_DATA];
2N/A return (0);
2N/A }
2N/A if ((ehdr->e_type != ET_REL) && (ehdr->e_type != ET_EXEC) &&
2N/A (ehdr->e_type != ET_DYN)) {
2N/A rej->rej_type = SGS_REJ_TYPE;
2N/A rej->rej_info = (uint_t)ehdr->e_type;
2N/A return (0);
2N/A }
2N/A
2N/A /*
2N/A * Verify machine specific flags, and hardware capability requirements.
2N/A */
2N/A if ((elf_mach_flags_check(rej, ehdr) == 0) ||
2N/A ((rtld_flags2 & RT_FL2_HWCAP) && (hwcap_check(rej, ehdr) == 0)))
2N/A return (0);
2N/A
2N/A /*
2N/A * Verify ELF version. ??? is this too restrictive ???
2N/A */
2N/A if (ehdr->e_version > EV_CURRENT) {
2N/A rej->rej_type = SGS_REJ_VERSION;
2N/A rej->rej_info = (uint_t)ehdr->e_version;
2N/A return (0);
2N/A }
2N/A return (1);
2N/A}
2N/A
2N/A/*
2N/A * The runtime linker employs lazy loading to provide the libraries needed for
2N/A * debugging, preloading .o's and dldump(). As these are seldom used, the
2N/A * standard startup of ld.so.1 doesn't initialize all the information necessary
2N/A * to perform plt relocation on ld.so.1's link-map. The first time lazy loading
2N/A * is called we get here to perform these initializations:
2N/A *
2N/A * o elf_needed() is called to set up the DYNINFO() indexes for each lazy
2N/A * dependency. Typically, for all other objects, this is called during
2N/A * analyze_so(), but as ld.so.1 is set-contained we skip this processing.
2N/A *
2N/A * o For intel, ld.so.1's JMPSLOT relocations need relative updates. These
2N/A * are by default skipped thus delaying all relative relocation processing
2N/A * on every invocation of ld.so.1.
2N/A */
2N/Aint
2N/Aelf_rtld_load()
2N/A{
2N/A Lm_list *lml = &lml_rtld;
2N/A Rt_map *lmp = lml->lm_head;
2N/A
2N/A if (lml->lm_flags & LML_FLG_PLTREL)
2N/A return (1);
2N/A
2N/A /*
2N/A * As we need to refer to the DYNINFO() information, insure that it has
2N/A * been initialized.
2N/A */
2N/A if (elf_needed(lml, ALIST_OFF_DATA, lmp) == 0)
2N/A return (0);
2N/A
2N/A#if defined(__i386)
2N/A /*
2N/A * This is a kludge to give ld.so.1 a performance benefit on i386.
2N/A * It's based around two factors.
2N/A *
2N/A * o JMPSLOT relocations (PLT's) actually need a relative relocation
2N/A * applied to the GOT entry so that they can find PLT0.
2N/A *
2N/A * o ld.so.1 does not exercise *any* PLT's before it has made a call
2N/A * to elf_lazy_load(). This is because all dynamic dependencies
2N/A * are recorded as lazy dependencies.
2N/A */
2N/A (void) elf_reloc_relacount((ulong_t)JMPREL(lmp),
2N/A (ulong_t)(PLTRELSZ(lmp) / RELENT(lmp)), (ulong_t)RELENT(lmp),
2N/A (ulong_t)ADDR(lmp));
2N/A#endif
2N/A
2N/A lml->lm_flags |= LML_FLG_PLTREL;
2N/A return (1);
2N/A}
2N/A
2N/A/*
2N/A * Lazy load an object.
2N/A */
/*
 * Lazy load an object.
 *
 * Brings in the lazy dependency recorded at DYNINFO() index `ndx' of the
 * caller `clmp'.  `slp' supplies the present entrance identifier used to
 * suppress repeated failures; `sym' names the triggering symbol and is
 * used for diagnostics only.  Returns the dependency's link-map, or 0 if
 * the load failed (or had already failed under ldd(1)).
 */
Rt_map *
elf_lazy_load(Rt_map *clmp, Slookup *slp, uint_t ndx, const char *sym)
{
	Rt_map		*nlmp, *hlmp;
	Dyninfo		*dip = &DYNINFO(clmp)[ndx], *pdip;
	uint_t		flags = 0;
	Pnode		*pnp;
	const char	*name;
	Lm_list		*lml = LIST(clmp);
	Lm_cntl		*lmc;
	Aliste		lmco;

	/*
	 * If this dependency has already been processed, we're done.
	 * di_info caches the loaded link-map; FLG_DI_LDD_DONE marks a
	 * previous (possibly failed) attempt under ldd(1).
	 */
	if (((nlmp = (Rt_map *)dip->di_info) != 0) ||
	    (dip->di_flags & FLG_DI_LDD_DONE))
		return (nlmp);

	/*
	 * If we're running under ldd(1), indicate that this dependency has been
	 * processed (see test above).  It doesn't matter whether the object is
	 * successfully loaded or not, this flag simply ensures that we don't
	 * repeatedly attempt to load an object that has already failed to load.
	 * To do so would create multiple failure diagnostics for the same
	 * object under ldd(1).
	 */
	if (lml->lm_flags & LML_FLG_TRC_ENABLE)
		dip->di_flags |= FLG_DI_LDD_DONE;

	/*
	 * Determine the initial dependency name, straight from the caller's
	 * string table.
	 */
	name = STRTAB(clmp) + DYN(clmp)[ndx].d_un.d_val;
	DBG_CALL(Dbg_file_lazyload(clmp, name, sym));

	/*
	 * If this object needs to establish its own group, make sure a handle
	 * is created.
	 */
	if (dip->di_flags & FLG_DI_GROUP)
		flags |= (FLG_RT_SETGROUP | FLG_RT_HANDLE);

	/*
	 * Lazy dependencies are identified as DT_NEEDED entries with a
	 * DF_P1_LAZYLOAD flag in the previous DT_POSFLAG_1 element.  The
	 * dynamic information element that corresponds to the DT_POSFLAG_1
	 * entry is free, and thus used to store the present entrance
	 * identifier.  This identifier is used to prevent multiple attempts to
	 * load a failed lazy loadable dependency within the same runtime linker
	 * operation.  However, future attempts to reload this dependency are
	 * still possible.
	 */
	if (ndx && (pdip = dip - 1) && (pdip->di_flags & FLG_DI_POSFLAG1))
		pdip->di_info = (void *)slp->sl_id;

	/*
	 * Expand the requested name if necessary.
	 */
	if ((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0)
		return (0);

	/*
	 * Provided the object on the head of the link-map has completed its
	 * relocation, create a new link-map control list for this request.
	 * Otherwise the load proceeds on the base control list
	 * (ALIST_OFF_DATA) with lmc left 0 so teardown below is skipped.
	 */
	hlmp = lml->lm_head;
	if (FLAGS(hlmp) & FLG_RT_RELOCED) {
		if ((lmc = alist_append(&lml->lm_lists, 0, sizeof (Lm_cntl),
		    AL_CNT_LMLISTS)) == 0) {
			remove_pnode(pnp);
			return (0);
		}
		lmco = (Aliste)((char *)lmc - (char *)lml->lm_lists);
	} else {
		lmc = 0;
		lmco = ALIST_OFF_DATA;
	}

	/*
	 * Load the associated object.  The result is cached in di_info so
	 * the early-return test above short-circuits future calls.
	 */
	dip->di_info = nlmp =
	    load_one(lml, lmco, pnp, clmp, MODE(clmp), flags, 0);

	/*
	 * Remove any expanded pathname infrastructure.  Reduce the pending lazy
	 * dependency count of the caller, together with the link-map lists
	 * count of objects that still have lazy dependencies pending.
	 */
	remove_pnode(pnp);
	if (--LAZY(clmp) == 0)
		LIST(clmp)->lm_lazy--;

	/*
	 * Finish processing the objects associated with this request, and
	 * create an association between the caller and this dependency.
	 * Any failure clears both the cached di_info and nlmp.
	 */
	if (nlmp && ((bind_one(clmp, nlmp, BND_NEEDED) == 0) ||
	    (analyze_lmc(lml, lmco, nlmp) == 0) ||
	    (relocate_lmc(lml, lmco, clmp, nlmp) == 0)))
		dip->di_info = nlmp = 0;

	/*
	 * If this lazyload has failed, and we've created a new link-map
	 * control list to which this request has added objects, then remove
	 * all the objects that have been associated to this request.
	 */
	if ((nlmp == 0) && lmc && lmc->lc_head)
		remove_lmc(lml, clmp, lmc, lmco, name);

	/*
	 * Finally, remove any link-map control list that was created.
	 */
	if (lmc)
		remove_cntl(lml, lmco);

	/*
	 * If this lazy loading failed, record the fact, and bump the lazy
	 * counts (undoing the decrement made above so a later reload attempt
	 * remains possible).
	 */
	if (nlmp == 0) {
		dip->di_flags |= FLG_DI_LAZYFAIL;
		if (LAZY(clmp)++ == 0)
			LIST(clmp)->lm_lazy++;
	}

	return (nlmp);
}
2N/A
2N/A/*
2N/A * Return the entry point of the ELF executable.
2N/A */
/*
 * Return the entry point of the ELF executable, i.e. the ENTRY() of the
 * object at the head of the main link-map list.
 */
static ulong_t
elf_entry_pt(void)
{
	return (ENTRY(lml_main.lm_head));
}
2N/A
2N/A/*
2N/A * Unmap a given ELF shared object from the address space.
2N/A */
/*
 * Unmap a given ELF shared object from the address space.
 *
 * Handles three layouts: images generated in allocated memory (freed),
 * images with rtld_db padding (padding pages unmapped first), and the
 * regular per-segment mappings recorded in MMAPS().
 */
static void
elf_unmap_so(Rt_map *lmp)
{
	caddr_t	addr;
	size_t	size;
	Mmap	*mmaps;

	/*
	 * If this link map represents a relocatable object concatenation, then
	 * the image was simply generated in allocated memory.  Free the memory.
	 *
	 * Note: the memory was originally allocated in the libelf:_elf_outmap
	 * routine and would normally have been free'd in elf_outsync(), but
	 * because we 'interpose' on that routine the memory wasn't free'd at
	 * that time.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC) {
		free((void *)ADDR(lmp));
		return;
	}

	/*
	 * If padding was enabled via rtld_db, then we have at least one page
	 * in front of the image - and possibly a trailing page.
	 * Unmap the front page first: the padding runs from the page-truncated
	 * PADSTART() up to ADDR().
	 */
	if (PADSTART(lmp) != ADDR(lmp)) {
		addr = (caddr_t)M_PTRUNC(PADSTART(lmp));
		size = ADDR(lmp) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmap any trailing padding: anything beyond the page-rounded end
	 * of the image proper (ADDR() + MSIZE()).
	 */
	if (M_PROUND((PADSTART(lmp) + PADIMLEN(lmp))) >
	    M_PROUND(ADDR(lmp) + MSIZE(lmp))) {
		addr = (caddr_t)M_PROUND(ADDR(lmp) + MSIZE(lmp));
		size = M_PROUND(PADSTART(lmp) + PADIMLEN(lmp)) - (ulong_t)addr;
		(void) munmap(addr, size);
	}

	/*
	 * Unmmap all mapped segments (the MMAPS() array is terminated by an
	 * entry with a null m_vaddr).
	 */
	for (mmaps = MMAPS(lmp); mmaps->m_vaddr; mmaps++)
		(void) munmap(mmaps->m_vaddr, mmaps->m_msize);
}
2N/A
2N/A/*
2N/A * Determine if a dependency requires a particular version and if so verify
2N/A * that the version exists in the dependency.
2N/A */
/*
 * Determine if a dependency requires a particular version and if so verify
 * that the version exists in the dependency.
 *
 * name	- dependency name as recorded in the caller's Verneed entries.
 * clmp	- caller (the object imposing version requirements).
 * nlmp	- the dependency whose Verdef entries are searched.
 *
 * Returns 1 if all required versions are satisfied (or ignorable), 0 on
 * a fatal unsatisfied version requirement.
 */
static int
elf_verify_vers(const char *name, Rt_map *clmp, Rt_map *nlmp)
{
	Verneed		*vnd = VERNEED(clmp);
	int		_num, num = VERNEEDNUM(clmp);
	char		*cstrs = (char *)STRTAB(clmp);
	Lm_list		*lml = LIST(clmp);

	/*
	 * Traverse the callers version needed information and determine if any
	 * specific versions are required from the dependency.  Verneed
	 * records are chained by their vn_next byte offsets.
	 */
	DBG_CALL(Dbg_ver_need_title(LIST(clmp), NAME(clmp)));
	for (_num = 1; _num <= num; _num++,
	    vnd = (Verneed *)((Xword)vnd + vnd->vn_next)) {
		Half		cnt = vnd->vn_cnt;
		Vernaux		*vnap;
		char		*nstrs, *need;

		/*
		 * Determine if a needed entry matches this dependency.
		 */
		need = (char *)(cstrs + vnd->vn_file);
		if (strcmp(name, need) != 0)
			continue;

		if ((lml->lm_flags & LML_FLG_TRC_VERBOSE) &&
		    ((FLAGS1(clmp) & FL1_RT_LDDSTUB) == 0))
			(void) printf(MSG_INTL(MSG_LDD_VER_FIND), name);

		/*
		 * Validate that each version required actually exists in the
		 * dependency.
		 */
		nstrs = (char *)STRTAB(nlmp);

		for (vnap = (Vernaux *)((Xword)vnd + vnd->vn_aux); cnt;
		    cnt--, vnap = (Vernaux *)((Xword)vnap + vnap->vna_next)) {
			char		*version, *define;
			Verdef		*vdf = VERDEF(nlmp);
			/*
			 * Note: these intentionally shadow the outer
			 * _num/num; here they count the dependency's
			 * version definitions.
			 */
			ulong_t		_num, num = VERDEFNUM(nlmp);
			int		found = 0;

			version = (char *)(cstrs + vnap->vna_name);
			DBG_CALL(Dbg_ver_need_entry(lml, 0, need, version));

			/*
			 * A definition matches when both the hash and the
			 * version name string agree.
			 */
			for (_num = 1; _num <= num; _num++,
			    vdf = (Verdef *)((Xword)vdf + vdf->vd_next)) {
				Verdaux		*vdap;

				if (vnap->vna_hash != vdf->vd_hash)
					continue;

				vdap = (Verdaux *)((Xword)vdf + vdf->vd_aux);
				define = (char *)(nstrs + vdap->vda_name);
				if (strcmp(version, define) != 0)
					continue;

				found++;
				break;
			}

			/*
			 * If we're being traced print out any matched version
			 * when the verbose (-v) option is in effect.  Always
			 * print any unmatched versions.  Tracing never makes
			 * a missing version fatal (note the continue).
			 */
			if (lml->lm_flags & LML_FLG_TRC_ENABLE) {
				/* BEGIN CSTYLED */
				if (found) {
				    if (!(lml->lm_flags & LML_FLG_TRC_VERBOSE))
					continue;

				    (void) printf(MSG_ORIG(MSG_LDD_VER_FOUND),
					need, version, NAME(nlmp));
				} else {
				    if (rtld_flags & RT_FL_SILENCERR)
					continue;

				    (void) printf(MSG_INTL(MSG_LDD_VER_NFOUND),
					need, version);
				}
				/* END CSTYLED */
				continue;
			}

			/*
			 * If the version hasn't been found then this is a
			 * candidate for a fatal error condition.  Weak
			 * version definition requirements are silently
			 * ignored.  Also, if the image inspected for a version
			 * definition has no versioning recorded at all then
			 * silently ignore this (this provides better backward
			 * compatibility to old images created prior to
			 * versioning being available).  Both of these skipped
			 * diagnostics are available under tracing (see above).
			 */
			if ((found == 0) && (num != 0) &&
			    (!(vnap->vna_flags & VER_FLG_WEAK))) {
				eprintf(lml, ERR_FATAL,
				    MSG_INTL(MSG_VER_NFOUND), need, version,
				    NAME(clmp));
				return (0);
			}
		}
	}
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (1);
}
2N/A
2N/A/*
2N/A * Search through the dynamic section for DT_NEEDED entries and perform one
2N/A * of two functions. If only the first argument is specified then load the
2N/A * defined shared object, otherwise add the link map representing the defined
2N/A * link map the the dlopen list.
2N/A */
/*
 * Search through the dynamic section for DT_NEEDED entries and perform one
 * of two functions.  If only the first argument is specified then load the
 * defined shared object, otherwise add the link map representing the defined
 * link map the the dlopen list.
 *
 * Walks DYN(clmp) in parallel with DYNINFO(clmp) (same index `ndx'),
 * recording per-entry flags and loading each non-lazy DT_NEEDED/DT_USED
 * dependency onto link-map control list `lmco'.  Returns 1 on success,
 * 0 on a fatal load failure.
 */
static int
elf_needed(Lm_list *lml, Aliste lmco, Rt_map *clmp)
{
	Dyn		*dyn, *pdyn;
	ulong_t		ndx = 0;
	uint_t		lazy, flags;
	Word		lmflags = lml->lm_flags;
	Word		lmtflags = lml->lm_tflags;

	/*
	 * Process each shared object on needed list.
	 */
	if (DYN(clmp) == 0)
		return (1);

	/* pdyn trails dyn by one entry so DT_POSFLAG_1 modifiers can be seen */
	for (dyn = (Dyn *)DYN(clmp), pdyn = NULL; dyn->d_tag != DT_NULL;
	    pdyn = dyn++, ndx++) {
		Dyninfo	*dip = &DYNINFO(clmp)[ndx];
		Rt_map	*nlmp = 0;
		char	*name;
		int	silent = 0;
		Pnode	*pnp;

		switch (dyn->d_tag) {
		case DT_POSFLAG_1:
			/* Modifies the following entry; just flag it here. */
			dip->di_flags |= FLG_DI_POSFLAG1;
			continue;
		case DT_NEEDED:
		case DT_USED:
			lazy = flags = 0;
			dip->di_flags |= FLG_DI_NEEDED;

			/*
			 * Interpret a preceding DT_POSFLAG_1: lazy loading
			 * (unless disabled list-wide) and/or group semantics.
			 */
			if (pdyn && (pdyn->d_tag == DT_POSFLAG_1)) {
				if ((pdyn->d_un.d_val & DF_P1_LAZYLOAD) &&
				    ((lmtflags & LML_TFLG_NOLAZYLD) == 0)) {
					dip->di_flags |= FLG_DI_LAZY;
					lazy = 1;
				}
				if (pdyn->d_un.d_val & DF_P1_GROUPPERM) {
					dip->di_flags |= FLG_DI_GROUP;
					flags =
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				}
			}

			name = (char *)STRTAB(clmp) + dyn->d_un.d_val;

			/*
			 * NOTE, libc.so.1 can't be lazy loaded.  Although a
			 * lazy position flag won't be produced when a RTLDINFO
			 * .dynamic entry is found (introduced with the UPM in
			 * Solaris 10), it was possible to mark libc for lazy
			 * loading on previous releases.  To reduce the overhead
			 * of testing for this occurrence, only carry out this
			 * check for the first object on the link-map list
			 * (there aren't many applications built without libc).
			 */
			if (lazy && (lml->lm_head == clmp) &&
			    (strcmp(name, MSG_ORIG(MSG_FIL_LIBC)) == 0))
				lazy = 0;

			/*
			 * Don't bring in lazy loaded objects yet unless we've
			 * been asked to attempt to load all available objects
			 * (crle(1) sets LD_FLAGS=loadavail).  Even under
			 * RTLD_NOW we don't process this - RTLD_NOW will cause
			 * relocation processing which in turn might trigger
			 * lazy loading, but its possible that the object has a
			 * lazy loaded file with no bindings (i.e., it should
			 * never have been a dependency in the first place).
			 */
			if (lazy) {
				if ((lmflags & LML_FLG_LOADAVAIL) == 0) {
					LAZY(clmp)++;
					lazy = flags = 0;
					continue;
				}

				/*
				 * Silence any error messages - see description
				 * under elf_lookup_filtee().  `silent' records
				 * that we set the flag, so only we clear it.
				 */
				if ((rtld_flags & RT_FL_SILENCERR) == 0) {
					rtld_flags |= RT_FL_SILENCERR;
					silent = 1;
				}
			}
			break;
		case DT_AUXILIARY:
			dip->di_flags |= FLG_DI_AUXFLTR;
			continue;
		case DT_SUNW_AUXILIARY:
			dip->di_flags |= (FLG_DI_AUXFLTR | FLG_DI_SYMFLTR);
			continue;
		case DT_FILTER:
			dip->di_flags |= FLG_DI_STDFLTR;
			continue;
		case DT_SUNW_FILTER:
			dip->di_flags |= (FLG_DI_STDFLTR | FLG_DI_SYMFLTR);
			continue;
		default:
			continue;
		}

		DBG_CALL(Dbg_file_needed(clmp, name));

		/*
		 * If we're running under ldd(1), indicate that this dependency
		 * has been processed.  It doesn't matter whether the object is
		 * successfully loaded or not, this flag simply ensures that we
		 * don't repeatedly attempt to load an object that has already
		 * failed to load.  To do so would create multiple failure
		 * diagnostics for the same object under ldd(1).
		 */
		if (lml->lm_flags & LML_FLG_TRC_ENABLE)
			dip->di_flags |= FLG_DI_LDD_DONE;

		/*
		 * Establish the objects name, load it and establish a binding
		 * with the caller.
		 */
		if (((pnp = elf_fix_name(name, clmp, PN_SER_NEEDED)) == 0) ||
		    ((nlmp = load_one(lml, lmco, pnp, clmp, MODE(clmp),
		    flags, 0)) == 0) || (bind_one(clmp, nlmp, BND_NEEDED) == 0))
			nlmp = 0;

		/*
		 * Clean up any infrastructure, including the removal of the
		 * error suppression state, if it had been previously set in
		 * this routine.
		 */
		if (pnp)
			remove_pnode(pnp);
		if (silent)
			rtld_flags &= ~RT_FL_SILENCERR;

		if ((dip->di_info = (void *)nlmp) == 0) {
			/*
			 * If the object could not be mapped, continue if error
			 * suppression is established or we're here with ldd(1).
			 */
			if ((MODE(clmp) & RTLD_CONFGEN) || (lmflags &
			    (LML_FLG_LOADAVAIL | LML_FLG_TRC_ENABLE)))
				continue;
			else
				return (0);
		}
	}

	/* Account for this object on the list's pending-lazy count. */
	if (LAZY(clmp))
		lml->lm_lazy++;

	return (1);
}
2N/A
2N/Astatic int
2N/Aelf_map_check(Lm_list *lml, const char *name, caddr_t vaddr, Off size)
2N/A{
2N/A prmap_t *maps, *_maps;
2N/A int pfd, num, _num;
2N/A caddr_t eaddr = vaddr + size;
2N/A int err;
2N/A
2N/A /*
2N/A * If memory reservations have been established for alternative objects
2N/A * determine if this object falls within the reservation, if it does no
2N/A * further checking is required.
2N/A */
2N/A if (rtld_flags & RT_FL_MEMRESV) {
2N/A Rtc_head *head = (Rtc_head *)config->c_bgn;
2N/A
2N/A if ((vaddr >= (caddr_t)(uintptr_t)head->ch_resbgn) &&
2N/A (eaddr <= (caddr_t)(uintptr_t)head->ch_resend))
2N/A return (0);
2N/A }
2N/A
2N/A /*
2N/A * Determine the mappings presently in use by this process.
2N/A */
2N/A if ((pfd = pr_open(lml)) == FD_UNAVAIL)
2N/A return (1);
2N/A
2N/A if (ioctl(pfd, PIOCNMAP, (void *)&num) == -1) {
2N/A err = errno;
2N/A eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
2N/A strerror(err));
2N/A return (1);
2N/A }
2N/A
2N/A if ((maps = malloc((num + 1) * sizeof (prmap_t))) == 0)
2N/A return (1);
2N/A
2N/A if (ioctl(pfd, PIOCMAP, (void *)maps) == -1) {
2N/A err = errno;
2N/A eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_PROC), name,
2N/A strerror(err));
2N/A free(maps);
2N/A return (1);
2N/A }
2N/A
2N/A /*
2N/A * Determine if the supplied address clashes with any of the present
2N/A * process mappings.
2N/A */
2N/A for (_num = 0, _maps = maps; _num < num; _num++, _maps++) {
2N/A caddr_t _eaddr = _maps->pr_vaddr + _maps->pr_size;
2N/A Rt_map *lmp;
2N/A const char *str;
2N/A
2N/A if ((eaddr < _maps->pr_vaddr) || (vaddr >= _eaddr))
2N/A continue;
2N/A
2N/A /*
2N/A * We have a memory clash. See if one of the known dynamic
2N/A * dependency mappings represents this space so as to provide
2N/A * the user a more meaningful message.
2N/A */
2N/A if ((lmp = _caller(vaddr, 0)) != 0)
2N/A str = NAME(lmp);
2N/A else
2N/A str = MSG_INTL(MSG_STR_UNKNOWN);
2N/A
2N/A eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_MAPINUSE), name,
2N/A EC_NATPTR(vaddr), EC_OFF(size), str);
2N/A return (1);
2N/A }
2N/A free(maps);
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * Obtain a memory reservation. On newer systems, both MAP_ANON and MAP_ALIGN
2N/A * are used to obtained an aligned reservation from anonymous memory. If
2N/A * MAP_ANON isn't available, then MAP_ALIGN isn't either, so obtain a standard
2N/A * reservation using the file as backing.
2N/A */
2N/Astatic Am_ret
2N/Aelf_map_reserve(Lm_list *lml, const char *name, caddr_t *maddr, Off msize,
2N/A int mperm, int fd, Xword align)
2N/A{
2N/A Am_ret amret;
2N/A int mflag = MAP_PRIVATE | MAP_NORESERVE;
2N/A
2N/A#if defined(MAP_ALIGN)
2N/A if ((rtld_flags2 & RT_FL2_NOMALIGN) == 0) {
2N/A mflag |= MAP_ALIGN;
2N/A *maddr = (caddr_t)align;
2N/A }
2N/A#endif
2N/A if ((amret = anon_map(lml, maddr, msize, PROT_NONE, mflag)) == AM_ERROR)
2N/A return (amret);
2N/A
2N/A if (amret == AM_OK)
2N/A return (AM_OK);
2N/A
2N/A /*
2N/A * If an anonymous memory request failed (which should only be the
2N/A * case if it is unsupported on the system we're running on), establish
2N/A * the initial mapping directly from the file.
2N/A */
2N/A *maddr = 0;
2N/A if ((*maddr = mmap(*maddr, msize, mperm, MAP_PRIVATE,
2N/A fd, 0)) == MAP_FAILED) {
2N/A int err = errno;
2N/A eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), name,
2N/A strerror(err));
2N/A return (AM_ERROR);
2N/A }
2N/A return (AM_NOSUP);
2N/A}
2N/A
/*
 * Map a text or data segment of an object.
 *
 * Note the unusual contract: returns 0 (not the mapped address) on
 * success and MAP_FAILED on failure - callers are expected to test the
 * result only against MAP_FAILED.  Where MAP_TEXT/MAP_INITDATA exist,
 * the appropriate flag is added based on the original segment
 * permissions (phdr_mperm); if the kernel rejects those flags with
 * EINVAL, the static `notd' latch disables them for the remainder of
 * the process and the mapping is retried once without them.
 */
static void *
elf_map_textdata(caddr_t addr, Off flen, int mperm, int phdr_mperm, int mflag,
    int fd, Off foff)
{
#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	static int	notd = 0;

	/*
	 * If MAP_TEXT and MAP_INITDATA are available, select the appropriate
	 * flag.
	 */
	if (notd == 0) {
		if ((phdr_mperm & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC)
			mflag |= MAP_TEXT;
		else
			mflag |= MAP_INITDATA;
	}
#endif
	if (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff) != MAP_FAILED)
		return (0);

#if	defined(MAP_TEXT) && defined(MAP_INITDATA)
	if ((notd == 0) && (errno == EINVAL)) {
		/*
		 * MAP_TEXT and MAP_INITDATA may not be supported on this
		 * platform, try again without.
		 */
		notd = 1;
		mflag &= ~(MAP_TEXT | MAP_INITDATA);

		return (mmap((caddr_t)addr, flen, mperm, mflag, fd, foff));
	}
#endif
	return (MAP_FAILED);
}
2N/A
2N/A/*
2N/A * Map in a file.
2N/A */
2N/Astatic caddr_t
2N/Aelf_map_it(
2N/A Lm_list *lml, /* link-map list */
2N/A const char *name, /* actual name stored for pathname */
2N/A Off fsize, /* total mapping claim of the file */
2N/A Ehdr *ehdr, /* ELF header of file */
2N/A Phdr *fphdr, /* first loadable Phdr */
2N/A Phdr *lphdr, /* last loadable Phdr */
2N/A Phdr **rrphdr, /* return first Phdr in reservation */
2N/A caddr_t *rraddr, /* return start of reservation */
2N/A Off *rrsize, /* return total size of reservation */
2N/A int fixed, /* image is resolved to a fixed addr */
2N/A int fd, /* images file descriptor */
2N/A Xword align, /* image segments maximum alignment */
2N/A Mmap *mmaps, /* mmap information array and */
2N/A uint_t *mmapcnt) /* mapping count */
2N/A{
2N/A caddr_t raddr; /* reservation address */
2N/A Off rsize; /* reservation size */
2N/A Phdr *phdr; /* working program header poiner */
2N/A caddr_t maddr; /* working mmap address */
2N/A caddr_t faddr; /* working file address */
2N/A size_t padsize; /* object padding requirement */
2N/A size_t padpsize = 0; /* padding size rounded to next page */
2N/A size_t padmsize = 0; /* padding size rounded for alignment */
2N/A int skipfseg; /* skip mapping first segment */
2N/A int mperm; /* segment permissions */
2N/A Am_ret amret = AM_NOSUP;
2N/A
2N/A /*
2N/A * If padding is required extend both the front and rear of the image.
2N/A * To insure the image itself is mapped at the correct alignment the
2N/A * initial padding is rounded up to the nearest page. Once the image is
2N/A * mapped the excess can be pruned to the nearest page required for the
2N/A * actual padding itself.
2N/A */
2N/A if ((padsize = r_debug.rtd_objpad) != 0) {
2N/A padpsize = M_PROUND(padsize);
2N/A if (fixed)
2N/A padmsize = padpsize;
2N/A else
2N/A padmsize = S_ROUND(padsize, align);
2N/A }
2N/A
2N/A /*
2N/A * Determine the initial permissions used to map in the first segment.
2N/A * If this segments memsz is greater that its filesz then the difference
2N/A * must be zeroed. Make sure this segment is writable.
2N/A */
2N/A mperm = 0;
2N/A if (fphdr->p_flags & PF_R)
2N/A mperm |= PROT_READ;
2N/A if (fphdr->p_flags & PF_X)
2N/A mperm |= PROT_EXEC;
2N/A if ((fphdr->p_flags & PF_W) || (fphdr->p_memsz > fphdr->p_filesz))
2N/A mperm |= PROT_WRITE;
2N/A
2N/A /*
2N/A * Determine whether or not to let system reserve address space based on
2N/A * whether this is a dynamic executable (addresses in object are fixed)
2N/A * or a shared object (addresses in object are relative to the objects'
2N/A * base).
2N/A */
2N/A if (fixed) {
2N/A /*
2N/A * Determine the reservation address and size, and insure that
2N/A * this reservation isn't already in use.
2N/A */
2N/A faddr = maddr = (caddr_t)M_PTRUNC((ulong_t)fphdr->p_vaddr);
2N/A raddr = maddr - padpsize;
2N/A rsize = fsize + padpsize + padsize;
2N/A
2N/A if (lml_main.lm_head) {
2N/A if (elf_map_check(lml, name, raddr, rsize) != 0)
2N/A return (0);
2N/A }
2N/A
2N/A /*
2N/A * As this is a fixed image, all segments must be individually
2N/A * mapped.
2N/A */
2N/A skipfseg = 0;
2N/A
2N/A } else {
2N/A size_t esize;
2N/A
2N/A /*
2N/A * If this isn't a fixed image, reserve enough address space for
2N/A * the entire image to be mapped. The amount of reservation is
2N/A * the range between the beginning of the first, and end of the
2N/A * last loadable segment, together with any padding, plus the
2N/A * alignment of the first segment.
2N/A *
2N/A * The optimal reservation is made as a no-reserve mapping from
2N/A * anonymous memory. Each segment is then mapped into this
2N/A * reservation. If the anonymous mapping capability isn't
2N/A * available, the reservation is obtained from the file itself.
2N/A * In this case the first segment of the image is mapped as part
2N/A * of the reservation, thus only the following segments need to
2N/A * be remapped.
2N/A */
2N/A rsize = fsize + padmsize + padsize;
2N/A if ((amret = elf_map_reserve(lml, name, &raddr, rsize, mperm,
2N/A fd, align)) == AM_ERROR)
2N/A return (0);
2N/A maddr = raddr + padmsize;
2N/A faddr = (caddr_t)S_ROUND((Off)maddr, align);
2N/A
2N/A /*
2N/A * If this reservation has been obtained from anonymous memory,
2N/A * then all segments must be individually mapped. Otherwise,
2N/A * the first segment heads the reservation.
2N/A */
2N/A if (amret == AM_OK)
2N/A skipfseg = 0;
2N/A else
2N/A skipfseg = 1;
2N/A
2N/A /*
2N/A * For backward compatibility (where MAP_ALIGN isn't available),
2N/A * insure the alignment of the reservation is adequate for this
2N/A * object, and if not remap the object to obtain the correct
2N/A * alignment.
2N/A */
2N/A if (faddr != maddr) {
2N/A (void) munmap(raddr, rsize);
2N/A
2N/A rsize += align;
2N/A if ((amret = elf_map_reserve(lml, name, &raddr, rsize,
2N/A mperm, fd, align)) == AM_ERROR)
2N/A return (0);
2N/A
2N/A maddr = faddr = (caddr_t)S_ROUND((Off)(raddr +
2N/A padpsize), align);
2N/A
2N/A esize = maddr - raddr + padpsize;
2N/A
2N/A /*
2N/A * As ths image has been realigned, the first segment
2N/A * of the file needs to be remapped to its correct
2N/A * location.
2N/A */
2N/A skipfseg = 0;
2N/A } else
2N/A esize = padmsize - padpsize;
2N/A
2N/A /*
2N/A * If this reservation included padding, remove any excess for
2N/A * the start of the image (the padding was adjusted to insure
2N/A * the image was aligned appropriately).
2N/A */
2N/A if (esize) {
2N/A (void) munmap(raddr, esize);
2N/A raddr += esize;
2N/A rsize -= esize;
2N/A }
2N/A }
2N/A
2N/A /*
2N/A * At this point we know the initial location of the image, and its
2N/A * size. Pass these back to the caller for inclusion in the link-map
2N/A * that will eventually be created.
2N/A */
2N/A *rraddr = raddr;
2N/A *rrsize = rsize;
2N/A
2N/A /*
2N/A * The first loadable segment is now pointed to by maddr. This segment
2N/A * will eventually contain the elf header and program headers, so reset
2N/A * the program header. Pass this back to the caller for inclusion in
2N/A * the link-map so it can be used for later unmapping operations.
2N/A */
2N/A /* LINTED */
2N/A *rrphdr = (Phdr *)((char *)maddr + ehdr->e_phoff);
2N/A
2N/A /*
2N/A * If padding is required at the front of the image, obtain that now.
2N/A * Note, if we've already obtained a reservation from anonymous memory
2N/A * then this reservation will already include suitable padding.
2N/A * Otherwise this reservation is backed by the file, or in the case of
2N/A * a fixed image, doesn't yet exist. Map the padding so that it is
2N/A * suitably protected (PROT_NONE), and insure the first segment of the
2N/A * file is mapped to its correct location.
2N/A */
2N/A if (padsize) {
2N/A if (amret == AM_NOSUP) {
2N/A if (dz_map(lml, raddr, padpsize, PROT_NONE,
2N/A (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
2N/A MAP_FAILED)
2N/A return (0);
2N/A
2N/A skipfseg = 0;
2N/A }
2N/A rsize -= padpsize;
2N/A }
2N/A
2N/A /*
2N/A * Map individual segments. For a fixed image, these will each be
2N/A * unique mappings. For a reservation these will fill in the
2N/A * reservation.
2N/A */
2N/A for (phdr = fphdr; phdr <= lphdr;
2N/A phdr = (Phdr *)((Off)phdr + ehdr->e_phentsize)) {
2N/A caddr_t addr;
2N/A Off mlen, flen;
2N/A size_t size;
2N/A
2N/A /*
2N/A * Skip non-loadable segments or segments that don't occupy
2N/A * any memory.
2N/A */
2N/A if (((phdr->p_type != PT_LOAD) &&
2N/A (phdr->p_type != PT_SUNWBSS)) || (phdr->p_memsz == 0))
2N/A continue;
2N/A
2N/A /*
2N/A * Establish this segments address relative to our base.
2N/A */
2N/A addr = (caddr_t)M_PTRUNC((ulong_t)(phdr->p_vaddr +
2N/A (fixed ? 0 : faddr)));
2N/A
2N/A /*
2N/A * Determine the mapping protection from the segment attributes.
2N/A * Also determine the etext address from the last loadable
2N/A * segment which has permissions but no write access.
2N/A */
2N/A mperm = 0;
2N/A if (phdr->p_flags) {
2N/A if (phdr->p_flags & PF_R)
2N/A mperm |= PROT_READ;
2N/A if (phdr->p_flags & PF_X)
2N/A mperm |= PROT_EXEC;
2N/A if (phdr->p_flags & PF_W)
2N/A mperm |= PROT_WRITE;
2N/A else
2N/A fmap->fm_etext = phdr->p_vaddr + phdr->p_memsz +
2N/A (ulong_t)(fixed ? 0 : faddr);
2N/A }
2N/A
2N/A /*
2N/A * Determine the type of mapping required.
2N/A */
2N/A if (phdr->p_type == PT_SUNWBSS) {
2N/A /*
2N/A * Potentially, we can defer the loading of any SUNWBSS
2N/A * segment, depending on whether the symbols it provides
2N/A * have been bound to. In this manner, large segments
2N/A * that are interposed upon between shared libraries
2N/A * may not require mapping. Note, that the mapping
2N/A * information is recorded in our mapping descriptor at
2N/A * this time.
2N/A */
2N/A mlen = phdr->p_memsz;
2N/A flen = 0;
2N/A
2N/A } else if ((phdr->p_filesz == 0) && (phdr->p_flags == 0)) {
2N/A /*
2N/A * If this segment has no backing file and no flags
2N/A * specified, then it defines a reservation. At this
2N/A * point all standard loadable segments will have been
2N/A * processed. The segment reservation is mapped
2N/A * directly from /dev/null.
2N/A */
2N/A if (nu_map(lml, (caddr_t)addr, phdr->p_memsz, PROT_NONE,
2N/A MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
2N/A return (0);
2N/A
2N/A mlen = phdr->p_memsz;
2N/A flen = 0;
2N/A
2N/A } else if (phdr->p_filesz == 0) {
2N/A /*
2N/A * If this segment has no backing file then it defines a
2N/A * nobits segment and is mapped directly from /dev/zero.
2N/A */
2N/A if (dz_map(lml, (caddr_t)addr, phdr->p_memsz, mperm,
2N/A MAP_FIXED | MAP_PRIVATE) == MAP_FAILED)
2N/A return (0);
2N/A
2N/A mlen = phdr->p_memsz;
2N/A flen = 0;
2N/A
2N/A } else {
2N/A Off foff;
2N/A
2N/A /*
2N/A * This mapping originates from the file. Determine the
2N/A * file offset to which the mapping will be directed
2N/A * (must be aligned) and how much to map (might be more
2N/A * than the file in the case of .bss).
2N/A */
2N/A foff = M_PTRUNC((ulong_t)phdr->p_offset);
2N/A mlen = phdr->p_memsz + (phdr->p_offset - foff);
2N/A flen = phdr->p_filesz + (phdr->p_offset - foff);
2N/A
2N/A /*
2N/A * If this is a non-fixed, non-anonymous mapping, and no
2N/A * padding is involved, then the first loadable segment
2N/A * is already part of the initial reservation. In this
2N/A * case there is no need to remap this segment.
2N/A */
2N/A if ((skipfseg == 0) || (phdr != fphdr)) {
2N/A int phdr_mperm = mperm;
2N/A /*
2N/A * If this segments memsz is greater that its
2N/A * filesz then the difference must be zeroed.
2N/A * Make sure this segment is writable.
2N/A */
2N/A if (phdr->p_memsz > phdr->p_filesz)
2N/A mperm |= PROT_WRITE;
2N/A
2N/A if (elf_map_textdata((caddr_t)addr, flen,
2N/A mperm, phdr_mperm,
2N/A (MAP_FIXED | MAP_PRIVATE), fd, foff) ==
2N/A MAP_FAILED) {
2N/A int err = errno;
2N/A eprintf(lml, ERR_FATAL,
2N/A MSG_INTL(MSG_SYS_MMAP), name,
2N/A strerror(err));
2N/A return (0);
2N/A }
2N/A }
2N/A
2N/A /*
2N/A * If the memory occupancy of the segment overflows the
2N/A * definition in the file, we need to "zero out" the end
2N/A * of the mapping we've established, and if necessary,
2N/A * map some more space from /dev/zero. Note, zero'ed
2N/A * memory must end on a double word boundary to satisfy
2N/A * zero().
2N/A */
2N/A if (phdr->p_memsz > phdr->p_filesz) {
2N/A caddr_t zaddr;
2N/A size_t zlen, zplen;
2N/A Off fend;
2N/A
2N/A foff = (Off)(phdr->p_vaddr + phdr->p_filesz +
2N/A (fixed ? 0 : faddr));
2N/A zaddr = (caddr_t)M_PROUND(foff);
2N/A zplen = (size_t)(zaddr - foff);
2N/A
2N/A fend = (Off)S_DROUND((size_t)(phdr->p_vaddr +
2N/A phdr->p_memsz + (fixed ? 0 : faddr)));
2N/A zlen = (size_t)(fend - foff);
2N/A
2N/A /*
2N/A * Determine whether the number of bytes that
2N/A * must be zero'ed overflow to the next page.
2N/A * If not, simply clear the exact bytes
2N/A * (filesz to memsz) from this page. Otherwise,
2N/A * clear the remaining bytes of this page, and
2N/A * map an following pages from /dev/zero.
2N/A */
2N/A if (zlen < zplen)
2N/A zero((caddr_t)foff, (long)zlen);
2N/A else {
2N/A zero((caddr_t)foff, (long)zplen);
2N/A
2N/A if ((zlen = (fend - (Off)zaddr)) > 0) {
2N/A if (dz_map(lml, zaddr, zlen,
2N/A mperm,
2N/A MAP_FIXED | MAP_PRIVATE) ==
2N/A MAP_FAILED)
2N/A return (0);
2N/A }
2N/A }
2N/A }
2N/A }
2N/A
2N/A /*
2N/A * Unmap anything from the last mapping address to this one and
2N/A * update the mapping claim pointer.
2N/A */
2N/A if ((fixed == 0) && ((size = addr - maddr) != 0)) {
2N/A (void) munmap(maddr, size);
2N/A rsize -= size;
2N/A }
2N/A
2N/A /*
2N/A * Retain this segments mapping information.
2N/A */
2N/A mmaps[*mmapcnt].m_vaddr = addr;
2N/A mmaps[*mmapcnt].m_msize = mlen;
2N/A mmaps[*mmapcnt].m_fsize = flen;
2N/A mmaps[*mmapcnt].m_perm = mperm;
2N/A (*mmapcnt)++;
2N/A
2N/A maddr = addr + M_PROUND(mlen);
2N/A rsize -= M_PROUND(mlen);
2N/A }
2N/A
2N/A /*
2N/A * If padding is required at the end of the image, obtain that now.
2N/A * Note, if we've already obtained a reservation from anonymous memory
2N/A * then this reservation will already include suitable padding.
2N/A */
2N/A if (padsize) {
2N/A if (amret == AM_NOSUP) {
2N/A /*
2N/A * maddr is currently page aligned from the last segment
2N/A * mapping.
2N/A */
2N/A if (dz_map(lml, maddr, padsize, PROT_NONE,
2N/A (MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE)) ==
2N/A MAP_FAILED)
2N/A return (0);
2N/A }
2N/A maddr += padsize;
2N/A rsize -= padsize;
2N/A }
2N/A
2N/A /*
2N/A * Unmap any final reservation.
2N/A */
2N/A if ((fixed == 0) && (rsize != 0))
2N/A (void) munmap(maddr, rsize);
2N/A
2N/A return (faddr);
2N/A}
2N/A
2N/A/*
2N/A * A null symbol interpretor. Used if a filter has no associated filtees.
2N/A */
2N/A/* ARGSUSED0 */
2N/Astatic Sym *
2N/Aelf_null_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
2N/A{
2N/A return ((Sym *)0);
2N/A}
2N/A
2N/A/*
2N/A * Disable filtee use.
2N/A */
2N/Astatic void
2N/Aelf_disable_filtee(Rt_map *lmp, Dyninfo *dip)
2N/A{
2N/A dip->di_info = 0;
2N/A
2N/A if ((dip->di_flags & FLG_DI_SYMFLTR) == 0) {
2N/A /*
2N/A * If this is an object filter, free the filtee's duplication.
2N/A */
2N/A if (OBJFLTRNDX(lmp) != FLTR_DISABLED) {
2N/A free(REFNAME(lmp));
2N/A REFNAME(lmp) = (char *)0;
2N/A OBJFLTRNDX(lmp) = FLTR_DISABLED;
2N/A
2N/A /*
2N/A * Indicate that this filtee is no longer available.
2N/A */
2N/A if (dip->di_flags & FLG_DI_STDFLTR)
2N/A SYMINTP(lmp) = elf_null_find_sym;
2N/A
2N/A }
2N/A } else if (dip->di_flags & FLG_DI_STDFLTR) {
2N/A /*
2N/A * Indicate that this standard filtee is no longer available.
2N/A */
2N/A if (SYMSFLTRCNT(lmp))
2N/A SYMSFLTRCNT(lmp)--;
2N/A } else {
2N/A /*
2N/A * Indicate that this auxiliary filtee is no longer available.
2N/A */
2N/A if (SYMAFLTRCNT(lmp))
2N/A SYMAFLTRCNT(lmp)--;
2N/A }
2N/A dip->di_flags &= ~MSK_DI_FILTER;
2N/A}
2N/A
/*
 * Find symbol interpreter - filters.
 * This function is called when the symbols from a shared object should
 * be resolved from the shared objects filtees instead of from within itself.
 *
 * A symbol name of 0 is used to trigger filtee loading.
 *
 *   slp    lookup descriptor: sl_imap is the filter object, sl_name the
 *          symbol being sought (0 to just trigger filtee loading).
 *   dlmp   on a successful binding, receives the link-map the symbol was
 *          found in.
 *   binfo  binding diagnostic flags, updated on a successful binding.
 *   ndx    index of this filter's entry in DYNINFO(filter).
 *
 * Returns the bound symbol, or 0 if no filtee provides the symbol (or if
 * only filtee loading was requested).
 */
static Sym *
_elf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
{
	const char	*name = slp->sl_name, *filtees;
	Rt_map		*clmp = slp->sl_cmap;
	Rt_map		*ilmp = slp->sl_imap;
	Pnode		*pnp, **pnpp;
	int		any;
	Dyninfo		*dip = &DYNINFO(ilmp)[ndx];
	Lm_list		*lml = LIST(ilmp);

	/*
	 * Indicate that the filter has been used.  If a binding already exists
	 * to the caller, indicate that this object is referenced.  This insures
	 * we don't generate false unreferenced diagnostics from ldd -u/U or
	 * debugging.  Don't create a binding regardless, as this filter may
	 * have been dlopen()'ed.
	 */
	if (name && (ilmp != clmp)) {
		Word	tracing = (LIST(clmp)->lm_flags &
		    (LML_FLG_TRC_UNREF | LML_FLG_TRC_UNUSED));

		if (tracing || DBG_ENABLED) {
			Bnd_desc	*bdp;
			Aliste		idx;

			FLAGS1(ilmp) |= FL1_RT_USED;

			if ((tracing & LML_FLG_TRC_UNREF) || DBG_ENABLED) {
				/*
				 * Mark any existing binding descriptor from
				 * the caller as referring to this filter.
				 */
				for (APLIST_TRAVERSE(CALLERS(ilmp), idx, bdp)) {
					if (bdp->b_caller == clmp) {
						bdp->b_flags |= BND_REFER;
						break;
					}
				}
			}
		}
	}

	/*
	 * If this is the first call to process this filter, establish the
	 * filtee list.  If a configuration file exists, determine if any
	 * filtee associations for this filter, and its filtee reference, are
	 * defined.  Otherwise, process the filtee reference.  Any token
	 * expansion is also completed at this point (i.e., $PLATFORM).
	 */
	filtees = (char *)STRTAB(ilmp) + DYN(ilmp)[ndx].d_un.d_val;
	if (dip->di_info == 0) {
		if (rtld_flags2 & RT_FL2_FLTCFG)
			dip->di_info = elf_config_flt(lml, PATHNAME(ilmp),
			    filtees);

		if (dip->di_info == 0) {
			DBG_CALL(Dbg_file_filter(lml, NAME(ilmp), filtees, 0));
			if ((lml->lm_flags &
			    (LML_FLG_TRC_VERBOSE | LML_FLG_TRC_SEARCH)) &&
			    ((FLAGS1(ilmp) & FL1_RT_LDDSTUB) == 0))
				(void) printf(MSG_INTL(MSG_LDD_FIL_FILTER),
				    NAME(ilmp), filtees);

			if ((dip->di_info = (void *)expand_paths(ilmp,
			    filtees, PN_SER_FILTEE, 0)) == 0) {
				/*
				 * No filtee paths could be established -
				 * this filter entry is now useless.
				 */
				elf_disable_filtee(ilmp, dip);
				return ((Sym *)0);
			}
		}
	}

	/*
	 * Traverse the filtee list, dlopen()'ing any objects specified and
	 * using their group handle to lookup the symbol.
	 */
	for (any = 0, pnpp = (Pnode **)&(dip->di_info), pnp = *pnpp; pnp;
	    pnpp = &pnp->p_next, pnp = *pnpp) {
		int		mode;
		Grp_hdl		*ghp;
		Rt_map		*nlmp = 0;

		/* p_len == 0 marks a previously disabled filtee entry. */
		if (pnp->p_len == 0)
			continue;

		/*
		 * Establish the mode of the filtee from the filter.  As filtees
		 * are loaded via a dlopen(), make sure that RTLD_GROUP is set
		 * and the filtees aren't global.  It would be nice to have
		 * RTLD_FIRST used here also, but as filters got out long before
		 * RTLD_FIRST was introduced it's a little too late now.
		 */
		mode = MODE(ilmp) | RTLD_GROUP;
		mode &= ~RTLD_GLOBAL;

		/*
		 * Insure that any auxiliary filter can locate symbols from its
		 * caller.
		 */
		if (dip->di_flags & FLG_DI_AUXFLTR)
			mode |= RTLD_PARENT;

		/*
		 * Process any hardware capability directory.  Establish a new
		 * link-map control list from which to analyze any newly added
		 * objects.
		 */
		if ((pnp->p_info == 0) && (pnp->p_orig & PN_TKN_HWCAP)) {
			Lm_cntl	*lmc;
			Aliste	lmco;

			if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
				if ((lmc = alist_append(&lml->lm_lists, 0,
				    sizeof (Lm_cntl), AL_CNT_LMLISTS)) == 0)
					return ((Sym *)0);
				lmco = (Aliste)((char *)lmc -
				    (char *)lml->lm_lists);
			} else {
				lmc = 0;
				lmco = ALIST_OFF_DATA;
			}

			pnp = hwcap_filtees(pnpp, lmco, lmc, dip, ilmp, filtees,
			    mode, (FLG_RT_HANDLE | FLG_RT_HWCAP));

			/*
			 * Now that any hardware capability objects have been
			 * processed, remove any link-map control list.
			 */
			if (lmc)
				remove_cntl(lml, lmco);
		}

		/* hwcap processing may have disabled this entry. */
		if (pnp->p_len == 0)
			continue;

		/*
		 * Process an individual filtee.  p_info == 0 means the filtee
		 * has not been loaded yet; once loaded, p_info caches its
		 * group handle.
		 */
		if (pnp->p_info == 0) {
			const char	*filtee = pnp->p_name;
			int		audit = 0;

			DBG_CALL(Dbg_file_filtee(lml, NAME(ilmp), filtee, 0));

			ghp = 0;

			/*
			 * Determine if the reference link map is already
			 * loaded.  As an optimization compare the filtee with
			 * our interpretor.  The most common filter is
			 * libdl.so.1, which is a filter on ld.so.1.
			 */
#if defined(_ELF64)
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD_64)) == 0) {
#else
			if (strcmp(filtee, MSG_ORIG(MSG_PTH_RTLD)) == 0) {
#endif
				/*
				 * Create an association between ld.so.1 and the
				 * filter.  As an optimization, a handle for
				 * ld.so.1 itself (required for the dlopen()
				 * family filtering mechanism) shouldn't search
				 * any dependencies of ld.so.1.  Omitting
				 * GPD_ADDEPS prevents the addition of any
				 * ld.so.1 dependencies to this handle.
				 */
				nlmp = lml_rtld.lm_head;
				if ((ghp = hdl_create(&lml_rtld, nlmp, ilmp,
				    (GPH_LDSO | GPH_FIRST | GPH_FILTEE),
				    (GPD_DLSYM | GPD_RELOC), GPD_PARENT)) == 0)
					nlmp = 0;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp)
					pnp->p_info = (void *)ghp;

				/*
				 * Audit the filter/filtee established.  Ignore
				 * any return from the auditor, as we can't
				 * allow ignore filtering to ld.so.1, otherwise
				 * nothing is going to work.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER))
					(void) audit_objfilter(ilmp, filtees,
					    nlmp, 0);

			} else {
				Rej_desc	rej = { 0 };
				Lm_cntl	*lmc;
				Aliste	lmco;

				/*
				 * Establish a new link-map control list from
				 * which to analyze any newly added objects.
				 */
				if (FLAGS(lml->lm_head) & FLG_RT_RELOCED) {
					if ((lmc =
					    alist_append(&lml->lm_lists, 0,
					    sizeof (Lm_cntl),
					    AL_CNT_LMLISTS)) == 0)
						return ((Sym *)0);
					lmco = (Aliste)((char *)lmc -
					    (char *)lml->lm_lists);
				} else {
					lmc = 0;
					lmco = ALIST_OFF_DATA;
				}

				/*
				 * Load the filtee.  Note, an auditor can
				 * provide an alternative name.
				 */
				if ((nlmp = load_path(lml, lmco, &(pnp->p_name),
				    ilmp, mode, FLG_RT_HANDLE, &ghp, 0,
				    &rej)) == 0) {
					file_notfound(LIST(ilmp), filtee, ilmp,
					    FLG_RT_HANDLE, &rej);
					remove_rej(&rej);
				}
				filtee = pnp->p_name;

				/*
				 * Establish the filter handle to prevent any
				 * recursion.
				 */
				if (nlmp && ghp) {
					ghp->gh_flags |= GPH_FILTEE;
					pnp->p_info = (void *)ghp;
				}

				/*
				 * Audit the filter/filtee established.  A
				 * return of 0 indicates the auditor wishes to
				 * ignore this filtee.
				 */
				if (nlmp && ((lml->lm_tflags | FLAGS1(ilmp)) &
				    LML_TFLG_AUD_OBJFILTER)) {
					if (audit_objfilter(ilmp, filtees,
					    nlmp, 0) == 0) {
						audit = 1;
						nlmp = 0;
					}
				}

				/*
				 * Finish processing the objects associated with
				 * this request.  Create an association between
				 * this object and the originating filter to
				 * provide sufficient information to tear down
				 * this filtee if necessary.
				 */
				if (nlmp && ghp &&
				    ((analyze_lmc(lml, lmco, nlmp) == 0) ||
				    (relocate_lmc(lml, lmco, ilmp, nlmp) == 0)))
					nlmp = 0;

				/*
				 * If the filtee has been successfully
				 * processed, then create an association
				 * between the filter and filtee.  This
				 * association provides sufficient information
				 * to tear down the filter and filtee if
				 * necessary.
				 */
				DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD));
				if (nlmp && ghp &&
				    (hdl_add(ghp, ilmp, GPD_FILTER) == 0))
					nlmp = 0;

				/*
				 * If this filtee loading has failed, and we've
				 * created a new link-map control list to which
				 * this request has added objects, then remove
				 * all the objects that have been associated to
				 * this request.
				 */
				if ((nlmp == 0) && lmc && lmc->lc_head)
					remove_lmc(lml, clmp, lmc, lmco, name);

				/*
				 * Remove any link-map control list that was
				 * created.
				 */
				if (lmc)
					remove_cntl(lml, lmco);
			}

			/*
			 * Generate a diagnostic if the filtee couldn't be
			 * loaded, null out the pnode entry, and continue
			 * the search.  Otherwise, retain this group handle
			 * for future symbol searches.
			 */
			if (nlmp == 0) {
				DBG_CALL(Dbg_file_filtee(lml, 0, filtee,
				    audit));

				pnp->p_info = 0;
				pnp->p_len = 0;
				continue;
			}
		}

		ghp = (Grp_hdl *)pnp->p_info;

		/*
		 * If we're just here to trigger filtee loading skip the symbol
		 * lookup so we'll continue looking for additional filtees.
		 */
		if (name) {
			Grp_desc	*gdp;
			Sym		*sym = 0;
			Aliste		idx;
			Slookup		sl = *slp;

			sl.sl_flags |= LKUP_FIRST;
			any++;

			/*
			 * Look for the symbol in the handles dependencies.
			 */
			for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
				if ((gdp->gd_flags & GPD_DLSYM) == 0)
					continue;

				/*
				 * If our parent is a dependency don't look at
				 * it (otherwise we are in a recursive loop).
				 * This situation can occur with auxiliary
				 * filters if the filtee has a dependency on the
				 * filter.  This dependency isn't necessary as
				 * auxiliary filters are opened RTLD_PARENT, but
				 * users may still unknowingly add an explicit
				 * dependency to the parent.
				 */
				if ((sl.sl_imap = gdp->gd_depend) == ilmp)
					continue;

				if (((sym = SYMINTP(sl.sl_imap)(&sl, dlmp,
				    binfo)) != 0) ||
				    (ghp->gh_flags & GPH_FIRST))
					break;
			}

			/*
			 * If a symbol has been found, indicate the binding
			 * and return the symbol.
			 */
			if (sym) {
				*binfo |= DBG_BINFO_FILTEE;
				return (sym);
			}
		}

		/*
		 * If this object is tagged to terminate filtee processing we're
		 * done.
		 */
		if (FLAGS1(ghp->gh_ownlmp) & FL1_RT_ENDFILTE)
			break;
	}

	/*
	 * If we're just here to trigger filtee loading then we're done.
	 */
	if (name == 0)
		return ((Sym *)0);

	/*
	 * If no filtees have been found for a filter, clean up any Pnode
	 * structures and disable their search completely.  For auxiliary
	 * filters we can reselect the symbol search function so that we never
	 * enter this routine again for this object.  For standard filters we
	 * use the null symbol routine.
	 */
	if (any == 0) {
		remove_pnode((Pnode *)dip->di_info);
		elf_disable_filtee(ilmp, dip);
		return ((Sym *)0);
	}

	return ((Sym *)0);
}
2N/A
2N/A/*
2N/A * Focal point for disabling error messages for auxiliary filters. As an
2N/A * auxiliary filter allows for filtee use, but provides a fallback should a
2N/A * filtee not exist (or fail to load), any errors generated as a consequence of
2N/A * trying to load the filtees are typically suppressed. Setting RT_FL_SILENCERR
2N/A * suppresses errors generated by eprint(), but insures a debug diagnostic is
2N/A * produced. ldd(1) employs printf(), and here, the selection of whether to
2N/A * print a diagnostic in regards to auxiliary filters is a little more complex.
2N/A *
2N/A * . The determination of whether to produce an ldd message, or a fatal
2N/A * error message is driven by LML_FLG_TRC_ENABLE.
2N/A * . More detailed ldd messages may also be driven off of LML_FLG_TRC_WARN,
2N/A * (ldd -d/-r), LML_FLG_TRC_VERBOSE (ldd -v), LML_FLG_TRC_SEARCH (ldd -s),
2N/A * and LML_FLG_TRC_UNREF/LML_FLG_TRC_UNUSED (ldd -U/-u).
2N/A *
2N/A * . If the calling object is lddstub, then several classes of message are
2N/A * suppressed. The user isn't trying to diagnose lddstub, this is simply
2N/A * a stub executable employed to preload a user specified library against.
2N/A *
2N/A * . If RT_FL_SILENCERR is in effect then any generic ldd() messages should
2N/A * be suppressed. All detailed ldd messages should still be produced.
2N/A */
2N/ASym *
2N/Aelf_lookup_filtee(Slookup *slp, Rt_map **dlmp, uint_t *binfo, uint_t ndx)
2N/A{
2N/A Sym *sym;
2N/A Dyninfo *dip = &DYNINFO(slp->sl_imap)[ndx];
2N/A int silent = 0;
2N/A
2N/A /*
2N/A * Make sure this entry is still acting as a filter. We may have tried
2N/A * to process this previously, and disabled it if the filtee couldn't
2N/A * be processed. However, other entries may provide different filtees
2N/A * that are yet to be completed.
2N/A */
2N/A if (dip->di_flags == 0)
2N/A return ((Sym *)0);
2N/A
2N/A /*
2N/A * Indicate whether an error message is required should this filtee not
2N/A * be found, based on the type of filter.
2N/A */
2N/A if ((dip->di_flags & FLG_DI_AUXFLTR) &&
2N/A ((rtld_flags & (RT_FL_WARNFLTR | RT_FL_SILENCERR)) == 0)) {
2N/A rtld_flags |= RT_FL_SILENCERR;
2N/A silent = 1;
2N/A }
2N/A
2N/A sym = _elf_lookup_filtee(slp, dlmp, binfo, ndx);
2N/A
2N/A if (silent)
2N/A rtld_flags &= ~RT_FL_SILENCERR;
2N/A
2N/A return (sym);
2N/A}
2N/A
2N/A/*
2N/A * Compute the elf hash value (as defined in the ELF access library).
2N/A * The form of the hash table is:
2N/A *
2N/A * |--------------|
2N/A * | # of buckets |
2N/A * |--------------|
2N/A * | # of chains |
2N/A * |--------------|
2N/A * | bucket[] |
2N/A * |--------------|
2N/A * | chain[] |
2N/A * |--------------|
2N/A */
2N/Aulong_t
2N/Aelf_hash(const char *name)
2N/A{
2N/A uint_t hval = 0;
2N/A
2N/A while (*name) {
2N/A uint_t g;
2N/A hval = (hval << 4) + *name++;
2N/A if ((g = (hval & 0xf0000000)) != 0)
2N/A hval ^= g >> 24;
2N/A hval &= ~g;
2N/A }
2N/A return ((ulong_t)hval);
2N/A}
2N/A
2N/A/*
2N/A * If flag argument has LKUP_SPEC set, we treat undefined symbols of type
2N/A * function specially in the executable - if they have a value, even though
2N/A * undefined, we use that value. This allows us to associate all references
2N/A * to a function's address to a single place in the process: the plt entry
2N/A * for that function in the executable. Calls to lookup from plt binding
2N/A * routines do NOT set LKUP_SPEC in the flag.
2N/A */
Sym *
elf_find_sym(Slookup *slp, Rt_map **dlmp, uint_t *binfo)
{
	const char	*name = slp->sl_name;
	Rt_map		*ilmp = slp->sl_imap;
	ulong_t		hash = slp->sl_hash;
	uint_t		ndx, htmp, buckets, *chainptr;
	Sym		*sym, *symtabptr;
	char		*strtabptr, *strtabname;
	uint_t		flags1;
	Syminfo		*sip;

	/*
	 * If we're only here to establish a symbols index, skip the diagnostic
	 * used to trace a symbol search.
	 */
	if ((slp->sl_flags & LKUP_SYMNDX) == 0)
		DBG_CALL(Dbg_syms_lookup(ilmp, name, MSG_ORIG(MSG_STR_ELF)));

	/*
	 * Without a hash table this object exports no symbols.
	 */
	if (HASH(ilmp) == 0)
		return ((Sym *)0);

	buckets = HASH(ilmp)[0];
	/* LINTED */
	htmp = (uint_t)hash % buckets;

	/*
	 * Get the first symbol on hash chain and initialize the string
	 * and symbol table pointers.  The hash array layout is:
	 * [0] nbucket, [1] nchain, [2..] buckets, then chains.
	 */
	if ((ndx = HASH(ilmp)[htmp + 2]) == 0)
		return ((Sym *)0);

	chainptr = HASH(ilmp) + 2 + buckets;
	strtabptr = STRTAB(ilmp);
	symtabptr = SYMTAB(ilmp);

	/*
	 * Walk the hash chain until the name matches, the chain ends, or a
	 * definitive miss is established.  A `break' below means a match was
	 * accepted; post-match processing follows the loop.
	 */
	while (ndx) {
		sym = symtabptr + ndx;
		strtabname = strtabptr + sym->st_name;

		/*
		 * Compare the symbol found with the name required.  If the
		 * names don't match continue with the next hash entry.
		 * (First-character check avoids a strcmp() call in the
		 * common mismatch case.)
		 */
		if ((*strtabname++ != *name) || strcmp(strtabname, &name[1])) {
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * The Solaris ld does not put DT_VERSYM in the dynamic
		 * section, but the GNU ld does.  The GNU runtime linker
		 * interprets the top bit of the 16-bit Versym value
		 * (0x8000) as the "hidden" bit.  If this bit is set,
		 * the linker is supposed to act as if that symbol does
		 * not exist.  The hidden bit supports their versioning
		 * scheme, which allows multiple incompatible functions
		 * with the same name to exist at different versions
		 * within an object.  The Solaris linker does not support this
		 * mechanism, or the model of interface evolution that
		 * it allows, but we honor the hidden bit in GNU ld
		 * produced objects in order to interoperate with them.
		 */
		if ((VERSYM(ilmp) != NULL) &&
		    ((VERSYM(ilmp)[ndx] & 0x8000) != 0)) {
			DBG_CALL(Dbg_syms_ignore_gnuver(ilmp, name,
			    ndx, VERSYM(ilmp)[ndx]));
			if ((ndx = chainptr[ndx]) != 0)
				continue;
			return ((Sym *)0);
		}

		/*
		 * If we're only here to establish a symbols index, we're done.
		 */
		if (slp->sl_flags & LKUP_SYMNDX)
			return (sym);

		/*
		 * If we find a match and the symbol is defined, return the
		 * symbol pointer and the link map in which it was found.
		 */
		if (sym->st_shndx != SHN_UNDEF) {
			*dlmp = ilmp;
			*binfo |= DBG_BINFO_FOUND;
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			break;

		/*
		 * If we find a match and the symbol is undefined, the
		 * symbol type is a function, and the value of the symbol
		 * is non zero, then this is a special case.  This allows
		 * the resolution of a function address to the plt[] entry.
		 * See SPARC ABI, Dynamic Linking, Function Addresses for
		 * more details.
		 */
		} else if ((slp->sl_flags & LKUP_SPEC) &&
		    (FLAGS(ilmp) & FLG_RT_ISMAIN) && (sym->st_value != 0) &&
		    (ELF_ST_TYPE(sym->st_info) == STT_FUNC)) {
			*dlmp = ilmp;
			*binfo |= (DBG_BINFO_FOUND | DBG_BINFO_PLTADDR);
			if ((FLAGS(ilmp) & FLG_RT_OBJINTPO) ||
			    ((FLAGS(ilmp) & FLG_RT_SYMINTPO) &&
			    is_sym_interposer(ilmp, sym)))
				*binfo |= DBG_BINFO_INTERPOSE;
			return (sym);
		}

		/*
		 * Undefined symbol.
		 */
		return ((Sym *)0);
	}

	/*
	 * We've found a match.  Determine if the defining object contains
	 * symbol binding information.
	 */
	if ((sip = SYMINFO(ilmp)) != 0)
		sip += ndx;

	/*
	 * If this definition is a singleton, and we haven't followed a default
	 * symbol search knowing that we're looking for a singleton (presumably
	 * because the symbol definition has been changed since the referring
	 * object was built), then reject this binding so that the caller can
	 * fall back to a standard symbol search.
	 */
	if ((ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON) &&
	    (((slp->sl_flags & LKUP_STANDARD) == 0) ||
	    (((slp->sl_flags & LKUP_SINGLETON) == 0) &&
	    (LIST(ilmp)->lm_flags & LML_FLG_GROUPSEXIST)))) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_SINGLE));
		*binfo |= BINFO_REJSINGLE;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * If this is a direct binding request, but the symbol definition has
	 * disabled directly binding to it (presumably because the symbol
	 * definition has been changed since the referring object was built),
	 * indicate this failure so that the caller can fall back to a standard
	 * symbol search.
	 */
	if (sip && (slp->sl_flags & LKUP_DIRECT) &&
	    (sip->si_flags & SYMINFO_FLG_NOEXTDIRECT)) {
		DBG_CALL(Dbg_bind_reject(slp->sl_cmap, ilmp, name,
		    DBG_BNDREJ_NODIR));
		*binfo |= BINFO_REJDIRECT;
		*binfo &= ~DBG_BINFO_MSK;
		return ((Sym *)0);
	}

	/*
	 * Determine whether this object is acting as a filter.
	 */
	if (((flags1 = FLAGS1(ilmp)) & MSK_RT_FILTER) == 0)
		return (sym);

	/*
	 * Determine if this object offers per-symbol filtering, and if so,
	 * whether this symbol references a filtee.
	 */
	if (sip && (flags1 & (FL1_RT_SYMSFLTR | FL1_RT_SYMAFLTR))) {
		/*
		 * If this is a standard filter reference, and no standard
		 * filtees remain to be inspected, we're done.  If this is an
		 * auxiliary filter reference, and no auxiliary filtees remain,
		 * we'll fall through in case any object filtering is available.
		 */
		if ((sip->si_flags & SYMINFO_FLG_FILTER) &&
		    (SYMSFLTRCNT(ilmp) == 0))
			return ((Sym *)0);

		if ((sip->si_flags & SYMINFO_FLG_FILTER) ||
		    ((sip->si_flags & SYMINFO_FLG_AUXILIARY) &&
		    SYMAFLTRCNT(ilmp))) {
			Sym	*fsym;

			/*
			 * This symbol has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return an error, otherwise fall through to
			 * catch any object filtering that may be available.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    sip->si_boundto)) != 0)
				return (fsym);
			if (sip->si_flags & SYMINFO_FLG_FILTER)
				return ((Sym *)0);
		}
	}

	/*
	 * Determine if this object provides global filtering.
	 */
	if (flags1 & (FL1_RT_OBJSFLTR | FL1_RT_OBJAFLTR)) {
		Sym	*fsym;

		if (OBJFLTRNDX(ilmp) != FLTR_DISABLED) {
			/*
			 * This object has an associated filtee.  Lookup the
			 * symbol in the filtee, and if it is found return it.
			 * If the symbol doesn't exist, and this is a standard
			 * filter, return and error, otherwise return the symbol
			 * within the filter itself.
			 */
			if ((fsym = elf_lookup_filtee(slp, dlmp, binfo,
			    OBJFLTRNDX(ilmp))) != 0)
				return (fsym);
		}

		if (flags1 & FL1_RT_OBJSFLTR)
			return ((Sym *)0);
	}
	return (sym);
}
2N/A
/*
 * Create a new Rt_map structure for an ELF object and initialize
 * all values.
 *
 * The caller supplies the object's mapping information (base address,
 * memory size, entry point, padding, and the per-segment Mmap array)
 * together with its DYNAMIC section.  On success the new link-map is
 * appended to the link-map control list `lmco' of `lml' and returned.
 * On any allocation failure, everything built so far is torn down via
 * remove_so() and 0 is returned.
 */
Rt_map *
elf_new_lm(Lm_list *lml, const char *pname, const char *oname, Dyn *ld,
    ulong_t addr, ulong_t etext, Aliste lmco, ulong_t msize, ulong_t entry,
    ulong_t paddr, ulong_t padimsize, Mmap *mmaps, uint_t mmapcnt)
{
	Rt_map	*lmp;
	ulong_t	base, fltr = 0, audit = 0, cfile = 0, crle = 0;
	Xword	rpath = 0;
	Ehdr	*ehdr = (Ehdr *)addr;

	DBG_CALL(Dbg_file_elf(lml, pname, (ulong_t)ld, addr, msize, entry,
	    lml->lm_lmidstr, lmco));

	/*
	 * Allocate space for the link-map and private elf information.  Once
	 * these are allocated and initialized, we can use remove_so(0, lmp) to
	 * tear down the link-map should any failures occur.
	 */
	if ((lmp = calloc(sizeof (Rt_map), 1)) == 0)
		return (0);
	if ((ELFPRV(lmp) = calloc(sizeof (Rt_elfp), 1)) == 0) {
		free(lmp);
		return (0);
	}

	/*
	 * All fields not filled in were set to 0 by calloc.
	 */
	ORIGNAME(lmp) = PATHNAME(lmp) = NAME(lmp) = (char *)pname;
	DYN(lmp) = ld;
	ADDR(lmp) = addr;
	MSIZE(lmp) = msize;
	ENTRY(lmp) = (Addr)entry;
	SYMINTP(lmp) = elf_find_sym;
	ETEXT(lmp) = etext;
	FCT(lmp) = &elf_fct;
	LIST(lmp) = lml;
	PADSTART(lmp) = paddr;
	PADIMLEN(lmp) = padimsize;
	THREADID(lmp) = rt_thr_self();
	OBJFLTRNDX(lmp) = FLTR_DISABLED;
	SORTVAL(lmp) = -1;

	MMAPS(lmp) = mmaps;
	MMAPCNT(lmp) = mmapcnt;
	ASSERT(mmapcnt != 0);

	/*
	 * If this is a shared object, add the base address to each address.
	 * if this is an executable, use address as is.
	 */
	if (ehdr->e_type == ET_EXEC) {
		base = 0;
		FLAGS(lmp) |= FLG_RT_FIXED;
	} else
		base = addr;

	/*
	 * Fill in rest of the link map entries with information from the file's
	 * dynamic structure.
	 */
	if (ld) {
		/* dynndx counts entries up to (excluding) DT_NULL */
		uint_t		dynndx = 0;
		Xword		pltpadsz = 0;
		Rti_desc	*rti;

		/* CSTYLED */
		for ( ; ld->d_tag != DT_NULL; ++ld, dynndx++) {
			switch ((Xword)ld->d_tag) {
			case DT_SYMTAB:
				SYMTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMTAB:
				SUNWSYMTAB(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSZ:
				SUNWSYMSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_STRTAB:
				STRTAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SYMENT:
				SYMENT(lmp) = ld->d_un.d_val;
				break;
			case DT_FEATURE_1:
				/*
				 * Note: this writes back into the mapped
				 * object's dynamic section to advertise
				 * parallel-init capability.
				 */
				ld->d_un.d_val |= DTF_1_PARINIT;
				if (ld->d_un.d_val & DTF_1_CONFEXP)
					crle = 1;
				break;
			case DT_MOVESZ:
				MOVESZ(lmp) = ld->d_un.d_val;
				FLAGS(lmp) |= FLG_RT_MOVE;
				break;
			case DT_MOVEENT:
				MOVEENT(lmp) = ld->d_un.d_val;
				break;
			case DT_MOVETAB:
				MOVETAB(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_REL:
			case DT_RELA:
				/*
				 * At this time, ld.so. can only handle one
				 * type of relocation per object.
				 */
				REL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_RELSZ:
			case DT_RELASZ:
				RELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_RELENT:
			case DT_RELAENT:
				RELENT(lmp) = ld->d_un.d_val;
				break;
			case DT_RELCOUNT:
			case DT_RELACOUNT:
				RELACOUNT(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_TEXTREL:
				FLAGS1(lmp) |= FL1_RT_TEXTREL;
				break;
			case DT_HASH:
				HASH(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTGOT:
				PLTGOT(lmp) = (uint_t *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTRELSZ:
				PLTRELSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_JMPREL:
				JMPREL(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_INIT:
				/* guard against a null .init address */
				if (ld->d_un.d_ptr != NULL)
					INIT(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_FINI:
				if (ld->d_un.d_ptr != NULL)
					FINI(lmp) =
					    (void (*)())(ld->d_un.d_ptr + base);
				break;
			case DT_INIT_ARRAY:
				INITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_INIT_ARRAYSZ:
				INITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_FINI_ARRAY:
				FINIARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_FINI_ARRAYSZ:
				FINIARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_PREINIT_ARRAY:
				PREINITARRAY(lmp) = (Addr *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_PREINIT_ARRAYSZ:
				PREINITARRAYSZ(lmp) = (uint_t)ld->d_un.d_val;
				break;
			case DT_RPATH:
			case DT_RUNPATH:
				/*
				 * Defer conversion to a string pointer until
				 * STRTAB has definitely been seen (below).
				 */
				rpath = ld->d_un.d_val;
				break;
			case DT_FILTER:
				fltr = ld->d_un.d_val;
				OBJFLTRNDX(lmp) = dynndx;
				FLAGS1(lmp) |= FL1_RT_OBJSFLTR;
				break;
			case DT_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					fltr = ld->d_un.d_val;
					OBJFLTRNDX(lmp) = dynndx;
				}
				FLAGS1(lmp) |= FL1_RT_OBJAFLTR;
				break;
			case DT_SUNW_FILTER:
				SYMSFLTRCNT(lmp)++;
				FLAGS1(lmp) |= FL1_RT_SYMSFLTR;
				break;
			case DT_SUNW_AUXILIARY:
				if (!(rtld_flags & RT_FL_NOAUXFLTR)) {
					SYMAFLTRCNT(lmp)++;
				}
				FLAGS1(lmp) |= FL1_RT_SYMAFLTR;
				break;
			case DT_DEPAUDIT:
				if (!(rtld_flags & RT_FL_NOAUDIT))
					audit = ld->d_un.d_val;
				break;
			case DT_CONFIG:
				cfile = ld->d_un.d_val;
				break;
			case DT_DEBUG:
				/*
				 * DT_DEBUG entries are only created in
				 * dynamic objects that require an interpreter
				 * (ie. all dynamic executables and some shared
				 * objects), and provide for a hand-shake with
				 * debuggers.  This entry is initialized to
				 * zero by the link-editor.  If a debugger has
				 * attached to us and updated this entry, set
				 * the debugger flag, and finish initializing
				 * the debugging structure (see setup() also).
				 * Switch off any configuration object use as
				 * most debuggers can't handle fixed dynamic
				 * executables as dependencies, and we can't
				 * handle requests like object padding for
				 * alternative objects.
				 */
				if (ld->d_un.d_ptr)
					rtld_flags |=
					    (RT_FL_DEBUGGER | RT_FL_NOOBJALT);
				ld->d_un.d_ptr = (Addr)&r_debug;
				break;
			case DT_VERNEED:
				VERNEED(lmp) = (Verneed *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_VERNEEDNUM:
				/* LINTED */
				VERNEEDNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERDEF:
				VERDEF(lmp) = (Verdef *)(ld->d_un.d_ptr + base);
				break;
			case DT_VERDEFNUM:
				/* LINTED */
				VERDEFNUM(lmp) = (int)ld->d_un.d_val;
				break;
			case DT_VERSYM:
				/*
				 * The Solaris ld does not produce DT_VERSYM,
				 * but the GNU ld does, in order to support
				 * their style of versioning, which differs
				 * from ours in some ways, while using the
				 * same data structures.  The presence of
				 * DT_VERSYM therefore means that GNU
				 * versioning rules apply to the given file.
				 * If DT_VERSYM is not present, then Solaris
				 * versioning rules apply.
				 */
				VERSYM(lmp) = (Versym *)(ld->d_un.d_ptr + base);
				break;
			case DT_BIND_NOW:
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				break;
			case DT_FLAGS:
				if (ld->d_un.d_val & DF_SYMBOLIC)
					FLAGS1(lmp) |= FL1_RT_SYMBOLIC;
				if (ld->d_un.d_val & DF_TEXTREL)
					FLAGS1(lmp) |= FL1_RT_TEXTREL;
				if ((ld->d_un.d_val & DF_BIND_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				/*
				 * Capture any static TLS use, and enforce that
				 * this object be non-deletable.
				 */
				if (ld->d_un.d_val & DF_STATIC_TLS) {
					FLAGS1(lmp) |= FL1_RT_TLSSTAT;
					MODE(lmp) |= RTLD_NODELETE;
				}
				break;
			case DT_FLAGS_1:
				if (ld->d_un.d_val & DF_1_DISPRELPND)
					FLAGS1(lmp) |= FL1_RT_DISPREL;
				if (ld->d_un.d_val & DF_1_GROUP)
					FLAGS(lmp) |=
					    (FLG_RT_SETGROUP | FLG_RT_HANDLE);
				if ((ld->d_un.d_val & DF_1_NOW) &&
				    ((rtld_flags2 & RT_FL2_BINDLAZY) == 0)) {
					MODE(lmp) |= RTLD_NOW;
					MODE(lmp) &= ~RTLD_LAZY;
				}
				if (ld->d_un.d_val & DF_1_NODELETE)
					MODE(lmp) |= RTLD_NODELETE;
				if (ld->d_un.d_val & DF_1_INITFIRST)
					FLAGS(lmp) |= FLG_RT_INITFRST;
				if (ld->d_un.d_val & DF_1_NOOPEN)
					FLAGS(lmp) |= FLG_RT_NOOPEN;
				if (ld->d_un.d_val & DF_1_LOADFLTR)
					FLAGS(lmp) |= FLG_RT_LOADFLTR;
				if (ld->d_un.d_val & DF_1_NODUMP)
					FLAGS(lmp) |= FLG_RT_NODUMP;
				if (ld->d_un.d_val & DF_1_CONFALT)
					crle = 1;
				if (ld->d_un.d_val & DF_1_DIRECT)
					FLAGS1(lmp) |= FL1_RT_DIRECT;
				if (ld->d_un.d_val & DF_1_NODEFLIB)
					FLAGS1(lmp) |= FL1_RT_NODEFLIB;
				if (ld->d_un.d_val & DF_1_ENDFILTEE)
					FLAGS1(lmp) |= FL1_RT_ENDFILTE;
				if (ld->d_un.d_val & DF_1_TRANS)
					FLAGS(lmp) |= FLG_RT_TRANS;
#ifndef	EXPAND_RELATIVE
				if (ld->d_un.d_val & DF_1_ORIGIN)
					FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
				/*
				 * Global auditing is only meaningful when
				 * specified by the initiating object of the
				 * process - typically the dynamic executable.
				 * If this is the initiating object, its link-
				 * map will not yet have been added to the
				 * link-map list, and consequently the link-map
				 * list is empty.  (see setup()).
				 */
				if (ld->d_un.d_val & DF_1_GLOBAUDIT) {
					if (lml_main.lm_head == 0)
						FLAGS1(lmp) |= FL1_RT_GLOBAUD;
					else
						DBG_CALL(Dbg_audit_ignore(lmp));
				}

				/*
				 * If this object identifies itself as an
				 * interposer, but relocation processing has
				 * already started, then demote it.  It's too
				 * late to guarantee complete interposition.
				 */
				/* BEGIN CSTYLED */
				if (ld->d_un.d_val &
				    (DF_1_INTERPOSE | DF_1_SYMINTPOSE)) {
				    if (lml->lm_flags & LML_FLG_STARTREL) {
					DBG_CALL(Dbg_util_intoolate(lmp));
					if (lml->lm_flags & LML_FLG_TRC_ENABLE)
					    (void) printf(
						MSG_INTL(MSG_LDD_REL_ERR2),
						NAME(lmp));
				    } else if (ld->d_un.d_val & DF_1_INTERPOSE)
					FLAGS(lmp) |= FLG_RT_OBJINTPO;
				    else
					FLAGS(lmp) |= FLG_RT_SYMINTPO;
				}
				/* END CSTYLED */
				break;
			case DT_SYMINFO:
				SYMINFO(lmp) = (Syminfo *)(ld->d_un.d_ptr +
				    base);
				break;
			case DT_SYMINENT:
				SYMINENT(lmp) = ld->d_un.d_val;
				break;
			case DT_PLTPAD:
				PLTPAD(lmp) = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_PLTPADSZ:
				pltpadsz = ld->d_un.d_val;
				break;
			case DT_SUNW_RTLDINF:
				/*
				 * Maintain a list of RTLDINFO structures.
				 * Typically, libc is the only supplier, and
				 * only one structure is provided.  However,
				 * multiple suppliers and multiple structures
				 * are supported.  For example, one structure
				 * may provide thread_init, and another
				 * structure may provide atexit reservations.
				 */
				if ((rti = alist_append(&lml->lm_rti, 0,
				    sizeof (Rti_desc), AL_CNT_RTLDINFO)) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				rti->rti_lmp = lmp;
				rti->rti_info = (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SORTENT:
				SUNWSORTENT(lmp) = ld->d_un.d_val;
				break;
			case DT_SUNW_SYMSORT:
				SUNWSYMSORT(lmp) =
				    (void *)(ld->d_un.d_ptr + base);
				break;
			case DT_SUNW_SYMSORTSZ:
				SUNWSYMSORTSZ(lmp) = ld->d_un.d_val;
				break;
			case DT_DEPRECATED_SPARC_REGISTER:
			case M_DT_REGISTER:
				FLAGS(lmp) |= FLG_RT_REGSYMS;
				break;
			case M_DT_PLTRESERVE:
				PLTRESERVE(lmp) = (void *)(ld->d_un.d_ptr +
				    base);
				break;
			}
		}

		/*
		 * A DT_PLTPAD without a matching non-zero DT_PLTPADSZ is
		 * meaningless - drop it.  Otherwise compute the pad end.
		 */
		if (PLTPAD(lmp)) {
			if (pltpadsz == (Xword)0)
				PLTPAD(lmp) = 0;
			else
				PLTPADEND(lmp) = (void *)((Addr)PLTPAD(lmp) +
				    pltpadsz);
		}

		/*
		 * Allocate a Dynamic Info structure.
		 */
		if ((DYNINFO(lmp) = calloc((size_t)dynndx,
		    sizeof (Dyninfo))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
		DYNINFOCNT(lmp) = dynndx;
	}

	/*
	 * A dynsym contains only global functions.  We want to have
	 * a version of it that also includes local functions, so that
	 * dladdr() will be able to report names for local functions
	 * when used to generate a stack trace for a stripped file.
	 * This version of the dynsym is provided via DT_SUNW_SYMTAB.
	 *
	 * In producing DT_SUNW_SYMTAB, ld uses a non-obvious trick
	 * in order to avoid having to have two copies of the global
	 * symbols held in DT_SYMTAB: The local symbols are placed in
	 * a separate section than the globals in the dynsym, but the
	 * linker conspires to put the data for these two sections adjacent
	 * to each other.  DT_SUNW_SYMTAB points at the top of the local
	 * symbols, and DT_SUNW_SYMSZ is the combined length of both tables.
	 *
	 * If the two sections are not adjacent, then something went wrong
	 * at link time.  We use ASSERT to kill the process if this is
	 * a debug build.  In a production build, we will silently ignore
	 * the presence of the .ldynsym and proceed.  We can detect this
	 * situation by checking to see that DT_SYMTAB lies in
	 * the range given by DT_SUNW_SYMTAB/DT_SUNW_SYMSZ.
	 */
	if ((SUNWSYMTAB(lmp) != NULL) &&
	    (((char *)SYMTAB(lmp) <= (char *)SUNWSYMTAB(lmp)) ||
	    (((char *)SYMTAB(lmp) >=
	    (SUNWSYMSZ(lmp) + (char *)SUNWSYMTAB(lmp)))))) {
		ASSERT(0);
		SUNWSYMTAB(lmp) = NULL;
		SUNWSYMSZ(lmp) = 0;
	}

	/*
	 * If configuration file use hasn't been disabled, and a configuration
	 * file hasn't already been set via an environment variable, see if any
	 * application specific configuration file is specified.  An LD_CONFIG
	 * setting is used first, but if this image was generated via crle(1)
	 * then a default configuration file is a fall-back.
	 */
	if ((!(rtld_flags & RT_FL_NOCFG)) && (config->c_name == 0)) {
		if (cfile)
			config->c_name = (const char *)(cfile +
			    (char *)STRTAB(lmp));
		else if (crle) {
			rtld_flags |= RT_FL_CONFAPP;
#ifndef	EXPAND_RELATIVE
			FLAGS1(lmp) |= FL1_RT_RELATIVE;
#endif
		}
	}

	if (rpath)
		RPATH(lmp) = (char *)(rpath + (char *)STRTAB(lmp));
	if (fltr) {
		/*
		 * If this object is a global filter, duplicate the filtee
		 * string name(s) so that REFNAME() is available in core files.
		 * This cludge was useful for debuggers at one point, but only
		 * when the filtee name was an individual full path.
		 */
		if ((REFNAME(lmp) = strdup(fltr + (char *)STRTAB(lmp))) == 0) {
			remove_so(0, lmp);
			return (0);
		}
	}

	if (rtld_flags & RT_FL_RELATIVE)
		FLAGS1(lmp) |= FL1_RT_RELATIVE;

	/*
	 * For Intel ABI compatibility.  It's possible that a JMPREL can be
	 * specified without any other relocations (e.g. a dynamic executable
	 * normally only contains .plt relocations).  If this is the case then
	 * no REL, RELSZ or RELENT will have been created.  For us to be able
	 * to traverse the .plt relocations under LD_BIND_NOW we need to know
	 * the RELENT for these relocations.  Refer to elf_reloc() for more
	 * details.
	 */
	if (!RELENT(lmp) && JMPREL(lmp))
		RELENT(lmp) = sizeof (Rel);

	/*
	 * Establish any per-object auditing.  If we're establishing `main's
	 * link-map its too early to go searching for audit objects so just
	 * hold the object name for later (see setup()).
	 */
	if (audit) {
		char	*cp = audit + (char *)STRTAB(lmp);

		if (*cp) {
			if (((AUDITORS(lmp) =
			    calloc(1, sizeof (Audit_desc))) == 0) ||
			    ((AUDITORS(lmp)->ad_name = strdup(cp)) == 0)) {
				remove_so(0, lmp);
				return (0);
			}
			if (lml_main.lm_head) {
				if (audit_setup(lmp, AUDITORS(lmp), 0) == 0) {
					remove_so(0, lmp);
					return (0);
				}
				FLAGS1(lmp) |= AUDITORS(lmp)->ad_flags;
				lml->lm_flags |= LML_FLG_LOCAUDIT;
			}
		}
	}

	if ((CONDVAR(lmp) = rt_cond_create()) == 0) {
		remove_so(0, lmp);
		return (0);
	}
	if (oname && ((append_alias(lmp, oname, 0)) == 0)) {
		remove_so(0, lmp);
		return (0);
	}

	/*
	 * Add the mapped object to the end of the link map list.
	 */
	lm_append(lml, lmco, lmp);
	return (lmp);
}
2N/A
2N/A/*
2N/A * Assign hardware/software capabilities.
2N/A */
2N/Avoid
2N/Acap_assign(Cap *cap, Rt_map *lmp)
2N/A{
2N/A while (cap->c_tag != CA_SUNW_NULL) {
2N/A switch (cap->c_tag) {
2N/A case CA_SUNW_HW_1:
2N/A HWCAP(lmp) = cap->c_un.c_val;
2N/A break;
2N/A case CA_SUNW_SF_1:
2N/A SFCAP(lmp) = cap->c_un.c_val;
2N/A }
2N/A cap++;
2N/A }
2N/A}
2N/A
/*
 * Map in an ELF object.
 * Takes an open file descriptor for the object to map and its pathname; returns
 * a pointer to a Rt_map structure for this object, or 0 on error.
 *
 * The object's image is initially visible through the global fmap mapping.
 * Relocatable objects are diverted to elf_obj_file().  For executables and
 * shared objects the program headers are validated, a mapping strategy is
 * chosen (reuse of the interpreter's existing mapping, reuse of the fmap
 * mapping for simple single-segment objects, or a fresh elf_map_it()), and
 * finally a link-map is built via elf_new_lm().
 */
static Rt_map *
elf_map_so(Lm_list *lml, Aliste lmco, const char *pname, const char *oname,
    int fd)
{
	int		i;		/* general temporary */
	Off		memsize = 0;	/* total memory size of pathname */
	Off		mentry;		/* entry point */
	Ehdr		*ehdr;		/* ELF header of ld.so */
	Phdr		*phdr;		/* first Phdr in file */
	Phdr		*phdr0;		/* Saved first Phdr in file */
	Phdr		*pptr;		/* working Phdr */
	Phdr		*fph = 0;	/* first loadable Phdr */
	Phdr		*lph;		/* last loadable Phdr */
	Phdr		*lfph = 0;	/* last loadable (filesz != 0) Phdr */
	Phdr		*lmph = 0;	/* last loadable (memsz != 0) Phdr */
	Phdr		*swph = 0;	/* program header for SUNWBSS */
	Phdr		*tlph = 0;	/* program header for PT_TLS */
	Phdr		*unwindph = 0;	/* program header for PT_SUNW_UNWIND */
	Cap		*cap = 0;	/* program header for SUNWCAP */
	Dyn		*mld = 0;	/* DYNAMIC structure for pathname */
	size_t		size;		/* size of elf and program headers */
	caddr_t		faddr = 0;	/* mapping address of pathname */
	Rt_map		*lmp;		/* link map created */
	caddr_t		paddr;		/* start of padded image */
	Off		plen;		/* size of image including padding */
	Half		etype;
	int		fixed;
	Mmap		*mmaps;
	uint_t		mmapcnt = 0;
	Xword		align = 0;

	/* LINTED */
	ehdr = (Ehdr *)fmap->fm_maddr;

	/*
	 * If this a relocatable object then special processing is required.
	 */
	if ((etype = ehdr->e_type) == ET_REL)
		return (elf_obj_file(lml, lmco, pname, fd));

	/*
	 * If this isn't a dynamic executable or shared object we can't process
	 * it.  If this is a dynamic executable then all addresses are fixed.
	 */
	if (etype == ET_EXEC) {
		fixed = 1;
	} else if (etype == ET_DYN) {
		fixed = 0;
	} else {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_BADTYPE), pname,
		    conv_ehdr_type(etype, 0, &inv_buf));
		return (0);
	}

	/*
	 * If our original mapped page was not large enough to hold all the
	 * program headers remap them.
	 */
	size = (size_t)((char *)ehdr->e_phoff +
	    (ehdr->e_phnum * ehdr->e_phentsize));
	if (size > fmap->fm_fsize) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}
	if (size > fmap->fm_msize) {
		fmap_setup();
		if ((fmap->fm_maddr = mmap(fmap->fm_maddr, size, PROT_READ,
		    fmap->fm_mflags, fd, 0)) == MAP_FAILED) {
			int	err = errno;
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_SYS_MMAP), pname,
			    strerror(err));
			return (0);
		}
		fmap->fm_msize = size;
		/* LINTED */
		ehdr = (Ehdr *)fmap->fm_maddr;
	}
	/* LINTED */
	phdr0 = phdr = (Phdr *)((char *)ehdr + ehdr->e_ehsize);

	/*
	 * Get entry point.
	 */
	mentry = ehdr->e_entry;

	/*
	 * Point at program headers and perform some basic validation.
	 * PT_LOAD segments must appear in ascending p_vaddr order.
	 */
	for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
	    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
		if ((pptr->p_type == PT_LOAD) ||
		    (pptr->p_type == PT_SUNWBSS)) {

			if (fph == 0) {
				fph = pptr;
			/* LINTED argument lph is initialized in first pass */
			} else if (pptr->p_vaddr <= lph->p_vaddr) {
				eprintf(lml, ERR_ELF,
				    MSG_INTL(MSG_GEN_INVPRGHDR), pname);
				return (0);
			}

			lph = pptr;

			if (pptr->p_memsz)
				lmph = pptr;
			if (pptr->p_filesz)
				lfph = pptr;
			if (pptr->p_type == PT_SUNWBSS)
				swph = pptr;
			if (pptr->p_align > align)
				align = pptr->p_align;

		} else if (pptr->p_type == PT_DYNAMIC) {
			mld = (Dyn *)(pptr->p_vaddr);
		} else if ((pptr->p_type == PT_TLS) && pptr->p_memsz) {
			tlph = pptr;
		} else if (pptr->p_type == PT_SUNWCAP) {
			cap = (Cap *)(pptr->p_vaddr);
		} else if (pptr->p_type == PT_SUNW_UNWIND) {
			unwindph = pptr;
		}
	}

#if defined(MAP_ALIGN)
	/*
	 * Make sure the maximum page alignment is a power of 2 >= the default
	 * segment alignment, for use with MAP_ALIGN.
	 */
	align = S_ROUND(align, M_SEGM_ALIGN);
#endif

	/*
	 * We'd better have at least one loadable segment, together with some
	 * specified file and memory size.
	 */
	if ((fph == 0) || (lmph == 0) || (lfph == 0)) {
		eprintf(lml, ERR_ELF, MSG_INTL(MSG_GEN_NOLOADSEG), pname);
		return (0);
	}

	/*
	 * Check that the files size accounts for the loadable sections
	 * we're going to map in (failure to do this may cause spurious
	 * bus errors if we're given a truncated file).
	 */
	if (fmap->fm_fsize < ((size_t)lfph->p_offset + lfph->p_filesz)) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_CORTRUNC), pname);
		return (0);
	}

	/*
	 * Memsize must be page rounded so that if we add object padding
	 * at the end it will start at the beginning of a page.
	 */
	plen = memsize = M_PROUND((lmph->p_vaddr + lmph->p_memsz) -
	    M_PTRUNC((ulong_t)fph->p_vaddr));

	/*
	 * Determine if an existing mapping is acceptable.
	 */
	if (interp && (lml->lm_flags & LML_FLG_BASELM) &&
	    (strcmp(pname, interp->i_name) == 0)) {
		/*
		 * If this is the interpreter then it has already been mapped
		 * and we have the address so don't map it again.  Note that
		 * the common occurrence of a reference to the interpreter
		 * (libdl -> ld.so.1) will have been caught during filter
		 * initialization (see elf_lookup_filtee()).  However, some
		 * ELF implementations are known to record libc.so.1 as the
		 * interpreter, and thus this test catches this behavior.
		 */
		paddr = faddr = interp->i_faddr;

	} else if ((fixed == 0) && (r_debug.rtd_objpad == 0) &&
	    (memsize <= fmap->fm_msize) && ((fph->p_flags & PF_W) == 0) &&
	    (fph == lph) && (fph->p_filesz == fph->p_memsz) &&
	    (((Xword)fmap->fm_maddr % align) == 0)) {
		size_t	rsize;

		/*
		 * If the file contains a single segment, and the mapping
		 * required has already been established from the initial fmap
		 * mapping, then we don't need to do anything more.  Reset the
		 * fmap address so that any later files start a new fmap.  This
		 * is really an optimization for filters, such as libdl.so,
		 * libthread, etc. that are constructed to be a single text
		 * segment.
		 */
		paddr = faddr = fmap->fm_maddr;

		/*
		 * Free any unused mapping by assigning the fmap buffer to the
		 * unused region.  fmap_setup() will unmap this area and
		 * establish defaults for future mappings.
		 */
		rsize = M_PROUND(fph->p_filesz);
		fmap->fm_maddr += rsize;
		fmap->fm_msize -= rsize;
		fmap_setup();
	}

	/*
	 * Allocate a mapping array to retain mapped segment information.
	 */
	if ((mmaps = calloc(ehdr->e_phnum, sizeof (Mmap))) == 0)
		return (0);

	/*
	 * If we're reusing an existing mapping determine the objects etext
	 * address.  Otherwise map the file (which will calculate the etext
	 * address as part of the mapping process).
	 */
	if (faddr) {
		caddr_t	base;

		if (fixed)
			base = 0;
		else
			base = faddr;

		/* LINTED */
		phdr0 = phdr = (Phdr *)((char *)faddr + ehdr->e_ehsize);

		for (i = 0, pptr = phdr; i < (int)ehdr->e_phnum; i++,
		    pptr = (Phdr *)((Off)pptr + ehdr->e_phentsize)) {
			if (pptr->p_type != PT_LOAD)
				continue;

			mmaps[mmapcnt].m_vaddr = (pptr->p_vaddr + base);
			mmaps[mmapcnt].m_msize = pptr->p_memsz;
			mmaps[mmapcnt].m_fsize = pptr->p_filesz;
			mmaps[mmapcnt].m_perm = (PROT_READ | PROT_EXEC);
			mmapcnt++;

			/* etext derives from the last non-writable segment */
			if (!(pptr->p_flags & PF_W)) {
				fmap->fm_etext = (ulong_t)pptr->p_vaddr +
				    (ulong_t)pptr->p_memsz +
				    (ulong_t)(fixed ? 0 : faddr);
			}
		}
	} else {
		/*
		 * Map the file.
		 */
		if (!(faddr = elf_map_it(lml, pname, memsize, ehdr, fph, lph,
		    &phdr, &paddr, &plen, fixed, fd, align, mmaps, &mmapcnt)))
			return (0);
	}

	/*
	 * Calculate absolute base addresses and entry points.
	 */
	if (!fixed) {
		if (mld)
			/* LINTED */
			mld = (Dyn *)((Off)mld + faddr);
		if (cap)
			/* LINTED */
			cap = (Cap *)((Off)cap + faddr);
		mentry += (Off)faddr;
	}

	/*
	 * Create new link map structure for newly mapped shared object.
	 */
	if (!(lmp = elf_new_lm(lml, pname, oname, mld, (ulong_t)faddr,
	    fmap->fm_etext, lmco, memsize, mentry, (ulong_t)paddr, plen, mmaps,
	    mmapcnt))) {
		(void) munmap((caddr_t)faddr, memsize);
		return (0);
	}

	/*
	 * Start the system loading in the ELF information we'll be processing.
	 */
	if (REL(lmp)) {
		(void) madvise((void *)ADDR(lmp), (uintptr_t)REL(lmp) +
		    (uintptr_t)RELSZ(lmp) - (uintptr_t)ADDR(lmp),
		    MADV_WILLNEED);
	}

	/*
	 * If this shared object contains any special segments, record them.
	 * Note that phdr may now point into the object's own mapping, while
	 * phdr0 retains the original header base - the (Xph - phdr0) offset
	 * arithmetic translates between the two.
	 */
	if (swph) {
		FLAGS(lmp) |= FLG_RT_SUNWBSS;
		SUNWBSS(lmp) = phdr + (swph - phdr0);
	}
	if (tlph && (tls_assign(lml, lmp, (phdr + (tlph - phdr0))) == 0)) {
		remove_so(lml, lmp);
		return (0);
	}

	if (unwindph)
		PTUNWIND(lmp) = phdr + (unwindph - phdr0);

	if (cap)
		cap_assign(cap, lmp);

	return (lmp);
}
/*
 * Function to correct protection settings.  Segments are all mapped initially
 * with permissions as given in the segment header.  We need to turn on write
 * permissions on a text segment if there are any relocations against that
 * segment, and then turn write permission back off again before returning
 * control to the user.  This function turns the permission on or off
 * depending on the value of the `permission' argument (e.g. PROT_WRITE to
 * enable writing, 0 to restore the original protections).  Returns 1 on
 * success, 0 on mprotect() failure.
 */
int
elf_set_prot(Rt_map *lmp, int permission)
{
	Mmap	*mmp;

	/*
	 * If this is an allocated image (ie. a relocatable object) we can't
	 * mprotect() anything.
	 */
	if (FLAGS(lmp) & FLG_RT_IMGALLOC)
		return (1);

	DBG_CALL(Dbg_file_prot(lmp, permission));

	/*
	 * Walk the (m_vaddr == 0)-terminated mapping array.  Segments that
	 * are already writable never need their protections toggled.
	 */
	for (mmp = MMAPS(lmp); mmp->m_vaddr; mmp++) {
		int	prot;

		if (mmp->m_perm & PROT_WRITE)
			continue;

		prot = mmp->m_perm | permission;
		if (mprotect(mmp->m_vaddr, mmp->m_msize, prot) == -1) {
			int	err = errno;

			eprintf(LIST(lmp), ERR_FATAL, MSG_INTL(MSG_SYS_MPROT),
			    NAME(lmp), strerror(err));
			return (0);
		}
	}
	return (1);
}
/*
 * Build full pathname of shared object from given directory name and filename.
 *
 * Note: the result is returned in a single static buffer, so this routine is
 * not reentrant and each call overwrites the previous result - callers must
 * consume or copy the string before calling again.  Paths longer than
 * PATH_MAX are silently truncated by snprintf().
 */
static char *
elf_get_so(const char *dir, const char *file)
{
	static char	pname[PATH_MAX];

	(void) snprintf(pname, PATH_MAX, MSG_ORIG(MSG_FMT_PATH), dir, file);
	return (pname);
}
/*
 * The copy relocation is recorded in a copy structure which will be applied
 * after all other relocations are carried out.  This provides for copying data
 * that must be relocated itself (ie. pointers in shared objects).  This
 * structure also provides a means of binding RTLD_GROUP dependencies to any
 * copy relocations that have been taken from any group members.
 *
 * If the size of the .bss area available for the copy information is not the
 * same as the source of the data inform the user if we're under ldd(1) control
 * (this checking was only established in 5.3, so by only issuing an error via
 * ldd(1) we maintain the standard set by previous releases).
 *
 * Returns 1 on success.  On allocation failure 0 is returned, except under
 * ldd(1) tracing where 1 is returned so that diagnosis can continue.
 */
int
elf_copy_reloc(char *name, Sym *rsym, Rt_map *rlmp, void *radd, Sym *dsym,
    Rt_map *dlmp, const void *dadd)
{
	Rel_copy	rc;
	Lm_list		*lml = LIST(rlmp);

	rc.r_name = name;
	rc.r_rsym = rsym;		/* the new reference symbol and its */
	rc.r_rlmp = rlmp;		/*	associated link-map */
	rc.r_dlmp = dlmp;		/* the defining link-map */
	rc.r_dsym = dsym;		/* the original definition */
	rc.r_radd = radd;
	rc.r_dadd = dadd;

	/* Copy only as much data as the smaller of the two symbols allows. */
	if (rsym->st_size > dsym->st_size)
		rc.r_size = (size_t)dsym->st_size;
	else
		rc.r_size = (size_t)rsym->st_size;

	if (alist_append(&COPY_R(dlmp), &rc, sizeof (Rel_copy),
	    AL_CNT_COPYREL) == 0) {
		if (!(lml->lm_flags & LML_FLG_TRC_WARN))
			return (0);
		else
			return (1);
	}
	if (!(FLAGS1(dlmp) & FL1_RT_COPYTOOK)) {
		/*
		 * Record (once per defining object) that a copy relocation
		 * has been taken from dlmp on behalf of rlmp.
		 */
		if (aplist_append(&COPY_S(rlmp), dlmp,
		    AL_CNT_COPYREL) == NULL) {
			if (!(lml->lm_flags & LML_FLG_TRC_WARN))
				return (0);
			else
				return (1);
		}
		FLAGS1(dlmp) |= FL1_RT_COPYTOOK;
	}

	/*
	 * If we are tracing (ldd), warn the user if
	 *	1) the size from the reference symbol differs from the
	 *	    copy definition.  We can only copy as much data as the
	 *	    reference (dynamic executables) entry allows.
	 *	2) the copy definition has STV_PROTECTED visibility.
	 */
	if (lml->lm_flags & LML_FLG_TRC_WARN) {
		if (rsym->st_size != dsym->st_size) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_SIZDIF),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(rlmp), EC_XWORD(rsym->st_size),
			    NAME(dlmp), EC_XWORD(dsym->st_size));
			if (rsym->st_size > dsym->st_size)
				(void) printf(MSG_INTL(MSG_LDD_CPY_INSDATA),
				    NAME(dlmp));
			else
				(void) printf(MSG_INTL(MSG_LDD_CPY_DATRUNC),
				    NAME(rlmp));
		}

		if (ELF_ST_VISIBILITY(dsym->st_other) == STV_PROTECTED) {
			(void) printf(MSG_INTL(MSG_LDD_CPY_PROT),
			    _conv_reloc_type(M_R_COPY), demangle(name),
			    NAME(dlmp));
		}
	}

	DBG_CALL(Dbg_reloc_apply_val(lml, ELF_DBG_RTLD, (Xword)radd,
	    (Xword)rc.r_size));
	return (1);
}
/*
 * Determine the symbol location of an address within a link-map.  Look for
 * the nearest symbol (whose value is less than or equal to the required
 * address).  This is the object specific part of dladdr().
 *
 * On return, dlip->dli_sname/dli_saddr identify the best match (or the
 * synthetic _START_ symbol when no symbol precedes `addr'), and *info is
 * filled in according to the RTLD_DL_SYMENT/RTLD_DL_LINKMAP request flags.
 */
static void
elf_dladdr(ulong_t addr, Rt_map *lmp, Dl_info *dlip, void **info, int flags)
{
	ulong_t		ndx, cnt, base, _value;
	Sym		*sym, *_sym = NULL;
	const char	*str;
	int		_flags;
	uint_t		*dynaddr_ndx;
	uint_t		dynaddr_n = 0;
	ulong_t		value;

	/*
	 * If SUNWSYMTAB() is non-NULL, then it sees a special version of
	 * the dynsym that starts with any local function symbols that exist in
	 * the library and then moves to the data held in SYMTAB().  In this
	 * case, SUNWSYMSZ tells us how long the symbol table is.  The
	 * availability of local function symbols will enhance the results
	 * we can provide.
	 *
	 * If SUNWSYMTAB() is non-NULL, then there might also be a
	 * SUNWSYMSORT() vector associated with it.  SUNWSYMSORT() contains
	 * an array of indices into SUNWSYMTAB, sorted by increasing
	 * address.  We can use this to do an O(log N) search instead of a
	 * brute force search.
	 *
	 * If SUNWSYMTAB() is NULL, then SYMTAB() references a dynsym that
	 * contains only global symbols.  In that case, the length of
	 * the symbol table comes from the nchain field of the related
	 * symbol lookup hash table.
	 */
	str = STRTAB(lmp);
	if (SUNWSYMSZ(lmp) == NULL) {
		sym = SYMTAB(lmp);
		/*
		 * If we don't have a .hash table there are no symbols
		 * to look at.
		 */
		if (HASH(lmp) == 0)
			return;
		cnt = HASH(lmp)[1];
	} else {
		sym = SUNWSYMTAB(lmp);
		cnt = SUNWSYMSZ(lmp) / SYMENT(lmp);
		dynaddr_ndx = SUNWSYMSORT(lmp);
		if (dynaddr_ndx != NULL)
			dynaddr_n = SUNWSYMSORTSZ(lmp) / SUNWSORTENT(lmp);
	}

	/* Fixed (executable) objects hold absolute symbol values. */
	if (FLAGS(lmp) & FLG_RT_FIXED)
		base = 0;
	else
		base = ADDR(lmp);

	if (dynaddr_n > 0) {		/* Binary search */
		long	low = 0, low_bnd;
		long	high = dynaddr_n - 1, high_bnd;
		long	mid;
		Sym	*mid_sym;

		/*
		 * Note that SUNWSYMSORT only contains symbols types that
		 * supply memory addresses, so there's no need to check and
		 * filter out any other types.
		 */
		low_bnd = low;
		high_bnd = high;
		while (low <= high) {
			mid = (low + high) / 2;
			mid_sym = &sym[dynaddr_ndx[mid]];
			value = mid_sym->st_value + base;
			if (addr < value) {
				/*
				 * Tighten the fall-back bounds (below) only
				 * when the probed half still brackets addr.
				 */
				if ((sym[dynaddr_ndx[high]].st_value + base) >=
				    addr)
					high_bnd = high;
				high = mid - 1;
			} else if (addr > value) {
				if ((sym[dynaddr_ndx[low]].st_value + base) <=
				    addr)
					low_bnd = low;
				low = mid + 1;
			} else {
				_sym = mid_sym;
				_value = value;
				break;
			}
		}
		/*
		 * If the above didn't find it exactly, then we must
		 * return the closest symbol with a value that doesn't
		 * exceed the one we are looking for.  If that symbol exists,
		 * it will lie in the range bounded by low_bnd and
		 * high_bnd.  This is a linear search, but a short one.
		 */
		if (_sym == NULL) {
			for (mid = low_bnd; mid <= high_bnd; mid++) {
				mid_sym = &sym[dynaddr_ndx[mid]];
				value = mid_sym->st_value + base;
				if (addr >= value) {
					_sym = mid_sym;
					_value = value;
				} else {
					break;
				}
			}
		}
	} else {			/* Linear search */
		for (_value = 0, sym++, ndx = 1; ndx < cnt; ndx++, sym++) {
			/*
			 * Skip expected symbol types that are not functions
			 * or data:
			 *	- A symbol table starts with an undefined symbol
			 *		in slot 0.  If we are using SUNWSYMTAB(),
			 *		there will be a second undefined symbol
			 *		right before the globals.
			 *	- The local part of SUNWSYMTAB() contains a
			 *		series of function symbols.  Each section
			 *		starts with an initial STT_FILE symbol.
			 */
			if ((sym->st_shndx == SHN_UNDEF) ||
			    (ELF_ST_TYPE(sym->st_info) == STT_FILE))
				continue;

			value = sym->st_value + base;
			if (value > addr)
				continue;
			if (value < _value)
				continue;

			_sym = sym;
			_value = value;

			/*
			 * Note, because we accept local and global symbols
			 * we could find a section symbol that matches the
			 * associated address, which means that the symbol
			 * name will be null.  In this case continue the
			 * search in case we can find a global symbol of
			 * the same value.
			 */
			if ((value == addr) &&
			    (ELF_ST_TYPE(sym->st_info) != STT_SECTION))
				break;
		}
	}

	_flags = flags & RTLD_DL_MASK;
	if (_sym) {
		if (_flags == RTLD_DL_SYMENT)
			*info = (void *)_sym;
		else if (_flags == RTLD_DL_LINKMAP)
			*info = (void *)lmp;

		dlip->dli_sname = str + _sym->st_name;
		dlip->dli_saddr = (void *)_value;
	} else {
		/*
		 * addr lies between the beginning of the mapped segment and
		 * the first global symbol.  We have no symbol to return
		 * and the caller requires one.  We use _START_, the base
		 * address of the mapping.
		 */

		if (_flags == RTLD_DL_SYMENT) {
			/*
			 * An actual symbol struct is needed, so we
			 * construct one for _START_.  To do this in a
			 * fully accurate way requires a different symbol
			 * for each mapped segment.  This requires the
			 * use of dynamic memory and a mutex.  That's too much
			 * plumbing for a fringe case of limited importance.
			 *
			 * Fortunately, we can simplify:
			 *    - Only the st_size and st_info fields are useful
			 *	outside of the linker internals.  The others
			 *	reference things that outside code cannot see,
			 *	and can be set to 0.
			 *    - It's just a label and there is no size
			 *	to report.  So, the size should be 0.
			 * This means that only st_info needs a non-zero
			 * (constant) value.  A static struct will suffice.
			 * It must be const (readonly) so the caller can't
			 * change its meaning for subsequent callers.
			 */
			static const Sym fsym = { 0, 0, 0,
			    ELF_ST_INFO(STB_LOCAL, STT_OBJECT) };
			*info = (void *) &fsym;
		}

		dlip->dli_sname = MSG_ORIG(MSG_SYM_START);
		dlip->dli_saddr = (void *) ADDR(lmp);
	}
}
/*
 * Strip the temporary-list marker from every link-map collected on the
 * given dynamic list, then release the list itself.  Used to unwind the
 * scratch list built up by elf_lazy_find_sym().
 */
static void
elf_lazy_cleanup(APlist *alp)
{
	Aliste		off;
	Rt_map		*clmp;

	for (APLIST_TRAVERSE(alp, off, clmp))
		FLAGS(clmp) &= ~FLG_RT_TMPLIST;
	free(alp);
}
/*
 * This routine is called as a last fall-back to search for a symbol from a
 * standard relocation.  To maintain lazy loading's goal of reducing the number
 * of objects mapped, any symbol search is first carried out using the objects
 * that already exist in the process (either on a link-map list or handle).
 * If a symbol can't be found, and lazy dependencies are still pending, this
 * routine loads the dependencies in an attempt to locate the symbol.
 *
 * Only new objects are inspected as we will have already inspected presently
 * loaded objects before calling this routine.  However, a new object may not
 * be new - although the di_lmp might be zero, the object may have been mapped
 * as someone else's dependency.  Thus there's a possibility of some symbol
 * search duplication.
 *
 * Returns a pointer to the located symbol (with *_lmp and *binfo updated by
 * the per-list lookup routine), or NULL if the symbol remains undefined or
 * an internal allocation fails.
 */
Sym *
elf_lazy_find_sym(Slookup *slp, Rt_map **_lmp, uint_t *binfo)
{
	Sym		*sym = 0;
	APlist		*alist = NULL;
	Aliste		idx;
	Rt_map		*lmp1, *lmp = slp->sl_imap;
	const char	*name = slp->sl_name;

	/*
	 * Generate a local list of new objects to process.  This list can grow
	 * as each object supplies its own lazy dependencies.  Each object
	 * placed on the list is marked FLG_RT_TMPLIST so it is only queued
	 * (and hence searched) once; elf_lazy_cleanup() removes the marks.
	 */
	if (aplist_append(&alist, lmp, AL_CNT_LAZYFIND) == NULL)
		return (NULL);
	FLAGS(lmp) |= FLG_RT_TMPLIST;

	for (APLIST_TRAVERSE(alist, idx, lmp1)) {
		uint_t	cnt = 0;
		Slookup	sl = *slp;
		Dyninfo	*dip, *pdip;

		/*
		 * Discard any relocation index from further symbol searches.
		 * This index will have already been used to trigger any
		 * necessary lazy-loads, and it might be because one of these
		 * lazy loads have failed that we're here performing this
		 * fallback.  By removing the relocation index we don't try
		 * and perform the same failed lazy loading activity again.
		 */
		sl.sl_rsymndx = 0;

		/*
		 * Loop through the lazy DT_NEEDED entries examining each
		 * object for the required symbol.  If the symbol is not
		 * found, the object is in turn added to the local alist, so
		 * that the object's lazy DT_NEEDED entries can be examined.
		 */
		lmp = lmp1;
		for (dip = DYNINFO(lmp), pdip = NULL; cnt < DYNINFOCNT(lmp);
		    cnt++, pdip = dip++) {
			Rt_map *nlmp;

			/*
			 * Skip entries that aren't lazy dependencies, and
			 * those already resolved (di_info non-zero).
			 */
			if (((dip->di_flags & FLG_DI_LAZY) == 0) ||
			    dip->di_info)
				continue;

			/*
			 * If this object has already failed to lazy load, and
			 * we're still processing the same runtime linker
			 * operation that produced the failure, don't bother
			 * to try and load the object again.  The preceding
			 * FLG_DI_POSFLAG1 entry records (in di_info) the
			 * ld_entry_cnt at the time of the failure.
			 */
			if ((dip->di_flags & FLG_DI_LAZYFAIL) && pdip &&
			    (pdip->di_flags & FLG_DI_POSFLAG1)) {
				if (pdip->di_info == (void *)ld_entry_cnt)
					continue;

				/* New operation - clear the stale failure. */
				dip->di_flags &= ~FLG_DI_LAZYFAIL;
				pdip->di_info = NULL;
			}

			/*
			 * Try loading this lazy dependency.  If the object
			 * can't be loaded, consider this non-fatal and
			 * continue the search.  Lazy loaded dependencies need
			 * not exist and their loading should only turn out
			 * to be fatal if they are required to satisfy a
			 * relocation.
			 *
			 * If the file is already loaded and relocated we must
			 * still inspect it for symbols, even though it might
			 * have already been searched.  This lazy load
			 * operation might have promoted the permissions of
			 * the object, and thus made the object applicable for
			 * this symbol search, whereas before the object might
			 * have been skipped.
			 */
			if ((nlmp = elf_lazy_load(lmp, &sl, cnt, name)) == 0)
				continue;

			/*
			 * If this object isn't yet a part of the dynamic list
			 * then inspect it for the symbol.  If the symbol
			 * isn't found add the object to the dynamic list so
			 * that we can inspect its dependencies.
			 */
			if (FLAGS(nlmp) & FLG_RT_TMPLIST)
				continue;

			sl.sl_imap = nlmp;
			if (sym = LM_LOOKUP_SYM(sl.sl_cmap)(&sl, _lmp, binfo))
				break;

			/*
			 * Some dlsym() operations are already traversing a
			 * link-map (dlopen(0)), and thus there's no need to
			 * build our own dynamic dependency list.
			 */
			if ((sl.sl_flags & LKUP_NODESCENT) == 0) {
				if (aplist_append(&alist, nlmp,
				    AL_CNT_LAZYFIND) == 0) {
					elf_lazy_cleanup(alist);
					return (0);
				}
				FLAGS(nlmp) |= FLG_RT_TMPLIST;
			}
		}
		if (sym)
			break;
	}

	elf_lazy_cleanup(alist);
	return (sym);
}
/*
 * Emit a diagnostic for a relocation record that carries a bad r_offset.
 * Under ldd(1) the message goes to stdout; otherwise it is routed through
 * the debugging framework.
 */
void
elf_reloc_bad(Rt_map *lmp, void *rel, uchar_t rtype, ulong_t roffset,
    ulong_t rsymndx)
{
	Lm_list		*lml = LIST(lmp);
	const char	*name = NULL;
	int		trace = 0;

	/*
	 * Tracing output is produced when ldd(1) processing is enabled,
	 * unless errors are being silenced and verbose tracing hasn't
	 * re-enabled them.
	 */
	if ((lml->lm_flags & LML_FLG_TRC_ENABLE) &&
	    (((rtld_flags & RT_FL_SILENCERR) == 0) ||
	    (lml->lm_flags & LML_FLG_TRC_VERBOSE)))
		trace = 1;

	/*
	 * With neither tracing nor debugging active there's nowhere to
	 * report the condition.
	 */
	if ((trace == 0) && (DBG_ENABLED == 0))
		return;

	/*
	 * Derive a symbol name for the diagnostic.  Local symbols carry no
	 * useful name, in which case an empty string is substituted.
	 */
	if (rsymndx) {
		Sym	*symref = (Sym *)((ulong_t)SYMTAB(lmp) +
		    (rsymndx * SYMENT(lmp)));

		if (ELF_ST_BIND(symref->st_info) != STB_LOCAL)
			name = (char *)(STRTAB(lmp) + symref->st_name);
	}
	if (name == NULL)
		name = MSG_ORIG(MSG_STR_EMPTY);

	if (trace) {
		(void) printf(MSG_INTL(MSG_LDD_REL_ERR1),
		    _conv_reloc_type((uint_t)rtype), name, EC_ADDR(roffset));
		return;
	}

	Dbg_reloc_error(lml, ELF_DBG_RTLD, M_MACH, M_REL_SHT_TYPE, rel, name);
}
/*
 * Resolve a static TLS relocation.
 *
 * Returns the static TLS offset to apply for the relocation, or 0 on
 * failure (after issuing a fatal error).  NOTE(review): the returned value
 * -(TLSSTATOFF(lmp) - value) appears to be a negative offset from the
 * thread pointer, consistent with a variant II TLS layout - confirm
 * against the per-machine relocation code.
 */
long
elf_static_tls(Rt_map *lmp, Sym *sym, void *rel, uchar_t rtype, char *name,
    ulong_t roffset, long value)
{
	Lm_list	*lml = LIST(lmp);

	/*
	 * Relocations against a static TLS block have limited support once
	 * process initialization has completed.  Any error condition should
	 * be discovered by testing for DF_STATIC_TLS as part of loading an
	 * object, however individual relocations are tested in case the
	 * dynamic flag had not been set when this object was built.
	 *
	 * An object with no PT_TLS program header has no TLS block at all,
	 * so such a relocation against it is fatal.
	 */
	if (PTTLS(lmp) == 0) {
		DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, name));
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
		    _conv_reloc_type((uint_t)rtype), NAME(lmp),
		    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
		return (0);
	}

	/*
	 * If no static TLS has been set aside for this object, determine if
	 * any can be obtained.  Enforce that any object using static TLS is
	 * non-deletable (the static reservation cannot be reclaimed).  The
	 * FL1_RT_TLSSTAT and RTLD_NODELETE settings are made before the
	 * tls_assign() attempt, and are left in place even if it fails.
	 */
	if (TLSSTATOFF(lmp) == 0) {
		FLAGS1(lmp) |= FL1_RT_TLSSTAT;
		MODE(lmp) |= RTLD_NODELETE;

		if (tls_assign(lml, lmp, PTTLS(lmp)) == 0) {
			DBG_CALL(Dbg_reloc_in(lml, ELF_DBG_RTLD, M_MACH,
			    M_REL_SHT_TYPE, rel, NULL, name));
			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_BADTLS),
			    _conv_reloc_type((uint_t)rtype), NAME(lmp),
			    name ? demangle(name) : MSG_INTL(MSG_STR_UNKNOWN));
			return (0);
		}
	}

	/*
	 * Typically, a static TLS offset is maintained as a symbol's value.
	 * For local symbols that are not a part of the dynamic symbol table,
	 * the TLS relocation points to a section symbol, and the static TLS
	 * offset was deposited in the associated GOT table.  Make sure the
	 * GOT is cleared, so that the value isn't reused in do_reloc().
	 */
	if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) {
		if ((ELF_ST_TYPE(sym->st_info) == STT_SECTION)) {
			/* Consume the offset stashed in the GOT slot. */
			value = *(long *)roffset;
			*(long *)roffset = 0;
		} else {
			value = sym->st_value;
		}
	}
	return (-(TLSSTATOFF(lmp) - value));
}