2N/A/*
2N/A * CDDL HEADER START
2N/A *
2N/A * The contents of this file are subject to the terms of the
2N/A * Common Development and Distribution License (the "License").
2N/A * You may not use this file except in compliance with the License.
2N/A *
2N/A * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
2N/A * or http://www.opensolaris.org/os/licensing.
2N/A * See the License for the specific language governing permissions
2N/A * and limitations under the License.
2N/A *
2N/A * When distributing Covered Code, include this CDDL HEADER in each
2N/A * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
2N/A * If applicable, add the following below this CDDL HEADER, with the
2N/A * fields enclosed by brackets "[]" replaced with your own identifying
2N/A * information: Portions Copyright [yyyy] [name of copyright owner]
2N/A *
2N/A * CDDL HEADER END
2N/A */
2N/A
2N/A/*
2N/A * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
2N/A */
2N/A
2N/A#include <stdio.h>
2N/A#include <stdlib.h>
2N/A#include <stddef.h>
2N/A#include <unistd.h>
2N/A#include <thr_uberdata.h>
2N/A#include <thread_db.h>
2N/A#include <libc_int.h>
2N/A
/*
 * Private structures.
 */

/*
 * Overlay of the synchronization object types this library inspects.
 * The magic number is at a common offset in all of them (see
 * __td_ta_map_addr2sync(), which relies on this).
 */
typedef union {
	mutex_t lock;		/* mutex */
	rwlock_t rwlock;	/* readers/writer lock */
	sema_t semaphore;	/* semaphore */
	cond_t condition;	/* condition variable */
} td_so_un_t;
2N/A
/*
 * The thread agent: per-target-process state for the debugging interface.
 * Allocated by td_ta_new(); logically destroyed by td_ta_delete()
 * (which only NULLs ph_p -- see the comment above __td_ta_delete()).
 */
struct td_thragent {
	rwlock_t rwlock;	/* read-held by all ops; write-held by delete */
	struct ps_prochandle *ph_p;	/* client's process handle; NULL after delete */
	int initialized;	/* -1 bootstrap in progress, 0 no, 1 partial, 2 full */
	int sync_tracking;	/* non-zero: don't disable sync tracking on delete */
	int model;		/* target data model (PR_MODEL_NATIVE or 32-bit) */
	int primary_map;	/* target libc is on the primary link map */
	psaddr_t bootstrap_addr;	/* address of _tdb_bootstrap in the target */
	psaddr_t uberdata_addr;	/* address of the target's uberdata */
	psaddr_t tdb_eventmask_addr;	/* target's global event mask */
	psaddr_t tdb_register_sync_addr;	/* target's sync-registration flag */
	psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];	/* event stubs */
	psaddr_t hash_table_addr;	/* target's thread hash table */
	int hash_size;		/* bucket count; 1 means single-threaded */
	lwpid_t single_lwpid;	/* cached lwpid when single-threaded */
	psaddr_t single_ulwp_addr;	/* cached ulwp address when single-threaded */
};
2N/A
2N/A/*
2N/A * This is the name of the variable in libc that contains
2N/A * the uberdata address that we will need.
2N/A */
2N/A#define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
2N/A/*
2N/A * This is the actual name of uberdata, used in the event
2N/A * that tdb_bootstrap has not yet been initialized.
2N/A */
2N/A#define TD_UBERDATA_NAME "_uberdata"
2N/A/*
2N/A * The library name should end with ".so.1", but older versions of
2N/A * dbx expect the unadorned name and malfunction if ".1" is specified.
2N/A * Unfortunately, if ".1" is not specified, mdb malfunctions when it
2N/A * is applied to another instance of itself (due to the presence of
2N/A * /usr/lib/mdb/proc/libc.so). So we try it both ways.
2N/A */
2N/A#define TD_LIBRARY_NAME "libc.so"
2N/A#define TD_LIBRARY_NAME_1 "libc.so.1"
2N/A
/*
 * Forward declarations.  __td_ta_thr_iter() is defined later in this
 * file; __td_thr_get_info() is defined elsewhere in libc_db.
 */
td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);
2N/A
/*
 * Initialize threads debugging interface.
 * There is nothing to initialize; this always succeeds.
 * The symbol is part of the libc_db ABI.
 */
#pragma weak td_init = __td_init
td_err_e
__td_init()
{
	return (TD_OK);
}
2N/A
/*
 * This function does nothing, and never did.
 * But the symbol is in the ABI, so we can't delete it.
 */
#pragma weak td_log = __td_log
void
__td_log()
{
	/* intentionally empty -- retained only for ABI compatibility */
}
2N/A
/*
 * Short-cut to read just the hash table size from the process,
 * to avoid repeatedly reading the full uberdata structure when
 * dealing with a single-threaded process.
 * Returns the hash size, or 0 on any failure (uninitialized
 * agent or remote read error).
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized */
		return (0);
	case 1:		/* partially initialized */
		break;
	case 2:		/* fully initialized */
		return (ta_p->hash_size);
	}

	/* compute the remote address of uberdata.hash_size for the model */
	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;	/* no 32-bit support; the read below fails */
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}
2N/A
/*
 * Read the target's uberdata and cache the derived addresses in the
 * thread agent.  On success, sets ta_p->initialized to 2 (fully
 * initialized) for a multi-threaded target, or to 1 (partially
 * initialized) for a single-threaded target, additionally caching the
 * single thread's lwpid and ulwp address in the latter case.
 * Returns TD_OK or TD_DBERR.
 */
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);

	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
		int i;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		/* widen the 32-bit event stub addresses to psaddr_t */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}
	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = NULL;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Get the address and lwpid of the single thread/LWP.
		 * It may not be ulwp_one if this is a child of fork1().
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == NULL)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	/* not on the primary link map: treat the agent as uninitialized */
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}
2N/A
/*
 * Locate the target's uberdata via the _tdb_bootstrap pointer and read
 * it into the thread agent.  If _tdb_bootstrap is absent or not yet
 * initialized in the target, fall back to the address of the _uberdata
 * symbol itself.  Returns TD_OK, TD_NOLIBTHREAD (libc not yet linked),
 * TD_ERR, or TD_DBERR.
 */
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:			/* fully initialized */
		return (TD_OK);
	case 1:			/* partially initialized */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized -- do the startup work.
	 * We set ta_p->initialized to -1 to cut off recursive calls
	 * into libc_db by code in the provider of ps_pglobal_lookup().
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	/* try "libc.so" first, then "libc.so.1" (see comment at the names) */
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	/* use whichever library name succeeded for the bootstrap symbol */
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not linked yet */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Read the uberdata address into the thread agent structure.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == NULL)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;
		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == NULL)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == NULL) {
			/* primary linkmap in the tgt is not initialized */
			ta_p->bootstrap_addr = NULL;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif	/* _SYSCALL32 */
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	if (ta_p->bootstrap_addr == NULL)
		ta_p->initialized = 0;
	return (TD_OK);
}
2N/A
/* optional client interfaces; tested for NULL before use below */
#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

/*
 * Allocate a new agent process handle ("thread agent").
 * The target is stopped (ps_pstop) for the duration of the call.
 * Returns TD_OK with *ta_pp set, or an error with *ta_pp NULL.
 */
#pragma weak td_ta_new = __td_ta_new
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel might not be defined if this is an older client.
	 * Make it a weak symbol and test if it exists before calling.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}

	/*
	 * Initialize the agent process handle.
	 * Pick up the symbol value we need from the target process.
	 */
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Because the old libthread_db enabled lock tracking by default,
	 * we must also do it.  However, we do it only if the application
	 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
	 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Lock tracking was already enabled or we
			 * failed to enable it, probably because we
			 * are examining a core file.  In either case
			 * set the sync_tracking flag non-zero to
			 * indicate that we should not attempt to
			 * disable lock tracking when we delete the
			 * agent process handle in td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);

	(void) ps_pcontinue(ph_p);
	return (return_val);
}
2N/A
2N/A/*
2N/A * Utility function to grab the readers lock and return the prochandle,
2N/A * given an agent process handle. Performs standard error checking.
2N/A * Returns non-NULL with the lock held, or NULL with the lock not held.
2N/A */
2N/Astatic struct ps_prochandle *
2N/Aph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
2N/A{
2N/A struct ps_prochandle *ph_p = NULL;
2N/A td_err_e error;
2N/A
2N/A if (ta_p == NULL || ta_p->initialized == -1) {
2N/A *err = TD_BADTA;
2N/A } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
2N/A *err = TD_BADTA;
2N/A } else if ((ph_p = ta_p->ph_p) == NULL) {
2N/A (void) rw_unlock(&ta_p->rwlock);
2N/A *err = TD_BADPH;
2N/A } else if (ta_p->initialized != 2 &&
2N/A (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
2N/A (void) rw_unlock(&ta_p->rwlock);
2N/A ph_p = NULL;
2N/A *err = error;
2N/A } else {
2N/A *err = TD_OK;
2N/A }
2N/A
2N/A return (ph_p);
2N/A}
2N/A
2N/A/*
2N/A * Utility function to grab the readers lock and return the prochandle,
2N/A * given an agent thread handle. Performs standard error checking.
2N/A * Returns non-NULL with the lock held, or NULL with the lock not held.
2N/A */
2N/Astatic struct ps_prochandle *
2N/Aph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
2N/A{
2N/A if (th_p == NULL || th_p->th_unique == NULL) {
2N/A *err = TD_BADTH;
2N/A return (NULL);
2N/A }
2N/A return (ph_lock_ta(th_p->th_ta_p, err));
2N/A}
2N/A
2N/A/*
2N/A * Utility function to grab the readers lock and return the prochandle,
2N/A * given a synchronization object handle. Performs standard error checking.
2N/A * Returns non-NULL with the lock held, or NULL with the lock not held.
2N/A */
2N/Astatic struct ps_prochandle *
2N/Aph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
2N/A{
2N/A if (sh_p == NULL || sh_p->sh_unique == NULL) {
2N/A *err = TD_BADSH;
2N/A return (NULL);
2N/A }
2N/A return (ph_lock_ta(sh_p->sh_ta_p, err));
2N/A}
2N/A
/*
 * Unlock the agent process handle obtained from ph_lock_*().
 * The caller must hold the readers lock acquired by a ph_lock_*()
 * function that returned non-NULL.
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}
2N/A
/*
 * De-allocate an agent process handle,
 * releasing all related resources.
 *
 * XXX -- This is hopelessly broken ---
 * Storage for thread agent is not deallocated.  The prochandle
 * in the thread agent is set to NULL so that future uses of
 * the thread agent can be detected and an error value returned.
 * All functions in the external user interface that make
 * use of the thread agent are expected
 * to check for a NULL prochandle in the thread agent.
 * All such functions are also expected to obtain a
 * reader lock on the thread agent while it is using it.
 */
#pragma weak td_ta_delete = __td_ta_delete
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	/*
	 * This is the only place we grab the writer lock.
	 * We are going to NULL out the prochandle.
	 */
	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		/* already deleted */
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * If synch. tracking was disabled when td_ta_new() was called and
	 * if td_ta_sync_tracking_enable() was never called, then disable
	 * synch. tracking (it was enabled by default in td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		/* best-effort write; errors are deliberately ignored */
		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}
2N/A
2N/A/*
2N/A * Map an agent process handle to a client prochandle.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_ta_get_ph = __td_ta_get_ph
2N/Atd_err_e
2N/A__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
2N/A{
2N/A td_err_e return_val;
2N/A
2N/A if (ph_pp != NULL) /* protect stupid callers */
2N/A *ph_pp = NULL;
2N/A if (ph_pp == NULL)
2N/A return (TD_ERR);
2N/A if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A ph_unlock(ta_p);
2N/A return (TD_OK);
2N/A}
2N/A
2N/A/*
2N/A * Set the process's suggested concurrency level.
2N/A * This is a no-op in a one-level model.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
2N/A/* ARGSUSED1 */
2N/Atd_err_e
2N/A__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
2N/A{
2N/A if (ta_p == NULL)
2N/A return (TD_BADTA);
2N/A if (ta_p->ph_p == NULL)
2N/A return (TD_BADPH);
2N/A return (TD_OK);
2N/A}
2N/A
2N/A/*
2N/A * Get the number of threads in the process.
2N/A */
2N/A#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
2N/Atd_err_e
2N/A__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A int nthreads;
2N/A int nzombies;
2N/A psaddr_t nthreads_addr;
2N/A psaddr_t nzombies_addr;
2N/A
2N/A if (ta_p->model == PR_MODEL_NATIVE) {
2N/A nthreads_addr = ta_p->uberdata_addr +
2N/A offsetof(uberdata_t, nthreads);
2N/A nzombies_addr = ta_p->uberdata_addr +
2N/A offsetof(uberdata_t, nzombies);
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A nthreads_addr = ta_p->uberdata_addr +
2N/A offsetof(uberdata32_t, nthreads);
2N/A nzombies_addr = ta_p->uberdata_addr +
2N/A offsetof(uberdata32_t, nzombies);
2N/A#else
2N/A nthreads_addr = 0;
2N/A nzombies_addr = 0;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A if (nthread_p == NULL)
2N/A return (TD_ERR);
2N/A if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A ph_unlock(ta_p);
2N/A if (return_val == TD_OK)
2N/A *nthread_p = nthreads + nzombies;
2N/A return (return_val);
2N/A}
2N/A
/*
 * Callback argument for td_mapper_id2thr(), used by
 * __td_ta_map_id2thr() to search for a thread by id.
 */
typedef struct {
	thread_t tid;		/* thread id being searched for */
	int found;		/* set non-zero when a match is found */
	td_thrhandle_t th;	/* handle of the matching thread */
} td_mapper_param_t;
2N/A
2N/A/*
2N/A * Check the value in data against the thread id.
2N/A * If it matches, return 1 to terminate iterations.
2N/A * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
2N/A */
2N/Astatic int
2N/Atd_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
2N/A{
2N/A td_thrinfo_t ti;
2N/A
2N/A if (__td_thr_get_info(th_p, &ti) == TD_OK &&
2N/A data->tid == ti.ti_tid) {
2N/A data->found = 1;
2N/A data->th = *th_p;
2N/A return (1);
2N/A }
2N/A return (0);
2N/A}
2N/A
/*
 * Given a thread identifier, return the corresponding thread handle.
 * Returns TD_OK with *th_p filled in, TD_NOTHR if no such thread,
 * or TD_BADTA/TD_BADTH for bad arguments.
 */
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e return_val;
	td_mapper_param_t data;

	/*
	 * Fast path: a single-threaded target whose only lwpid matches.
	 * Note the second ta_p->initialized == 1 test: td_read_uberdata()
	 * may promote the agent to fully initialized, in which case the
	 * cached single_lwpid is no longer valid and we fall through.
	 */
	if (th_p != NULL &&	/* optimize for a single thread */
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	/*
	 * LOCKING EXCEPTION - Locking is not required here because
	 * the locking and checking will be done in __td_ta_thr_iter.
	 */

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	/* iterate over all threads, looking for a matching tid */
	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}

	return (return_val);
}
2N/A
/*
 * Map the address of a synchronization object to a sync. object handle.
 * Validates the object by reading its magic number from the target;
 * returns TD_BADSH if the address does not hold a recognizable
 * mutex, condition variable, semaphore, or rwlock.
 */
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync. object to make sure it's valid.
	 * The magic number is at the same offset for all sync. objects.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync. handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}
2N/A
/*
 * Iterate over the set of global TSD keys.
 * The call back function is called with three arguments,
 * a key, a pointer to the destructor function, and the cbdata pointer.
 * Currently unused by dbx.
 * The target is stopped (ps_pstop) for the duration of the iteration.
 */
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int key;
	int numkeys;
	psaddr_t dest_addr;
	psaddr_t *destructors = NULL;
	PFrV destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the TSD metadata (key count and the address of the
	 * destructor array) from the target, model-dependently.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif	/* _SYSCALL32 */
	}

	/* nothing to iterate over, or the metadata read failed */
	if (return_val != TD_OK || numkeys <= 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* key 0 is never allocated; start at 1 */
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* key 0 is never allocated; start at 1 */
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if (destruct32 != (caddr32_t)TSD_UNALLOCATED &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif	/* _SYSCALL32 */
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
2N/A
2N/Aint
2N/Asigequalset(const sigset_t *s1, const sigset_t *s2)
2N/A{
2N/A return (
2N/A s1->__sigbits[0] == s2->__sigbits[0] &&
2N/A s1->__sigbits[1] == s2->__sigbits[1] &&
2N/A s1->__sigbits[2] == s2->__sigbits[2] &&
2N/A s1->__sigbits[3] == s2->__sigbits[3]);
2N/A}
2N/A
/*
 * Description:
 *   Iterate over all threads. For each thread call
 * the function pointed to by "cb" with a pointer
 * to a thread handle, and a pointer to data which
 * can be NULL. Only call td_thr_iter_f() on threads
 * which match the properties of state, ti_pri,
 * ti_sigmask_p, and ti_user_flags. If cb returns
 * a non-zero value, terminate iterations.
 *
 * Input:
 *   *ta_p - thread agent
 *   *cb - call back function defined by user.
 * td_thr_iter_f() takes a thread handle and
 * cbdata_p as a parameter.
 *   cbdata_p - parameter for td_thr_iter_f().
 *
 *   state - state of threads of interest. A value of
 * TD_THR_ANY_STATE from enum td_thr_state_e
 * does not restrict iterations by state.
 *   ti_pri - lower bound of priorities of threads of
 * interest. A value of TD_THR_LOWEST_PRIORITY
 * defined in thread_db.h does not restrict
 * iterations by priority. A thread with priority
 * less than ti_pri will NOT be passed to the callback
 * function.
 *   ti_sigmask_p - signal mask of threads of interest.
 * A value of TD_SIGNO_MASK defined in thread_db.h
 * does not restrict iterations by signal mask.
 *   ti_user_flags - user flags of threads of interest. A
 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
 * does not restrict iterations by user flags.
 */
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t first_lwp_addr;
	psaddr_t first_zombie_addr;
	psaddr_t curr_lwp_addr;
	psaddr_t next_lwp_addr;
	td_thrhandle_t th;
	ps_err_e db_return;
	ps_err_e db_return2;
	td_err_e return_val;

	if (cb == NULL)
		return (TD_ERR);
	/*
	 * If state is not within bound, short circuit.
	 */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * For each ulwp_t in the circular linked lists pointed
	 * to by "all_lwps" and "all_zombies":
	 * (1) Filter each thread.
	 * (2) Create the thread_object for each thread that passes.
	 * (3) Call the call back function on each thread.
	 */

	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else	/* _SYSCALL32 */
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif	/* _SYSCALL32 */
	}
	/* collapse the two read results into one status */
	if (db_return == PS_OK)
		db_return = db_return2;

	/*
	 * If first_lwp_addr and first_zombie_addr are both NULL,
	 * libc must not yet be initialized or all threads have
	 * exited. Return TD_NOTHR and all will be well.
	 */
	if (db_return == PS_OK &&
	    first_lwp_addr == NULL && first_zombie_addr == NULL) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Run down the lists of all living and dead lwps.
	 */
	if (first_lwp_addr == NULL)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.  If the full-size read fails
		 * (e.g. the ulwp is a reduced "replacement" for an
		 * exited thread), zero the buffer and retry with the
		 * smaller replacement size -- note the comma operator.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else	/* _SYSCALL32 */
			return_val = TD_ERR;
			break;
#endif	/* _SYSCALL32 */
		}

		/*
		 * Filter on state, priority, sigmask, and user flags.
		 */

		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;

		if (ti_pri > userpri)
			goto advance;

		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;

		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back - break if the return
		 * from the call back is non-zero.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;

advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Switch to the zombie list, unless it is NULL
			 * or we have already been doing the zombie list,
			 * in which case terminate the loop.
			 */
			if (first_zombie_addr == NULL ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
2N/A
2N/A/*
2N/A * Enable or disable process synchronization object tracking.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
2N/Atd_err_e
2N/A__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A register_sync_t enable;
2N/A
2N/A if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A /*
2N/A * Values of tdb_register_sync in the victim process:
2N/A * REGISTER_SYNC_ENABLE enables registration of synch objects
2N/A * REGISTER_SYNC_DISABLE disables registration of synch objects
2N/A * These cause the table to be cleared and tdb_register_sync set to:
2N/A * REGISTER_SYNC_ON registration in effect
2N/A * REGISTER_SYNC_OFF registration not in effect
2N/A */
2N/A enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
2N/A if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
2N/A &enable, sizeof (enable)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A /*
2N/A * Remember that this interface was called (see td_ta_delete()).
2N/A */
2N/A ta_p->sync_tracking = 1;
2N/A ph_unlock(ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Iterate over all known synchronization variables.
2N/A * It is very possible that the list generated is incomplete,
2N/A * because the iterator can only find synchronization variables
2N/A * that have been registered by the process since synchronization
2N/A * object registration was enabled.
2N/A * The call back function cb is called for each synchronization
2N/A * variable with two arguments: a pointer to the synchronization
2N/A * handle and the passed-in argument cbdata.
2N/A * If cb returns a non-zero value, iterations are terminated.
2N/A */
2N/A#pragma weak td_ta_sync_iter = __td_ta_sync_iter
2N/Atd_err_e
2N/A__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A int i;
2N/A register_sync_t enable;
2N/A psaddr_t next_desc;
2N/A tdb_sync_stats_t sync_stats;
2N/A td_synchandle_t synchandle;
2N/A psaddr_t psaddr;
2N/A void *vaddr;
2N/A uint64_t *sync_addr_hash = NULL;
2N/A
2N/A if (cb == NULL)
2N/A return (TD_ERR);
2N/A if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2N/A &enable, sizeof (enable)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A if (enable != REGISTER_SYNC_ON)
2N/A goto out;
2N/A
2N/A /*
2N/A * First read the hash table.
2N/A * The hash table is large; allocate with mmap().
2N/A */
2N/A if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
2N/A PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
2N/A == MAP_FAILED) {
2N/A return_val = TD_MALLOC;
2N/A goto out;
2N/A }
2N/A sync_addr_hash = vaddr;
2N/A
2N/A if (ta_p->model == PR_MODEL_NATIVE) {
2N/A if (ps_pdread(ph_p, ta_p->uberdata_addr +
2N/A offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2N/A &psaddr, sizeof (&psaddr)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A } else {
2N/A#ifdef _SYSCALL32
2N/A caddr32_t addr;
2N/A
2N/A if (ps_pdread(ph_p, ta_p->uberdata_addr +
2N/A offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2N/A &addr, sizeof (addr)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A psaddr = addr;
2N/A#else
2N/A return_val = TD_ERR;
2N/A goto out;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A if (psaddr == NULL)
2N/A goto out;
2N/A if (ps_pdread(ph_p, psaddr, sync_addr_hash,
2N/A TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A
2N/A /*
2N/A * Now scan the hash table.
2N/A */
2N/A for (i = 0; i < TDB_HASH_SIZE; i++) {
2N/A for (next_desc = (psaddr_t)sync_addr_hash[i];
2N/A next_desc != NULL;
2N/A next_desc = (psaddr_t)sync_stats.next) {
2N/A if (ps_pdread(ph_p, next_desc,
2N/A &sync_stats, sizeof (sync_stats)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A if (sync_stats.un.type == TDB_NONE) {
2N/A /* not registered since registration enabled */
2N/A continue;
2N/A }
2N/A synchandle.sh_ta_p = ta_p;
2N/A synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
2N/A if ((*cb)(&synchandle, cbdata) != 0)
2N/A goto out;
2N/A }
2N/A }
2N/A
2N/Aout:
2N/A if (sync_addr_hash != NULL)
2N/A (void) munmap((void *)sync_addr_hash,
2N/A TDB_HASH_SIZE * sizeof (uint64_t));
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Enable process statistics collection.
2N/A */
2N/A#pragma weak td_ta_enable_stats = __td_ta_enable_stats
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
2N/A{
2N/A return (TD_NOCAPAB);
2N/A}
2N/A
2N/A/*
2N/A * Reset process statistics.
2N/A */
2N/A#pragma weak td_ta_reset_stats = __td_ta_reset_stats
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_ta_reset_stats(const td_thragent_t *ta_p)
2N/A{
2N/A return (TD_NOCAPAB);
2N/A}
2N/A
2N/A/*
2N/A * Read process statistics.
2N/A */
2N/A#pragma weak td_ta_get_stats = __td_ta_get_stats
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
2N/A{
2N/A return (TD_NOCAPAB);
2N/A}
2N/A
2N/A/*
2N/A * Transfer information from lwp struct to thread information struct.
2N/A * XXX -- lots of this needs cleaning up.
2N/A */
2N/Astatic void
2N/Atd_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
2N/A ulwp_t *ulwp, td_thrinfo_t *ti_p)
2N/A{
2N/A lwpid_t lwpid;
2N/A
2N/A if ((lwpid = ulwp->ul_lwpid) == 0)
2N/A lwpid = 1;
2N/A (void) memset(ti_p, 0, sizeof (*ti_p));
2N/A ti_p->ti_ta_p = ta_p;
2N/A ti_p->ti_user_flags = ulwp->ul_usropts;
2N/A ti_p->ti_tid = lwpid;
2N/A ti_p->ti_exitval = ulwp->ul_rval;
2N/A ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
2N/A if (!ulwp->ul_dead) {
2N/A /*
2N/A * The bloody fools got this backwards!
2N/A */
2N/A ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
2N/A ti_p->ti_stksize = ulwp->ul_stksiz;
2N/A }
2N/A ti_p->ti_ro_area = ts_addr;
2N/A ti_p->ti_ro_size = ulwp->ul_replace?
2N/A REPLACEMENT_SIZE : sizeof (ulwp_t);
2N/A ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
2N/A ulwp->ul_stop? TD_THR_STOPPED :
2N/A ulwp->ul_wchan? TD_THR_SLEEP :
2N/A TD_THR_ACTIVE;
2N/A ti_p->ti_db_suspended = 0;
2N/A ti_p->ti_type = TD_THR_USER;
2N/A ti_p->ti_sp = ulwp->ul_sp;
2N/A ti_p->ti_flags = 0;
2N/A ti_p->ti_pri = ulwp->ul_pri;
2N/A ti_p->ti_lid = lwpid;
2N/A if (!ulwp->ul_dead)
2N/A ti_p->ti_sigmask = ulwp->ul_sigmask;
2N/A ti_p->ti_traceme = 0;
2N/A ti_p->ti_preemptflag = 0;
2N/A ti_p->ti_pirecflag = 0;
2N/A (void) sigemptyset(&ti_p->ti_pending);
2N/A ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
2N/A}
2N/A
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/Astatic void
2N/Atd_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
2N/A ulwp32_t *ulwp, td_thrinfo_t *ti_p)
2N/A{
2N/A lwpid_t lwpid;
2N/A
2N/A if ((lwpid = ulwp->ul_lwpid) == 0)
2N/A lwpid = 1;
2N/A (void) memset(ti_p, 0, sizeof (*ti_p));
2N/A ti_p->ti_ta_p = ta_p;
2N/A ti_p->ti_user_flags = ulwp->ul_usropts;
2N/A ti_p->ti_tid = lwpid;
2N/A ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
2N/A ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
2N/A if (!ulwp->ul_dead) {
2N/A /*
2N/A * The bloody fools got this backwards!
2N/A */
2N/A ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
2N/A ti_p->ti_stksize = ulwp->ul_stksiz;
2N/A }
2N/A ti_p->ti_ro_area = ts_addr;
2N/A ti_p->ti_ro_size = ulwp->ul_replace?
2N/A REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
2N/A ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
2N/A ulwp->ul_stop? TD_THR_STOPPED :
2N/A ulwp->ul_wchan? TD_THR_SLEEP :
2N/A TD_THR_ACTIVE;
2N/A ti_p->ti_db_suspended = 0;
2N/A ti_p->ti_type = TD_THR_USER;
2N/A ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
2N/A ti_p->ti_flags = 0;
2N/A ti_p->ti_pri = ulwp->ul_pri;
2N/A ti_p->ti_lid = lwpid;
2N/A if (!ulwp->ul_dead)
2N/A ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
2N/A ti_p->ti_traceme = 0;
2N/A ti_p->ti_preemptflag = 0;
2N/A ti_p->ti_pirecflag = 0;
2N/A (void) sigemptyset(&ti_p->ti_pending);
2N/A ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
2N/A}
2N/A#endif /* _SYSCALL32 */
2N/A
2N/A/*
2N/A * Get thread information.
2N/A */
2N/A#pragma weak td_thr_get_info = __td_thr_get_info
2N/Atd_err_e
2N/A__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_thragent_t *ta_p;
2N/A td_err_e return_val;
2N/A psaddr_t psaddr;
2N/A
2N/A if (ti_p == NULL)
2N/A return (TD_ERR);
2N/A (void) memset(ti_p, NULL, sizeof (*ti_p));
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A ta_p = th_p->th_ta_p;
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A /*
2N/A * Read the ulwp struct from the process.
2N/A * Transfer the ulwp struct to the thread information struct.
2N/A */
2N/A psaddr = th_p->th_unique;
2N/A if (ta_p->model == PR_MODEL_NATIVE) {
2N/A ulwp_t ulwp;
2N/A
2N/A if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
2N/A ((void) memset(&ulwp, 0, sizeof (ulwp)),
2N/A ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A else
2N/A td_thr2to(ta_p, psaddr, &ulwp, ti_p);
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A ulwp32_t ulwp;
2N/A
2N/A if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
2N/A ((void) memset(&ulwp, 0, sizeof (ulwp)),
2N/A ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
2N/A PS_OK)
2N/A return_val = TD_DBERR;
2N/A else
2N/A td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
2N/A#else
2N/A return_val = TD_ERR;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Given a process and an event number, return information about
2N/A * an address in the process or at which a breakpoint can be set
2N/A * to monitor the event.
2N/A */
2N/A#pragma weak td_ta_event_addr = __td_ta_event_addr
2N/Atd_err_e
2N/A__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
2N/A{
2N/A if (ta_p == NULL)
2N/A return (TD_BADTA);
2N/A if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
2N/A return (TD_NOEVENT);
2N/A if (notify_p == NULL)
2N/A return (TD_ERR);
2N/A
2N/A notify_p->type = NOTIFY_BPT;
2N/A notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
2N/A
2N/A return (TD_OK);
2N/A}
2N/A
2N/A/*
2N/A * Add the events in eventset 2 to eventset 1.
2N/A */
2N/Astatic void
2N/Aeventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
2N/A{
2N/A int i;
2N/A
2N/A for (i = 0; i < TD_EVENTSIZE; i++)
2N/A event1_p->event_bits[i] |= event2_p->event_bits[i];
2N/A}
2N/A
2N/A/*
2N/A * Delete the events in eventset 2 from eventset 1.
2N/A */
2N/Astatic void
2N/Aeventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
2N/A{
2N/A int i;
2N/A
2N/A for (i = 0; i < TD_EVENTSIZE; i++)
2N/A event1_p->event_bits[i] &= ~event2_p->event_bits[i];
2N/A}
2N/A
2N/A/*
2N/A * Either add or delete the given event set from a thread's event mask.
2N/A */
2N/Astatic td_err_e
2N/Amod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val = TD_OK;
2N/A char enable;
2N/A td_thr_events_t evset;
2N/A psaddr_t psaddr_evset;
2N/A psaddr_t psaddr_enab;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2N/A ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2N/A psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
2N/A psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2N/A psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
2N/A psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
2N/A#else
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_ERR);
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A else {
2N/A if (onoff)
2N/A eventsetaddset(&evset, events);
2N/A else
2N/A eventsetdelset(&evset, events);
2N/A if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
2N/A != PS_OK)
2N/A return_val = TD_DBERR;
2N/A else {
2N/A enable = 0;
2N/A if (td_eventismember(&evset, TD_EVENTS_ENABLE))
2N/A enable = 1;
2N/A if (ps_pdwrite(ph_p, psaddr_enab,
2N/A &enable, sizeof (enable)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A }
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Enable or disable tracing for a given thread. Tracing
2N/A * is filtered based on the event mask of each thread. Tracing
2N/A * can be turned on/off for the thread without changing thread
2N/A * event mask.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_event_enable = __td_thr_event_enable
2N/Atd_err_e
2N/A__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
2N/A{
2N/A td_thr_events_t evset;
2N/A
2N/A td_event_emptyset(&evset);
2N/A td_event_addset(&evset, TD_EVENTS_ENABLE);
2N/A return (mod_eventset(th_p, &evset, onoff));
2N/A}
2N/A
2N/A/*
2N/A * Set event mask to enable event. event is turned on in
2N/A * event mask for thread. If a thread encounters an event
2N/A * for which its event mask is on, notification will be sent
2N/A * to the debugger.
2N/A * Addresses for each event are provided to the
2N/A * debugger. It is assumed that a breakpoint of some type will
2N/A * be placed at that address. If the event mask for the thread
2N/A * is on, the instruction at the address will be executed.
2N/A * Otherwise, the instruction will be skipped.
2N/A */
2N/A#pragma weak td_thr_set_event = __td_thr_set_event
2N/Atd_err_e
2N/A__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
2N/A{
2N/A return (mod_eventset(th_p, events, 1));
2N/A}
2N/A
2N/A/*
2N/A * Enable or disable a set of events in the process-global event mask,
2N/A * depending on the value of onoff.
2N/A */
2N/Astatic td_err_e
2N/Atd_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_thr_events_t targ_eventset;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
2N/A &targ_eventset, sizeof (targ_eventset)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A else {
2N/A if (onoff)
2N/A eventsetaddset(&targ_eventset, events);
2N/A else
2N/A eventsetdelset(&targ_eventset, events);
2N/A if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
2N/A &targ_eventset, sizeof (targ_eventset)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A }
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Enable a set of events in the process-global event mask.
2N/A */
2N/A#pragma weak td_ta_set_event = __td_ta_set_event
2N/Atd_err_e
2N/A__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
2N/A{
2N/A return (td_ta_mod_event(ta_p, events, 1));
2N/A}
2N/A
2N/A/*
2N/A * Set event mask to disable the given event set; these events are cleared
2N/A * from the event mask of the thread. Events that occur for a thread
2N/A * with the event masked off will not cause notification to be
2N/A * sent to the debugger (see td_thr_set_event for fuller description).
2N/A */
2N/A#pragma weak td_thr_clear_event = __td_thr_clear_event
2N/Atd_err_e
2N/A__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
2N/A{
2N/A return (mod_eventset(th_p, events, 0));
2N/A}
2N/A
2N/A/*
2N/A * Disable a set of events in the process-global event mask.
2N/A */
2N/A#pragma weak td_ta_clear_event = __td_ta_clear_event
2N/Atd_err_e
2N/A__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
2N/A{
2N/A return (td_ta_mod_event(ta_p, events, 0));
2N/A}
2N/A
2N/A/*
2N/A * This function returns the most recent event message, if any,
2N/A * associated with a thread. Given a thread handle, return the message
2N/A * corresponding to the event encountered by the thread. Only one
2N/A * message per thread is saved. Messages from earlier events are lost
2N/A * when later events occur.
2N/A */
2N/A#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
2N/Atd_err_e
2N/A__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val = TD_OK;
2N/A psaddr_t psaddr;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_BADTA);
2N/A }
2N/A if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2N/A ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2N/A td_evbuf_t evbuf;
2N/A
2N/A psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
2N/A if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A } else if (evbuf.eventnum == TD_EVENT_NONE) {
2N/A return_val = TD_NOEVENT;
2N/A } else {
2N/A msg->event = evbuf.eventnum;
2N/A msg->th_p = (td_thrhandle_t *)th_p;
2N/A msg->msg.data = (uintptr_t)evbuf.eventdata;
2N/A /* "Consume" the message */
2N/A evbuf.eventnum = TD_EVENT_NONE;
2N/A evbuf.eventdata = NULL;
2N/A if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
2N/A != PS_OK)
2N/A return_val = TD_DBERR;
2N/A }
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2N/A td_evbuf32_t evbuf;
2N/A
2N/A psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
2N/A if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A } else if (evbuf.eventnum == TD_EVENT_NONE) {
2N/A return_val = TD_NOEVENT;
2N/A } else {
2N/A msg->event = evbuf.eventnum;
2N/A msg->th_p = (td_thrhandle_t *)th_p;
2N/A msg->msg.data = (uintptr_t)evbuf.eventdata;
2N/A /* "Consume" the message */
2N/A evbuf.eventnum = TD_EVENT_NONE;
2N/A evbuf.eventdata = NULL;
2N/A if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
2N/A != PS_OK)
2N/A return_val = TD_DBERR;
2N/A }
2N/A#else
2N/A return_val = TD_ERR;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * The callback function td_ta_event_getmsg uses when looking for
2N/A * a thread with an event. A thin wrapper around td_thr_event_getmsg.
2N/A */
2N/Astatic int
2N/Aevent_msg_cb(const td_thrhandle_t *th_p, void *arg)
2N/A{
2N/A static td_thrhandle_t th;
2N/A td_event_msg_t *msg = arg;
2N/A
2N/A if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
2N/A /*
2N/A * Got an event, stop iterating.
2N/A *
2N/A * Because of past mistakes in interface definition,
2N/A * we are forced to pass back a static local variable
2N/A * for the thread handle because th_p is a pointer
2N/A * to a local variable in __td_ta_thr_iter().
2N/A * Grr...
2N/A */
2N/A th = *th_p;
2N/A msg->th_p = &th;
2N/A return (1);
2N/A }
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * This function is just like td_thr_event_getmsg, except that it is
2N/A * passed a process handle rather than a thread handle, and returns
2N/A * an event message for some thread in the process that has an event
2N/A * message pending. If no thread has an event message pending, this
2N/A * routine returns TD_NOEVENT. Thus, all pending event messages may
2N/A * be collected from a process by repeatedly calling this routine
2N/A * until it returns TD_NOEVENT.
2N/A */
2N/A#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
2N/Atd_err_e
2N/A__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
2N/A{
2N/A td_err_e return_val;
2N/A
2N/A if (ta_p == NULL)
2N/A return (TD_BADTA);
2N/A if (ta_p->ph_p == NULL)
2N/A return (TD_BADPH);
2N/A if (msg == NULL)
2N/A return (TD_ERR);
2N/A msg->event = TD_EVENT_NONE;
2N/A if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
2N/A TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
2N/A TD_THR_ANY_USER_FLAGS)) != TD_OK)
2N/A return (return_val);
2N/A if (msg->event == TD_EVENT_NONE)
2N/A return (TD_NOEVENT);
2N/A return (TD_OK);
2N/A}
2N/A
2N/Astatic lwpid_t
2N/Athr_to_lwpid(const td_thrhandle_t *th_p)
2N/A{
2N/A struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
2N/A lwpid_t lwpid;
2N/A
2N/A /*
2N/A * The caller holds the prochandle lock
2N/A * and has already verfied everything.
2N/A */
2N/A if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2N/A ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2N/A
2N/A if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
2N/A &lwpid, sizeof (lwpid)) != PS_OK)
2N/A lwpid = 0;
2N/A else if (lwpid == 0)
2N/A lwpid = 1;
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2N/A
2N/A if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
2N/A &lwpid, sizeof (lwpid)) != PS_OK)
2N/A lwpid = 0;
2N/A else if (lwpid == 0)
2N/A lwpid = 1;
2N/A#else
2N/A lwpid = 0;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A return (lwpid);
2N/A}
2N/A
2N/A/*
2N/A * Suspend a thread.
2N/A * XXX: What does this mean in a one-level model?
2N/A */
2N/A#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
2N/Atd_err_e
2N/A__td_thr_dbsuspend(const td_thrhandle_t *th_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Resume a suspended thread.
2N/A * XXX: What does this mean in a one-level model?
2N/A */
2N/A#pragma weak td_thr_dbresume = __td_thr_dbresume
2N/Atd_err_e
2N/A__td_thr_dbresume(const td_thrhandle_t *th_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's signal mask.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
2N/A{
2N/A return (TD_NOCAPAB);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's "signals-pending" set.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_setsigpending = __td_thr_setsigpending
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_setsigpending(const td_thrhandle_t *th_p,
2N/A uchar_t ti_pending_flag, const sigset_t ti_pending)
2N/A{
2N/A return (TD_NOCAPAB);
2N/A}
2N/A
2N/A/*
2N/A * Get a thread's general register set.
2N/A */
2N/A#pragma weak td_thr_getgregs = __td_thr_getgregs
2N/Atd_err_e
2N/A__td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's general register set.
2N/A */
2N/A#pragma weak td_thr_setgregs = __td_thr_setgregs
2N/Atd_err_e
2N/A__td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Get a thread's floating-point register set.
2N/A */
2N/A#pragma weak td_thr_getfpregs = __td_thr_getfpregs
2N/Atd_err_e
2N/A__td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's floating-point register set.
2N/A */
2N/A#pragma weak td_thr_setfpregs = __td_thr_setfpregs
2N/Atd_err_e
2N/A__td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Get the size of the extra state register set for this architecture.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_getxregsize = __td_thr_getxregsize
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A err = ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize);
2N/A if (err != PS_OK) {
2N/A#if defined(__i386) || defined(__amd64)
2N/A if (err == PS_NOXREGS) {
2N/A return_val = TD_NOXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A#else
2N/A return_val = TD_DBERR;
2N/A#endif
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Get a thread's extra state register set.
2N/A */
2N/A#pragma weak td_thr_getxregs = __td_thr_getxregs
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
2N/A if (err != PS_OK) {
2N/A#if defined(__i386) || defined(__amd64)
2N/A if (err == PS_NOXREGS) {
2N/A return_val = TD_NOXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A#else
2N/A return_val = TD_DBERR;
2N/A#endif
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's extra state register set.
2N/A */
2N/A#pragma weak td_thr_setxregs = __td_thr_setxregs
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A err = ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
2N/A if (err != PS_OK) {
2N/A#if defined(__i386) || defined(__amd64)
2N/A if (err == PS_NOXREGS) {
2N/A return_val = TD_NOXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A#else
2N/A return_val = TD_DBERR;
2N/A#endif
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Get the size of the extra state register set for this processor.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_getcxregsize = __td_thr_getcxregsize
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_getcxregsize(td_thrhandle_t *th_p, int *cxregsize)
2N/A{
2N/A td_err_e return_val;
2N/A
2N/A#if defined(__sparc)
2N/A struct ps_prochandle *ph_p;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A /*
2N/A * ps_lgetcxregsize might not be defined if this is an older client.
2N/A * Make it a weak symbol and test if it exists before calling.
2N/A */
2N/A#pragma weak ps_lgetcxregsize
2N/A if (ps_lgetcxregsize == NULL) {
2N/A err = PS_NOCXREGS;
2N/A } else {
2N/A err = ps_lgetcxregsize(ph_p, thr_to_lwpid(th_p), cxregsize);
2N/A }
2N/A if (err != PS_OK) {
2N/A if (err == PS_NOCXREGS) {
2N/A return_val = TD_NOCXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A#else
2N/A return_val = TD_NOCXREGS;
2N/A#endif
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Get a thread's extra state register set.
2N/A */
2N/A#pragma weak td_thr_getcxregs = __td_thr_getcxregs
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_getcxregs(td_thrhandle_t *th_p, void *cxregset)
2N/A{
2N/A td_err_e return_val;
2N/A
2N/A#if defined(__sparc)
2N/A struct ps_prochandle *ph_p;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A /*
2N/A * ps_lgetcxregs might not be defined if this is an older client.
2N/A * Make it a weak symbol and test if it exists before calling.
2N/A */
2N/A#pragma weak ps_lgetcxregs
2N/A if (ps_lgetcxregs == NULL) {
2N/A err = PS_NOCXREGS;
2N/A } else {
2N/A err = ps_lgetcxregs(ph_p, thr_to_lwpid(th_p),
2N/A (caddr_t)cxregset);
2N/A }
2N/A if (err != PS_OK) {
2N/A if (err == PS_NOCXREGS) {
2N/A return_val = TD_NOCXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A#else
2N/A return_val = TD_NOCXREGS;
2N/A#endif
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Set a thread's extra state register set.
2N/A */
2N/A#pragma weak td_thr_setcxregs = __td_thr_setcxregs
2N/A/* ARGSUSED */
2N/Atd_err_e
2N/A__td_thr_setcxregs(td_thrhandle_t *th_p, const void *cxregset)
2N/A{
2N/A td_err_e return_val;
2N/A
2N/A#if defined(__sparc)
2N/A struct ps_prochandle *ph_p;
2N/A ps_err_e err;
2N/A
2N/A if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(th_p->th_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A /*
2N/A * ps_lsetcxregs might not be defined if this is an older client.
2N/A * Make it a weak symbol and test if it exists before calling.
2N/A */
2N/A#pragma weak ps_lsetcxregs
2N/A if (ps_lsetcxregs == NULL) {
2N/A err = PS_NOCXREGS;
2N/A } else {
2N/A err = ps_lsetcxregs(ph_p, thr_to_lwpid(th_p),
2N/A (caddr_t)cxregset);
2N/A }
2N/A if (err != PS_OK) {
2N/A if (err == PS_NOCXREGS) {
2N/A return_val = TD_NOCXREGS;
2N/A } else {
2N/A return_val = TD_DBERR;
2N/A }
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(th_p->th_ta_p);
2N/A#else
2N/A return_val = TD_NOCXREGS;
2N/A#endif
2N/A return (return_val);
2N/A}
2N/A
/*
 * Argument block for td_searcher(), used by __td_thr_validate().
 */
struct searcher {
	psaddr_t addr;		/* thread (ulwp) address to search for */
	int status;		/* set to 1 when 'addr' is found */
};
2N/A
/*
 * Check the struct thread address in *th_p against the first
 * value in "data".  If the value in data is found, set the second
 * value in "data" to 1 and return 1 to terminate iterations.
 * This function is used by td_thr_validate() to verify that
 * a thread handle is valid.
 */
static int
td_searcher(const td_thrhandle_t *th_p, void *data)
{
	struct searcher *searcher_data = (struct searcher *)data;

	if (searcher_data->addr == th_p->th_unique) {
		searcher_data->status = 1;	/* found it */
		return (1);			/* stop iterating */
	}
	return (0);				/* keep looking */
}
2N/A
2N/A/*
2N/A * Validate the thread handle. Check that
2N/A * a thread exists in the thread agent/process that
2N/A * corresponds to thread with handle *th_p.
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_thr_validate = __td_thr_validate
2N/Atd_err_e
2N/A__td_thr_validate(const td_thrhandle_t *th_p)
2N/A{
2N/A td_err_e return_val;
2N/A struct searcher searcher_data = {0, 0};
2N/A
2N/A if (th_p == NULL)
2N/A return (TD_BADTH);
2N/A if (th_p->th_unique == NULL || th_p->th_ta_p == NULL)
2N/A return (TD_BADTH);
2N/A
2N/A /*
2N/A * LOCKING EXCEPTION - Locking is not required
2N/A * here because no use of the thread agent is made (other
2N/A * than the sanity check) and checking of the thread
2N/A * agent will be done in __td_ta_thr_iter.
2N/A */
2N/A
2N/A searcher_data.addr = th_p->th_unique;
2N/A return_val = __td_ta_thr_iter(th_p->th_ta_p,
2N/A td_searcher, &searcher_data,
2N/A TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2N/A TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2N/A
2N/A if (return_val == TD_OK && searcher_data.status == 0)
2N/A return_val = TD_NOTHR;
2N/A
2N/A return (return_val);
2N/A}
2N/A
/*
 * Get a thread's private binding to a given thread specific
 * data (TSD) key (see thr_getspecific(3T)).  If the thread doesn't
 * have a binding for a particular key, then NULL is returned.
 */
#pragma weak td_thr_tsd = __td_thr_tsd
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	int maxkey;		/* number of TSD keys in use process-wide */
	int nkey;		/* number of slots in this thread's TSD array */
	psaddr_t tsd_paddr;	/* target address of the thread's TSD array */

	if (data_pp == NULL)
		return (TD_ERR);
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the target so its TSD structures cannot change underfoot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;

		/* Read the process-wide TSD metadata, ... */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		/* ... the thread's slow-TSD pointer, ... */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		/* ... and, when allocated, the slow-TSD header itself. */
		else if (tsd_paddr != NULL &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = tsd_paddr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			/*
			 * Fast keys live directly in the ulwp; slow keys
			 * use the remote array address read above.
			 */
			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same dance with the 32-bit layouts for a 32-bit target. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != NULL &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == NULL ? TSD_NFAST : stsd.tsd_nalloc;

			if (key < TSD_NFAST) {
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			} else {
				tsd_paddr = addr;
			}
		}
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	/* Validate the key against what the process has allocated. */
	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	if (return_val != TD_OK || key >= nkey) {
		/* NULL has already been stored in data_pp */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	/*
	 * Read the value from the thread's tsd array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;

		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
2N/A
/*
 * Get the base address of a thread's thread local storage (TLS) block
 * for the module (executable or shared object) identified by 'moduleid'.
 */
#pragma weak td_thr_tlsbase = __td_thr_tlsbase
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;

	if (base == NULL)
		return (TD_ERR);
	*base = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Keep the target stopped while chasing TLS pointers. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;

		/* Read the process-wide TLS metadata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;	/* no such module */
		/* Read the module's TLS descriptor. */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;	/* module has no TLS */
		/* Static TLS lives at a fixed offset below the ulwp. */
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		/* Otherwise consult the thread's dynamic TLS array. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same lookup with the 32-bit layouts for a 32-bit target. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
2N/A
/*
 * Change a thread's priority to the value specified by ti_pri.
 * Not supported by this implementation: always returns TD_NOCAPAB.
 * Currently unused by dbx.
 */
#pragma weak td_thr_setprio = __td_thr_setprio
/* ARGSUSED */
td_err_e
__td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
{
	return (TD_NOCAPAB);
}
2N/A
/*
 * This structure links td_thr_lockowner and the lowner_cb callback function.
 */
typedef struct {
	td_sync_iter_f *owner_cb;	/* caller's callback */
	void *owner_cb_arg;		/* caller's callback argument */
	td_thrhandle_t *th_p;		/* thread whose locks are sought */
} lowner_cb_ctl_t;
2N/A
/*
 * Per-sync-object callback for __td_thr_lockowner(): invoke the
 * caller's callback if the sync object is a mutex (or the embedded
 * mutex of a rwlock) owned by the thread of interest.
 */
static int
lowner_cb(const td_synchandle_t *sh_p, void *arg)
{
	lowner_cb_ctl_t *ocb = arg;
	int trunc = 0;
	union {
		rwlock_t rwl;
		mutex_t mx;
	} rw_m;

	/*
	 * Try to read the whole union; if that fails (e.g. the object
	 * sits near the end of a mapping), fall back to reading just
	 * the mutex and remember that the read was truncated.
	 */
	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
	    &rw_m, sizeof (rw_m)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
			return (0);
	}
	/* A plain mutex owned by the thread of interest? */
	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	/* A rwlock (only testable when the full union was read)? */
	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
		mutex_t *rwlock = &rw_m.rwl.mutex;
		if (rwlock->mutex_owner == ocb->th_p->th_unique)
			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	}
	return (0);
}
2N/A
2N/A/*
2N/A * Iterate over the set of locks owned by a specified thread.
2N/A * If cb returns a non-zero value, terminate iterations.
2N/A */
2N/A#pragma weak td_thr_lockowner = __td_thr_lockowner
2N/Atd_err_e
2N/A__td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2N/A void *cb_data)
2N/A{
2N/A td_thragent_t *ta_p;
2N/A td_err_e return_val;
2N/A lowner_cb_ctl_t lcb;
2N/A
2N/A /*
2N/A * Just sanity checks.
2N/A */
2N/A if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2N/A return (return_val);
2N/A ta_p = th_p->th_ta_p;
2N/A ph_unlock(ta_p);
2N/A
2N/A lcb.owner_cb = cb;
2N/A lcb.owner_cb_arg = cb_data;
2N/A lcb.th_p = (td_thrhandle_t *)th_p;
2N/A return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
2N/A}
2N/A
/*
 * If a thread is asleep on a synchronization variable,
 * then get the synchronization handle.
 * Fails with TD_ERR if the thread is not sleeping on anything.
 */
#pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
td_err_e
__td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	uintptr_t wchan;	/* the thread's wait channel (sync object) */

	if (sh_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
		return (return_val);

	/*
	 * No need to stop the process for a simple read.
	 */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK)
			return_val = TD_DBERR;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target: read the narrow wchan and widen it. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK)
			return_val = TD_DBERR;
		wchan = wchan32;
#else
		return_val = TD_ERR;
#endif	/* _SYSCALL32 */
	}

	/* No wait channel means the thread is not asleep on anything. */
	if (return_val != TD_OK || wchan == NULL) {
		sh_p->sh_ta_p = NULL;
		sh_p->sh_unique = NULL;
		if (return_val == TD_OK)
			return_val = TD_ERR;
	} else {
		sh_p->sh_ta_p = th_p->th_ta_p;
		sh_p->sh_unique = (psaddr_t)wchan;
	}

	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
2N/A
/*
 * Which thread is running on an lwp?
 * Threads and lwps are 1:1 in this implementation, so an lwpid
 * maps directly to a thread id.
 */
#pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
td_err_e
__td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
	td_thrhandle_t *th_p)
{
	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
}
2N/A
/*
 * Common code for td_sync_get_info() and td_sync_get_stats().
 * Reads the sync object out of the target process, identifies its
 * type from its magic number, and fills *si_p accordingly.
 * The caller has already locked the agent and stopped the process.
 */
static td_err_e
sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
	td_syncinfo_t *si_p)
{
	int trunc = 0;
	td_so_un_t generic_so;

	/*
	 * Determine the sync. object type; a little type fudgery here.
	 * First attempt to read the whole union. If that fails, attempt
	 * to read just the condvar. A condvar is the smallest sync. object.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique,
	    &generic_so, sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK)
			return (TD_DBERR);
	}

	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		/* A truncated first read must be retried at mutex size. */
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_MUTEX;
		si_p->si_shared_type =
		    (generic_so.lock.mutex_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
		    sizeof (generic_so.lock.mutex_flag));
		si_p->si_state.mutex_locked =
		    (generic_so.lock.mutex_lockw != 0);
		si_p->si_size = sizeof (generic_so.lock);
		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
		si_p->si_rcount = generic_so.lock.mutex_rcount;
		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
		/* Report the owner only when the mutex is held. */
		if (si_p->si_state.mutex_locked) {
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.lock.mutex_ownerpid;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
		}
		break;
	case COND_MAGIC:
		/* The condvar was fully read even on the truncated path. */
		si_p->si_type = TD_SYNC_COND;
		si_p->si_shared_type =
		    (generic_so.condition.cond_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
		    sizeof (generic_so.condition.flags.flag));
		si_p->si_size = sizeof (generic_so.condition);
		si_p->si_has_waiters =
		    (generic_so.condition.cond_waiters_user |
		    generic_so.condition.cond_waiters_kernel)? 1 : 0;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_SEMA;
		si_p->si_shared_type =
		    (generic_so.semaphore.type & USYNC_PROCESS);
		si_p->si_state.sem_count = generic_so.semaphore.count;
		si_p->si_size = sizeof (generic_so.semaphore);
		si_p->si_has_waiters =
		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
		break;
	case RWL_MAGIC:
	{
		uint32_t rwstate;

		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_RWLOCK;
		si_p->si_shared_type =
		    (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
		si_p->si_size = sizeof (generic_so.rwlock);

		/* rwstate encodes writer/readers and waiter bits. */
		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
		if (rwstate & URW_WRITE_LOCKED) {
			si_p->si_state.nreaders = -1;
			si_p->si_is_wlock = 1;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique =
			    generic_so.rwlock.rwlock_owner;
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.rwlock.rwlock_ownerpid;
		} else {
			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
		}
		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);

		/* this is useless but the old interface provided it */
		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
		break;
	}
	default:
		return (TD_BADSH);	/* unrecognized magic number */
	}

	si_p->si_ta_p = sh_p->sh_ta_p;
	si_p->si_sv_addr = sh_p->sh_unique;
	return (TD_OK);
}
2N/A
2N/A/*
2N/A * Given a synchronization handle, fill in the
2N/A * information for the synchronization variable into *si_p.
2N/A */
2N/A#pragma weak td_sync_get_info = __td_sync_get_info
2N/Atd_err_e
2N/A__td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_err_e return_val;
2N/A
2N/A if (si_p == NULL)
2N/A return (TD_ERR);
2N/A (void) memset(si_p, 0, sizeof (*si_p));
2N/A if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(sh_p->sh_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A return_val = sync_get_info_common(sh_p, ph_p, si_p);
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(sh_p->sh_ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/Astatic uint_t
2N/Atdb_addr_hash64(uint64_t addr)
2N/A{
2N/A uint64_t value60 = (addr >> 4);
2N/A uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2N/A return ((value30 >> 15) ^ (value30 & 0x7fff));
2N/A}
2N/A
2N/Astatic uint_t
2N/Atdb_addr_hash32(uint64_t addr)
2N/A{
2N/A uint32_t value30 = (addr >> 2); /* 30 bits */
2N/A return ((value30 >> 15) ^ (value30 & 0x7fff));
2N/A}
2N/A
2N/Astatic td_err_e
2N/Aread_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2N/A psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2N/A{
2N/A psaddr_t next_desc;
2N/A uint64_t first;
2N/A uint_t ix;
2N/A
2N/A /*
2N/A * Compute the hash table index from the synch object's address.
2N/A */
2N/A if (ta_p->model == PR_MODEL_LP64)
2N/A ix = tdb_addr_hash64(sync_obj_addr);
2N/A else
2N/A ix = tdb_addr_hash32(sync_obj_addr);
2N/A
2N/A /*
2N/A * Get the address of the first element in the linked list.
2N/A */
2N/A if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2N/A &first, sizeof (first)) != PS_OK)
2N/A return (TD_DBERR);
2N/A
2N/A /*
2N/A * Search the linked list for an entry for the synch object..
2N/A */
2N/A for (next_desc = (psaddr_t)first; next_desc != NULL;
2N/A next_desc = (psaddr_t)sync_stats->next) {
2N/A if (ps_pdread(ta_p->ph_p, next_desc,
2N/A sync_stats, sizeof (*sync_stats)) != PS_OK)
2N/A return (TD_DBERR);
2N/A if (sync_stats->sync_addr == sync_obj_addr)
2N/A return (TD_OK);
2N/A }
2N/A
2N/A (void) memset(sync_stats, 0, sizeof (*sync_stats));
2N/A return (TD_OK);
2N/A}
2N/A
2N/A/*
2N/A * Given a synchronization handle, fill in the
2N/A * statistics for the synchronization variable into *ss_p.
2N/A */
2N/A#pragma weak td_sync_get_stats = __td_sync_get_stats
2N/Atd_err_e
2N/A__td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A td_thragent_t *ta_p;
2N/A td_err_e return_val;
2N/A register_sync_t enable;
2N/A psaddr_t hashaddr;
2N/A tdb_sync_stats_t sync_stats;
2N/A size_t ix;
2N/A
2N/A if (ss_p == NULL)
2N/A return (TD_ERR);
2N/A (void) memset(ss_p, 0, sizeof (*ss_p));
2N/A if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A ta_p = sh_p->sh_ta_p;
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2N/A != TD_OK) {
2N/A if (return_val != TD_BADSH)
2N/A goto out;
2N/A /* we can correct TD_BADSH */
2N/A (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2N/A ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2N/A ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2N/A /* we correct si_type and si_size below */
2N/A return_val = TD_OK;
2N/A }
2N/A if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2N/A &enable, sizeof (enable)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A if (enable != REGISTER_SYNC_ON)
2N/A goto out;
2N/A
2N/A /*
2N/A * Get the address of the hash table in the target process.
2N/A */
2N/A if (ta_p->model == PR_MODEL_NATIVE) {
2N/A if (ps_pdread(ph_p, ta_p->uberdata_addr +
2N/A offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2N/A &hashaddr, sizeof (&hashaddr)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A } else {
2N/A#if defined(_LP64) && defined(_SYSCALL32)
2N/A caddr32_t addr;
2N/A
2N/A if (ps_pdread(ph_p, ta_p->uberdata_addr +
2N/A offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2N/A &addr, sizeof (addr)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A goto out;
2N/A }
2N/A hashaddr = addr;
2N/A#else
2N/A return_val = TD_ERR;
2N/A goto out;
2N/A#endif /* _SYSCALL32 */
2N/A }
2N/A
2N/A if (hashaddr == 0)
2N/A return_val = TD_BADSH;
2N/A else
2N/A return_val = read_sync_stats(ta_p, hashaddr,
2N/A sh_p->sh_unique, &sync_stats);
2N/A if (return_val != TD_OK)
2N/A goto out;
2N/A
2N/A /*
2N/A * We have the hash table entry. Transfer the data to
2N/A * the td_syncstats_t structure provided by the caller.
2N/A */
2N/A switch (sync_stats.un.type) {
2N/A case TDB_MUTEX:
2N/A {
2N/A td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2N/A
2N/A ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2N/A ss_p->ss_info.si_size = sizeof (mutex_t);
2N/A msp->mutex_lock =
2N/A sync_stats.un.mutex.mutex_lock;
2N/A msp->mutex_sleep =
2N/A sync_stats.un.mutex.mutex_sleep;
2N/A msp->mutex_sleep_time =
2N/A sync_stats.un.mutex.mutex_sleep_time;
2N/A msp->mutex_hold_time =
2N/A sync_stats.un.mutex.mutex_hold_time;
2N/A msp->mutex_try =
2N/A sync_stats.un.mutex.mutex_try;
2N/A msp->mutex_try_fail =
2N/A sync_stats.un.mutex.mutex_try_fail;
2N/A if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2N/A (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2N/A < ta_p->hash_size * sizeof (thr_hash_table_t))
2N/A msp->mutex_internal =
2N/A ix / sizeof (thr_hash_table_t) + 1;
2N/A break;
2N/A }
2N/A case TDB_COND:
2N/A {
2N/A td_cond_stats_t *csp = &ss_p->ss_un.cond;
2N/A
2N/A ss_p->ss_info.si_type = TD_SYNC_COND;
2N/A ss_p->ss_info.si_size = sizeof (cond_t);
2N/A csp->cond_wait =
2N/A sync_stats.un.cond.cond_wait;
2N/A csp->cond_timedwait =
2N/A sync_stats.un.cond.cond_timedwait;
2N/A csp->cond_wait_sleep_time =
2N/A sync_stats.un.cond.cond_wait_sleep_time;
2N/A csp->cond_timedwait_sleep_time =
2N/A sync_stats.un.cond.cond_timedwait_sleep_time;
2N/A csp->cond_timedwait_timeout =
2N/A sync_stats.un.cond.cond_timedwait_timeout;
2N/A csp->cond_signal =
2N/A sync_stats.un.cond.cond_signal;
2N/A csp->cond_broadcast =
2N/A sync_stats.un.cond.cond_broadcast;
2N/A if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2N/A (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2N/A < ta_p->hash_size * sizeof (thr_hash_table_t))
2N/A csp->cond_internal =
2N/A ix / sizeof (thr_hash_table_t) + 1;
2N/A break;
2N/A }
2N/A case TDB_RWLOCK:
2N/A {
2N/A td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2N/A
2N/A ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2N/A ss_p->ss_info.si_size = sizeof (rwlock_t);
2N/A rwsp->rw_rdlock =
2N/A sync_stats.un.rwlock.rw_rdlock;
2N/A rwsp->rw_rdlock_try =
2N/A sync_stats.un.rwlock.rw_rdlock_try;
2N/A rwsp->rw_rdlock_try_fail =
2N/A sync_stats.un.rwlock.rw_rdlock_try_fail;
2N/A rwsp->rw_wrlock =
2N/A sync_stats.un.rwlock.rw_wrlock;
2N/A rwsp->rw_wrlock_hold_time =
2N/A sync_stats.un.rwlock.rw_wrlock_hold_time;
2N/A rwsp->rw_wrlock_try =
2N/A sync_stats.un.rwlock.rw_wrlock_try;
2N/A rwsp->rw_wrlock_try_fail =
2N/A sync_stats.un.rwlock.rw_wrlock_try_fail;
2N/A break;
2N/A }
2N/A case TDB_SEMA:
2N/A {
2N/A td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2N/A
2N/A ss_p->ss_info.si_type = TD_SYNC_SEMA;
2N/A ss_p->ss_info.si_size = sizeof (sema_t);
2N/A ssp->sema_wait =
2N/A sync_stats.un.sema.sema_wait;
2N/A ssp->sema_wait_sleep =
2N/A sync_stats.un.sema.sema_wait_sleep;
2N/A ssp->sema_wait_sleep_time =
2N/A sync_stats.un.sema.sema_wait_sleep_time;
2N/A ssp->sema_trywait =
2N/A sync_stats.un.sema.sema_trywait;
2N/A ssp->sema_trywait_fail =
2N/A sync_stats.un.sema.sema_trywait_fail;
2N/A ssp->sema_post =
2N/A sync_stats.un.sema.sema_post;
2N/A ssp->sema_max_count =
2N/A sync_stats.un.sema.sema_max_count;
2N/A ssp->sema_min_count =
2N/A sync_stats.un.sema.sema_min_count;
2N/A break;
2N/A }
2N/A default:
2N/A return_val = TD_BADSH;
2N/A break;
2N/A }
2N/A
2N/Aout:
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(ta_p);
2N/A return (return_val);
2N/A}
2N/A
2N/A/*
2N/A * Change the state of a synchronization variable.
2N/A * 1) mutex lock state set to value
2N/A * 2) semaphore's count set to value
2N/A * 3) writer's lock set by value < 0
2N/A * 4) reader's lock number of readers set to value >= 0
2N/A * Currently unused by dbx.
2N/A */
2N/A#pragma weak td_sync_setstate = __td_sync_setstate
2N/Atd_err_e
2N/A__td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2N/A{
2N/A struct ps_prochandle *ph_p;
2N/A int trunc = 0;
2N/A td_err_e return_val;
2N/A td_so_un_t generic_so;
2N/A uint32_t *rwstate;
2N/A int value = (int)lvalue;
2N/A
2N/A if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2N/A return (return_val);
2N/A if (ps_pstop(ph_p) != PS_OK) {
2N/A ph_unlock(sh_p->sh_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A
2N/A /*
2N/A * Read the synch. variable information.
2N/A * First attempt to read the whole union and if that fails
2N/A * fall back to reading only the smallest member, the condvar.
2N/A */
2N/A if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2N/A sizeof (generic_so)) != PS_OK) {
2N/A trunc = 1;
2N/A if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2N/A sizeof (generic_so.condition)) != PS_OK) {
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(sh_p->sh_ta_p);
2N/A return (TD_DBERR);
2N/A }
2N/A }
2N/A
2N/A /*
2N/A * Set the new value in the sync. variable, read the synch. variable
2N/A * information. from the process, reset its value and write it back.
2N/A */
2N/A switch (generic_so.condition.mutex_magic) {
2N/A case MUTEX_MAGIC:
2N/A if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2N/A &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A break;
2N/A }
2N/A generic_so.lock.mutex_lockw = (uint8_t)value;
2N/A if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2N/A sizeof (generic_so.lock)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A break;
2N/A case SEMA_MAGIC:
2N/A if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2N/A &generic_so.semaphore, sizeof (generic_so.semaphore))
2N/A != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A break;
2N/A }
2N/A generic_so.semaphore.count = value;
2N/A if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2N/A sizeof (generic_so.semaphore)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A break;
2N/A case COND_MAGIC:
2N/A /* Operation not supported on a condition variable */
2N/A return_val = TD_ERR;
2N/A break;
2N/A case RWL_MAGIC:
2N/A if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2N/A &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2N/A return_val = TD_DBERR;
2N/A break;
2N/A }
2N/A rwstate = (uint32_t *)&generic_so.rwlock.readers;
2N/A *rwstate &= URW_HAS_WAITERS;
2N/A if (value < 0)
2N/A *rwstate |= URW_WRITE_LOCKED;
2N/A else
2N/A *rwstate |= (value & URW_READERS_MASK);
2N/A if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2N/A sizeof (generic_so.rwlock)) != PS_OK)
2N/A return_val = TD_DBERR;
2N/A break;
2N/A default:
2N/A /* Bad sync. object type */
2N/A return_val = TD_BADSH;
2N/A break;
2N/A }
2N/A
2N/A (void) ps_pcontinue(ph_p);
2N/A ph_unlock(sh_p->sh_ta_p);
2N/A return (return_val);
2N/A}
2N/A
/*
 * This structure links td_sync_waiters and the waiters_cb callback function.
 */
typedef struct {
	td_thr_iter_f *waiter_cb;	/* caller's callback */
	psaddr_t sync_obj_addr;		/* sync object being waited on */
	uint16_t sync_magic;		/* the sync object's magic number */
	void *waiter_cb_arg;		/* caller's callback argument */
	td_err_e errcode;		/* error noted during iteration */
} waiter_cb_ctl_t;
2N/A
/*
 * Per-thread callback for __td_sync_waiters(): invoke the caller's
 * callback for each thread whose wait channel is the sync object
 * of interest.  Returns non-zero to terminate the iteration.
 */
static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t *ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t *wcb = arg;
	caddr_t wchan;

	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 32-bit target: read the narrow wchan and widen it. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		wcb->errcode = TD_ERR;
		return (1);
#endif	/* _SYSCALL32 */
	}

	/* Not asleep on anything. */
	if (wchan == NULL)
		return (0);

	/* Asleep on the object of interest: hand off to the caller. */
	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));

	return (0);
}
2N/A
/*
 * For a given synchronization variable, iterate over the
 * set of waiting threads.  The call back function is passed
 * two parameters, a pointer to a thread handle and a pointer
 * to extra call back data.
 */
#pragma weak td_sync_waiters = __td_sync_waiters
td_err_e
__td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
{
	struct ps_prochandle *ph_p;
	waiter_cb_ctl_t wcb;
	td_err_e return_val;

	/*
	 * Read the sync object's magic number to verify that the
	 * handle refers to a recognized sync object.  All sync types
	 * keep the magic at the same offset, so reading it through
	 * mutex_t works for each of them.
	 */
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p,
	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}
	ph_unlock(sh_p->sh_ta_p);

	switch (wcb.sync_magic) {
	case MUTEX_MAGIC:
	case COND_MAGIC:
	case SEMA_MAGIC:
	case RWL_MAGIC:
		break;
	default:
		return (TD_BADSH);
	}

	/* Visit every sleeping thread; waiters_cb filters by wchan. */
	wcb.waiter_cb = cb;
	wcb.sync_obj_addr = sh_p->sh_unique;
	wcb.waiter_cb_arg = cb_data;
	wcb.errcode = TD_OK;
	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
	    TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);

	if (return_val != TD_OK)
		return (return_val);

	return (wcb.errcode);
}