#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <thr_uberdata.h>
#include <thread_db.h>
#include <libc_int.h>
/*
 * Union of all synchronization-object types that the td_sync*()
 * interfaces can operate on through a single handle.
 */
typedef union {
	mutex_t lock;		/* process mutex */
	rwlock_t rwlock;	/* readers/writer lock */
	sema_t semaphore;	/* counting semaphore */
	cond_t condition;	/* condition variable */
} td_so_un_t;
/*
 * The thread agent: all state this library keeps about one target
 * process.  Allocated by __td_ta_new() and invalidated (but not freed)
 * by __td_ta_delete().
 */
struct td_thragent {
	rwlock_t	rwlock;		/* protects this structure */
	struct ps_prochandle	*ph_p;	/* debugger's handle for the target */
	int	initialized;	/* -1 bad, 0 not yet, 1 partial, 2 full */
	int	sync_tracking;	/* non-zero: leave sync tracking alone */
	int	model;		/* target data model (PR_MODEL_*) */
	int	primary_map;	/* copy of uberdata.primary_map */
	psaddr_t	bootstrap_addr;		/* _tdb_bootstrap in target */
	psaddr_t	uberdata_addr;		/* target's uberdata */
	psaddr_t	tdb_eventmask_addr;	/* global event mask */
	psaddr_t	tdb_register_sync_addr;	/* sync-tracking switch */
	/* addresses of the event-report stubs, one per event */
	psaddr_t	tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
	psaddr_t	hash_table_addr;	/* target's thread hash table */
	int	hash_size;	/* number of hash buckets */
	/* cached identity of the only thread while single-threaded */
	lwpid_t	single_lwpid;
	psaddr_t	single_ulwp_addr;
};
/*
 * Symbol and library names used to locate libc's bootstrap pointer
 * and static uberdata in the target process.  Both libc names are
 * tried because the target may link either one.
 */
#define	TD_BOOTSTRAP_NAME	"_tdb_bootstrap"
#define	TD_UBERDATA_NAME	"_uberdata"
#define	TD_LIBRARY_NAME		"libc.so"
#define	TD_LIBRARY_NAME_1	"libc.so.1"

/* forward declarations for routines referenced before their definitions */
td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);

td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags);
#pragma weak td_init = __td_init
/*
 * Initialize the thread debug library.  There is no global state to
 * set up in this implementation, so this always succeeds.
 * (Fixed: declared with a proper (void) prototype; the old empty
 * parameter list is an obsolescent/removed C feature.)
 */
td_err_e
__td_init(void)
{
	return (TD_OK);
}
#pragma weak td_log = __td_log
/*
 * Historical logging hook; intentionally a no-op in this implementation.
 * (Fixed: declared with a proper (void) prototype; the old empty
 * parameter list is an obsolescent/removed C feature.)
 */
void
__td_log(void)
{
}
/*
 * Return the number of buckets in the target's thread hash table.
 * In the fully-initialized state the cached value is returned; in the
 * partially-initialized state the value is re-read from the target's
 * uberdata so callers can detect a single- to multi-threaded
 * transition.  Returns 0 on any failure or bad state.
 */
static uint_t
td_read_hash_size(td_thragent_t *ta_p)
{
	psaddr_t addr;
	uint_t hash_size;

	switch (ta_p->initialized) {
	default:	/* uninitialized or invalid agent */
		return (0);
	case 1:		/* partially initialized: re-read from target */
		break;
	case 2:		/* fully initialized: use the cached value */
		return (ta_p->hash_size);
	}

	/* compute the target address of uberdata.hash_size */
	if (ta_p->model == PR_MODEL_NATIVE) {
		addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
#else
		addr = 0;	/* unreachable model; read below will fail */
#endif
	}
	if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
	    != PS_OK)
		return (0);
	return (hash_size);
}
/*
 * Read the target's uberdata and cache the pieces the library needs:
 * the global event mask and sync-tracking switch addresses, the thread
 * hash table address/size, and the per-event stub addresses.  Also
 * advances ta_p->initialized:
 *	2 - fully initialized (more than one hash bucket)
 *	1 - partially initialized (single bucket: at most one thread)
 *	0 - target's primary mapping not yet in effect
 */
static td_err_e
td_read_uberdata(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	int i;

	if (ta_p->model == PR_MODEL_NATIVE) {
		uberdata_t uberdata;

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
			return (TD_DBERR);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		uberdata32_t uberdata;
		caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];

		if (ps_pdread(ph_p, ta_p->uberdata_addr,
		    &uberdata, sizeof (uberdata)) != PS_OK)
			return (TD_DBERR);
		ta_p->primary_map = uberdata.primary_map;
		ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
		ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
		ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
		ta_p->hash_size = uberdata.hash_size;
		if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
		    tdb_events, sizeof (tdb_events)) != PS_OK)
			return (TD_DBERR);
		/* widen the 32-bit event stub addresses */
		for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
			ta_p->tdb_events[i] = tdb_events[i];
#else
		return (TD_DBERR);
#endif
	}

	/*
	 * Sanity-check the event stub addresses by reading one byte at
	 * each; an unreadable address means the uberdata is bogus.
	 */
	for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
		uint8_t check;

		if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
		    &check, sizeof (check)) != PS_OK) {
			return (TD_DBERR);
		}
	}

	if (ta_p->hash_size != 1) {	/* multi-threaded */
		ta_p->initialized = 2;
		ta_p->single_lwpid = 0;
		ta_p->single_ulwp_addr = 0;
	} else {			/* single-threaded */
		ta_p->initialized = 1;
		/*
		 * Cache the address and lwpid of the single thread.
		 * The thread may not exist yet (NULL hash bucket).
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			thr_hash_table_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			thr_hash_table32_t head;
			lwpid_t lwpid = 0;

			if (ps_pdread(ph_p, ta_p->hash_table_addr,
			    &head, sizeof (head)) != PS_OK)
				return (TD_DBERR);
			if ((psaddr_t)head.hash_bucket == 0)
				ta_p->initialized = 0;
			else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
			    offsetof(ulwp32_t, ul_lwpid),
			    &lwpid, sizeof (lwpid)) != PS_OK)
				return (TD_DBERR);
			ta_p->single_lwpid = lwpid;
			ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
#else
			return (TD_DBERR);
#endif
		}
	}
	/* not in the primary mapping yet: treat as uninitialized */
	if (!ta_p->primary_map)
		ta_p->initialized = 0;
	return (TD_OK);
}
/*
 * Locate the target's libc bootstrap pointer, derive the uberdata
 * address from it, and read the uberdata.  Called at agent creation
 * and again later if the target was not yet fully initialized.
 */
static td_err_e
td_read_bootstrap_data(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p = ta_p->ph_p;
	psaddr_t bootstrap_addr;
	psaddr_t uberdata_addr;
	ps_err_e db_return;
	td_err_e return_val;
	int do_1;

	switch (ta_p->initialized) {
	case 2:		/* fully initialized; nothing to do */
		return (TD_OK);
	case 1:		/* partially initialized; refresh from the target */
		if (td_read_hash_size(ta_p) == 1)
			return (TD_OK);
		return (td_read_uberdata(ta_p));
	}

	/*
	 * Uninitialized.  Mark the agent invalid until we succeed.
	 */
	do_1 = 0;
	ta_p->initialized = -1;
	/* look up the bootstrap symbol, trying both libc names */
	db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
	    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	if (db_return == PS_NOSYM) {
		do_1 = 1;
		db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
		    TD_BOOTSTRAP_NAME, &bootstrap_addr);
	}
	if (db_return == PS_NOSYM)	/* libc is not yet loaded */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);
	/* look up _uberdata in whichever library had the bootstrap symbol */
	db_return = ps_pglobal_lookup(ph_p,
	    do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
	    TD_UBERDATA_NAME, &uberdata_addr);
	if (db_return == PS_NOSYM)	/* libc is not yet loaded */
		return (TD_NOLIBTHREAD);
	if (db_return != PS_OK)
		return (TD_ERR);

	/*
	 * Follow the bootstrap pointer to the live uberdata.  If the
	 * pointer (or what it points to) is still NULL, libc's startup
	 * has not run; fall back to the static _uberdata address.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		psaddr_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = psaddr) == 0)
			psaddr = uberdata_addr;
		else if (ps_pdread(ph_p, psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			ta_p->bootstrap_addr = 0;
			psaddr = uberdata_addr;
		}
		ta_p->uberdata_addr = psaddr;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t psaddr;

		if (ps_pdread(ph_p, bootstrap_addr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
			psaddr = (caddr32_t)uberdata_addr;
		else if (ps_pdread(ph_p, (psaddr_t)psaddr,
		    &psaddr, sizeof (psaddr)) != PS_OK)
			return (TD_DBERR);
		if (psaddr == 0) {
			ta_p->bootstrap_addr = 0;
			psaddr = (caddr32_t)uberdata_addr;
		}
		ta_p->uberdata_addr = (psaddr_t)psaddr;
#else
		return (TD_DBERR);
#endif
	}

	if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
		return (return_val);
	/* no bootstrap pointer yet: remain not-fully-initialized */
	if (ta_p->bootstrap_addr == 0)
		ta_p->initialized = 0;
	return (TD_OK);
}
/*
 * These proc_service interfaces are optional; their presence (non-NULL
 * weak symbols) indicates the debugger supports sync tracking.
 */
#pragma weak ps_kill
#pragma weak ps_lrolltoaddr

#pragma weak td_ta_new = __td_ta_new
/*
 * Create a thread agent for the process identified by ph_p and return
 * it in *ta_pp.  The target is stopped for the duration.  If the
 * debugger supplies ps_kill() and ps_lrolltoaddr(), synchronization-
 * object registration is enabled in the target.
 */
td_err_e
__td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
{
	td_thragent_t *ta_p;
	int model;
	td_err_e return_val = TD_OK;

	if (ph_p == NULL)
		return (TD_BADPH);
	if (ta_pp == NULL)
		return (TD_ERR);
	*ta_pp = NULL;
	if (ps_pstop(ph_p) != PS_OK)
		return (TD_DBERR);
	/*
	 * ps_pdmodel() is optional; if the debugger does not provide it,
	 * assume the target has the native data model.
	 */
#pragma weak ps_pdmodel
	if (ps_pdmodel == NULL) {
		model = PR_MODEL_NATIVE;
	} else if (ps_pdmodel(ph_p, &model) != PS_OK) {
		(void) ps_pcontinue(ph_p);
		return (TD_ERR);
	}
	if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
		(void) ps_pcontinue(ph_p);
		return (TD_MALLOC);
	}
	(void) memset(ta_p, 0, sizeof (*ta_p));
	ta_p->ph_p = ph_p;
	(void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
	ta_p->model = model;
	return_val = td_read_bootstrap_data(ta_p);

	/*
	 * Enable sync tracking, but only when the debugger provides the
	 * ps_kill() and ps_lrolltoaddr() interfaces it requires.
	 */
	if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t oldenable;
		register_sync_t enable = REGISTER_SYNC_ENABLE;
		psaddr_t psaddr = ta_p->tdb_register_sync_addr;

		if (ps_pdread(ph_p, psaddr,
		    &oldenable, sizeof (oldenable)) != PS_OK)
			return_val = TD_DBERR;
		else if (oldenable != REGISTER_SYNC_OFF ||
		    ps_pdwrite(ph_p, psaddr,
		    &enable, sizeof (enable)) != PS_OK) {
			/*
			 * Tracking was already on (someone else enabled
			 * it) or the write failed; remember not to turn
			 * it off in __td_ta_delete().
			 */
			ta_p->sync_tracking = 1;
		}
	}

	if (return_val == TD_OK)
		*ta_pp = ta_p;
	else
		free(ta_p);
	(void) ps_pcontinue(ph_p);
	return (return_val);
}
/*
 * Validate the agent and lock it for shared use.  On success, return
 * the process handle with ta_p->rwlock held as reader (caller releases
 * it via ph_unlock()); if the agent is not fully initialized, finish
 * initialization first.  On failure return NULL with *err set.
 */
static struct ps_prochandle *
ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
{
	struct ps_prochandle *ph_p = NULL;
	td_err_e error;

	if (ta_p == NULL || ta_p->initialized == -1) {
		*err = TD_BADTA;
	} else if (rw_rdlock(&ta_p->rwlock) != 0) {
		*err = TD_BADTA;
	} else if ((ph_p = ta_p->ph_p) == NULL) {
		/* agent was invalidated by __td_ta_delete() */
		(void) rw_unlock(&ta_p->rwlock);
		*err = TD_BADPH;
	} else if (ta_p->initialized != 2 &&
	    (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
		(void) rw_unlock(&ta_p->rwlock);
		ph_p = NULL;
		*err = error;
	} else {
		*err = TD_OK;
	}
	return (ph_p);
}
/*
 * Validate a thread handle and lock its agent for shared use.
 * Returns the process handle, or NULL with *err set on failure.
 */
static struct ps_prochandle *
ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
{
	if (th_p != NULL && th_p->th_unique != 0)
		return (ph_lock_ta(th_p->th_ta_p, err));
	*err = TD_BADTH;
	return (NULL);
}
/*
 * Validate a sync-object handle and lock its agent for shared use.
 * Returns the process handle, or NULL with *err set on failure.
 */
static struct ps_prochandle *
ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
{
	if (sh_p != NULL && sh_p->sh_unique != 0)
		return (ph_lock_ta(sh_p->sh_ta_p, err));
	*err = TD_BADSH;
	return (NULL);
}
/*
 * Release the agent lock acquired by ph_lock_ta(), ph_lock_th()
 * or ph_lock_sh().
 */
static void
ph_unlock(td_thragent_t *ta_p)
{
	(void) rw_unlock(&ta_p->rwlock);
}
#pragma weak td_ta_delete = __td_ta_delete
/*
 * Invalidate the thread agent: disable sync-object tracking in the
 * target if this agent is responsible for it, then clear ph_p so any
 * later use of the agent fails with TD_BADPH.  The td_thragent_t
 * itself is not freed here.
 */
td_err_e
__td_ta_delete(td_thragent_t *ta_p)
{
	struct ps_prochandle *ph_p;

	if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
		return (TD_BADTA);
	if ((ph_p = ta_p->ph_p) == NULL) {
		(void) rw_unlock(&ta_p->rwlock);
		return (TD_BADPH);
	}
	/*
	 * Turn off sync tracking unless sync_tracking was set earlier,
	 * which means someone else enabled it (see __td_ta_new()).
	 */
	if (ta_p->sync_tracking == 0 &&
	    ps_kill != NULL && ps_lrolltoaddr != NULL) {
		register_sync_t enable = REGISTER_SYNC_DISABLE;

		(void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
		    &enable, sizeof (enable));
	}
	ta_p->ph_p = NULL;
	(void) rw_unlock(&ta_p->rwlock);
	return (TD_OK);
}
#pragma weak td_ta_get_ph = __td_ta_get_ph
/*
 * Return the process handle associated with an agent in *ph_pp,
 * validating the agent in the process.  *ph_pp is NULL on failure.
 */
td_err_e
__td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
{
	td_err_e err;

	if (ph_pp == NULL)
		return (TD_ERR);
	*ph_pp = ph_lock_ta(ta_p, &err);
	if (*ph_pp == NULL)
		return (err);
	ph_unlock(ta_p);
	return (TD_OK);
}
#pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
/*
 * Suggest a concurrency level for the target process.  This is a
 * legacy interface with no effect; only the handles are validated.
 */
td_err_e
__td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
{
	if (ta_p == NULL)
		return (TD_BADTA);
	return (ta_p->ph_p == NULL ? TD_BADPH : TD_OK);
}
#pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
/*
 * Return in *nthread_p the total number of threads in the target
 * process: live threads plus unreaped zombies.
 * NOTE(review): ta_p is dereferenced before ph_lock_ta() validates it,
 * so callers must pass a non-NULL agent.
 */
td_err_e
__td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int nthreads;
	int nzombies;
	psaddr_t nthreads_addr;
	psaddr_t nzombies_addr;

	/* compute target addresses of the two counters */
	if (ta_p->model == PR_MODEL_NATIVE) {
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata_t, nzombies);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		nthreads_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nthreads);
		nzombies_addr = ta_p->uberdata_addr +
		    offsetof(uberdata32_t, nzombies);
#else
		nthreads_addr = 0;	/* can't happen; reads below fail */
		nzombies_addr = 0;
#endif
	}

	if (nthread_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
		return_val = TD_DBERR;
	ph_unlock(ta_p);
	if (return_val == TD_OK)
		*nthread_p = nthreads + nzombies;
	return (return_val);
}
/*
 * Parameter block used by __td_ta_map_id2thr() to communicate with
 * its iterator callback, td_mapper_id2thr().
 */
typedef struct {
	thread_t tid;		/* thread id being searched for */
	int found;		/* set non-zero when a match is found */
	td_thrhandle_t th;	/* handle of the matching thread */
} td_mapper_param_t;
/*
 * Iterator callback for __td_ta_map_id2thr(): record the handle of
 * the thread whose id matches data->tid and stop the iteration.
 */
static int
td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
{
	td_thrinfo_t info;

	if (__td_thr_get_info(th_p, &info) != TD_OK)
		return (0);
	if (info.ti_tid != data->tid)
		return (0);
	data->found = 1;
	data->th = *th_p;
	return (1);	/* terminate the iteration */
}
#pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
/*
 * Map a thread id to a thread handle in *th_p.  Fast path: while the
 * process has never been multi-threaded (initialized == 1), the single
 * thread's identity is cached in the agent; otherwise fall back to
 * iterating over all threads looking for a matching id.
 */
td_err_e
__td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
	td_thrhandle_t *th_p)
{
	td_err_e return_val;
	td_mapper_param_t data;

	/*
	 * Single-thread fast path.  td_read_hash_size()/
	 * td_read_uberdata() re-validate the cached state in case the
	 * target has since gone multi-threaded.
	 */
	if (th_p != NULL &&
	    ta_p != NULL &&
	    ta_p->initialized == 1 &&
	    (td_read_hash_size(ta_p) == 1 ||
	    td_read_uberdata(ta_p) == TD_OK) &&
	    ta_p->initialized == 1 &&
	    ta_p->single_lwpid == tid) {
		th_p->th_ta_p = ta_p;
		if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
			return (TD_NOTHR);
		return (TD_OK);
	}

	if (ta_p == NULL)
		return (TD_BADTA);
	if (th_p == NULL)
		return (TD_BADTH);
	if (tid == 0)
		return (TD_NOTHR);

	/* general case: search all threads for a matching id */
	data.tid = tid;
	data.found = 0;
	return_val = __td_ta_thr_iter(ta_p,
	    (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val == TD_OK) {
		if (data.found == 0)
			return_val = TD_NOTHR;
		else
			*th_p = data.th;
	}
	return (return_val);
}
#pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
/*
 * Build a sync-object handle in *sh_p for the synchronization object
 * at the given address in the target.  The object is validated by
 * reading its magic number (common to all sync-object types).
 */
td_err_e
__td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	uint16_t sync_magic;

	if (sh_p == NULL)
		return (TD_BADSH);
	if (addr == 0)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Check the magic number of the sync object to make sure it's
	 * valid.  The magic number is at the same offset for all types.
	 */
	if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
	    &sync_magic, sizeof (sync_magic)) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_BADSH);
	}
	ph_unlock(ta_p);
	if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
	    sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
		return (TD_BADSH);
	/*
	 * Just fill in the appropriate fields of the sync handle.
	 */
	sh_p->sh_ta_p = (td_thragent_t *)ta_p;
	sh_p->sh_unique = addr;
	return (TD_OK);
}
#pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
/*
 * Iterate over the target's allocated thread-specific-data (TSD) keys,
 * calling cb(key, destructor, cbdata_p) for each one until cb returns
 * non-zero.  The target is stopped for the duration.
 */
td_err_e
__td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int key;
	int numkeys;
	psaddr_t dest_addr;
	psaddr_t *destructors = NULL;
	PFrV destructor;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the TSD metadata to learn how many keys are in use and
	 * where the destructor array lives, then allocate a local
	 * buffer to hold a copy of that array.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		tsd_metadata_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (psaddr_t));
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		tsd_metadata32_t tsdm;

		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else {
			numkeys = tsdm.tsdm_nused;
			dest_addr = (psaddr_t)tsdm.tsdm_destro;
			if (numkeys > 0)
				destructors =
				    malloc(numkeys * sizeof (caddr32_t));
		}
#else
		return_val = TD_DBERR;
#endif
	}

	if (return_val != TD_OK || numkeys <= 0) {
		/* nothing to iterate over (or metadata read failed) */
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}

	if (destructors == NULL)
		return_val = TD_MALLOC;
	else if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, dest_addr,
		    destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* key 0 is skipped -- presumably reserved */
			for (key = 1; key < numkeys; key++) {
				destructor = (PFrV)destructors[key];
				if (destructor != TSD_UNALLOCATED &&
				    (*cb)(key, destructor, cbdata_p))
					break;
			}
		}
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t *destructors32 = (caddr32_t *)destructors;
		caddr32_t destruct32;

		if (ps_pdread(ph_p, dest_addr,
		    destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
			return_val = TD_DBERR;
		else {
			/* key 0 is skipped -- presumably reserved */
			for (key = 1; key < numkeys; key++) {
				destruct32 = destructors32[key];
				if ((destruct32 !=
				    (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
				    (*cb)(key, (PFrV)(uintptr_t)destruct32,
				    cbdata_p))
					break;
			}
		}
#endif
	}

	if (destructors)
		free(destructors);
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
/*
 * Return non-zero iff the two signal sets are identical.
 */
int
sigequalset(const sigset_t *s1, const sigset_t *s2)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (s1->__sigbits[i] != s2->__sigbits[i])
			return (0);
	}
	return (1);
}
#pragma weak td_ta_thr_iter = __td_ta_thr_iter
/*
 * Iterate over all threads in the target process, calling cb with
 * cbdata_p for each thread that passes all of the filters: state,
 * minimum priority (ti_pri), signal mask, and user flags.  Live
 * threads are walked first, then zombies.  Iteration stops early
 * when cb returns non-zero.
 */
td_err_e
__td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
	void *cbdata_p, td_thr_state_e state, int ti_pri,
	sigset_t *ti_sigmask_p, unsigned ti_user_flags)
{
	struct ps_prochandle *ph_p;
	psaddr_t first_lwp_addr;
	psaddr_t first_zombie_addr;
	psaddr_t curr_lwp_addr;
	psaddr_t next_lwp_addr;
	td_thrhandle_t th;
	ps_err_e db_return;
	ps_err_e db_return2;
	td_err_e return_val;

	if (cb == NULL)
		return (TD_ERR);
	/* out-of-range state values match no threads; not an error */
	if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
		return (TD_OK);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the heads of the circular lists of live lwps and zombies.
	 */
	if (ta_p->model == PR_MODEL_NATIVE) {
		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
		    &first_lwp_addr, sizeof (first_lwp_addr));
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
		    &first_zombie_addr, sizeof (first_zombie_addr));
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr32;

		db_return = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
		    &addr32, sizeof (addr32));
		first_lwp_addr = addr32;
		db_return2 = ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
		    &addr32, sizeof (addr32));
		first_zombie_addr = addr32;
#else
		db_return = PS_ERR;
		db_return2 = PS_ERR;
#endif
	}
	if (db_return == PS_OK)
		db_return = db_return2;

	/* both lists empty: no threads at all */
	if (db_return == PS_OK &&
	    first_lwp_addr == 0 && first_zombie_addr == 0) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_NOTHR);
	}
	if (db_return != PS_OK) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Walk the live list first, switching to the zombie list when
	 * the live list wraps around (see the 'advance' code below).
	 */
	if (first_lwp_addr == 0)
		first_lwp_addr = first_zombie_addr;
	curr_lwp_addr = first_lwp_addr;
	for (;;) {
		td_thr_state_e ts_state;
		int userpri;
		unsigned userflags;
		sigset_t mask;

		/*
		 * Read the ulwp struct.  A dead thread may have been
		 * replaced by a smaller structure (cf. ul_replace and
		 * REPLACEMENT_SIZE in td_thr2to()); if the full-size
		 * read fails, retry with the replacement size.
		 */
		if (ta_p->model == PR_MODEL_NATIVE) {
			ulwp_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
		} else {
#if defined(_LP64) && defined(_SYSCALL32)
			ulwp32_t ulwp;

			if (ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, sizeof (ulwp)) != PS_OK &&
			    ((void) memset(&ulwp, 0, sizeof (ulwp)),
			    ps_pdread(ph_p, curr_lwp_addr,
			    &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
				return_val = TD_DBERR;
				break;
			}
			next_lwp_addr = (psaddr_t)ulwp.ul_forw;

			ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
			    ulwp.ul_stop? TD_THR_STOPPED :
			    ulwp.ul_wchan? TD_THR_SLEEP :
			    TD_THR_ACTIVE;
			userpri = ulwp.ul_pri;
			userflags = ulwp.ul_usropts;
			if (ulwp.ul_dead)
				(void) sigemptyset(&mask);
			else
				mask = *(sigset_t *)&ulwp.ul_sigmask;
#else
			return_val = TD_ERR;
			break;
#endif
		}

		/*
		 * Apply the caller's filters; skip non-matching threads.
		 */
		if ((state != ts_state) &&
		    (state != TD_THR_ANY_STATE))
			goto advance;
		if (ti_pri > userpri)
			goto advance;
		if (ti_sigmask_p != TD_SIGNO_MASK &&
		    !sigequalset(ti_sigmask_p, &mask))
			goto advance;
		if (ti_user_flags != userflags &&
		    ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
			goto advance;

		/*
		 * Call back the debugger with this thread's handle.
		 */
		th.th_ta_p = (td_thragent_t *)ta_p;
		th.th_unique = curr_lwp_addr;
		if ((*cb)(&th, cbdata_p))
			break;
advance:
		if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
			/*
			 * Wrapped around the circular list; switch to
			 * the zombie list, unless it is empty or we
			 * were already walking it.
			 */
			if (first_zombie_addr == 0 ||
			    first_lwp_addr == first_zombie_addr)
				break;
			curr_lwp_addr = first_lwp_addr = first_zombie_addr;
		}
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
/*
 * Enable or disable synchronization-object tracking in the target.
 */
td_err_e
__td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	register_sync_t value;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (onoff)
		value = REGISTER_SYNC_ENABLE;
	else
		value = REGISTER_SYNC_DISABLE;
	if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
	    &value, sizeof (value)) != PS_OK)
		return_val = TD_DBERR;
	/* the debugger has taken control of sync tracking */
	ta_p->sync_tracking = 1;
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_ta_sync_iter = __td_ta_sync_iter
/*
 * Iterate over the synchronization objects that the target process has
 * registered in its tdb_sync_addr_hash, calling cb for each until cb
 * returns non-zero.  If sync tracking is not on (REGISTER_SYNC_ON),
 * there is nothing to walk and TD_OK is returned.
 *
 * Fixed: the native-model read of tdb_sync_addr_hash used
 * sizeof (&psaddr) -- the size of a pointer-to-pointer -- where the
 * size of the receiving buffer, sizeof (psaddr), is what the
 * ps_pdread() contract requires.  The two coincide on current models,
 * but the form was wrong.
 */
td_err_e
__td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;
	int i;
	register_sync_t enable;
	psaddr_t next_desc;
	tdb_sync_stats_t sync_stats;
	td_synchandle_t synchandle;
	psaddr_t psaddr;
	void *vaddr;
	uint64_t *sync_addr_hash = NULL;

	if (cb == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;	/* tracking is off: nothing registered */

	/*
	 * Copy the whole hash table of registered sync-object addresses
	 * out of the target in one operation.
	 */
	if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED) {
		return_val = TD_MALLOC;
		goto out;
	}
	sync_addr_hash = vaddr;

	/* read the target's pointer to the hash table */
	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    &psaddr, sizeof (psaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#ifdef _SYSCALL32
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		psaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif
	}

	if (psaddr == 0)	/* hash table not allocated yet */
		goto out;
	if (ps_pdread(ph_p, psaddr, sync_addr_hash,
	    TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}

	/*
	 * Walk each hash chain of sync-object statistics descriptors,
	 * handing each real object (type != TDB_NONE) to the callback.
	 */
	for (i = 0; i < TDB_HASH_SIZE; i++) {
		for (next_desc = (psaddr_t)sync_addr_hash[i];
		    next_desc != 0;
		    next_desc = (psaddr_t)sync_stats.next) {
			if (ps_pdread(ph_p, next_desc,
			    &sync_stats, sizeof (sync_stats)) != PS_OK) {
				return_val = TD_DBERR;
				goto out;
			}
			if (sync_stats.un.type == TDB_NONE) {
				/* not registered since registration enabled */
				continue;
			}
			synchandle.sh_ta_p = ta_p;
			synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
			if ((*cb)(&synchandle, cbdata) != 0)
				goto out;
		}
	}

out:
	if (sync_addr_hash != NULL)
		(void) munmap((void *)sync_addr_hash,
		    TDB_HASH_SIZE * sizeof (uint64_t));
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_ta_enable_stats = __td_ta_enable_stats
/*
 * Enable process statistics collection.
 * Not supported by this implementation.
 */
td_err_e
__td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
{
	return (TD_NOCAPAB);
}
#pragma weak td_ta_reset_stats = __td_ta_reset_stats
/*
 * Reset process statistics.
 * Not supported by this implementation.
 */
td_err_e
__td_ta_reset_stats(const td_thragent_t *ta_p)
{
	return (TD_NOCAPAB);
}
#pragma weak td_ta_get_stats = __td_ta_get_stats
/*
 * Read process statistics.
 * Not supported by this implementation.
 */
td_err_e
__td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
{
	return (TD_NOCAPAB);
}
/*
 * Convert a native-model ulwp_t (read from the target at ts_addr)
 * into the exported td_thrinfo_t form in *ti_p.
 */
static void
td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	/*
	 * A stored lwpid of 0 is reported as 1 -- presumably the
	 * initial thread before its id is recorded; TODO confirm.
	 */
	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/* stack information is invalid for a dead thread */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	/* a replaced (dead) thread occupies only REPLACEMENT_SIZE bytes */
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE : sizeof (ulwp_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#if defined(_LP64) && defined(_SYSCALL32)
/*
 * 32-bit-model counterpart of td_thr2to(): convert a ulwp32_t (read
 * from the target at ts_addr) into a td_thrinfo_t in *ti_p.
 */
static void
td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
	ulwp32_t *ulwp, td_thrinfo_t *ti_p)
{
	lwpid_t lwpid;

	/*
	 * A stored lwpid of 0 is reported as 1 -- presumably the
	 * initial thread before its id is recorded; TODO confirm.
	 */
	if ((lwpid = ulwp->ul_lwpid) == 0)
		lwpid = 1;
	(void) memset(ti_p, 0, sizeof (*ti_p));
	ti_p->ti_ta_p = ta_p;
	ti_p->ti_user_flags = ulwp->ul_usropts;
	ti_p->ti_tid = lwpid;
	ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
	ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
	if (!ulwp->ul_dead) {
		/* stack information is invalid for a dead thread */
		ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
		ti_p->ti_stksize = ulwp->ul_stksiz;
	}
	ti_p->ti_ro_area = ts_addr;
	/* a replaced (dead) thread occupies only REPLACEMENT_SIZE32 bytes */
	ti_p->ti_ro_size = ulwp->ul_replace?
	    REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
	ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
	    ulwp->ul_stop? TD_THR_STOPPED :
	    ulwp->ul_wchan? TD_THR_SLEEP :
	    TD_THR_ACTIVE;
	ti_p->ti_db_suspended = 0;
	ti_p->ti_type = TD_THR_USER;
	ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
	ti_p->ti_flags = 0;
	ti_p->ti_pri = ulwp->ul_pri;
	ti_p->ti_lid = lwpid;
	if (!ulwp->ul_dead)
		ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
	ti_p->ti_traceme = 0;
	ti_p->ti_preemptflag = 0;
	ti_p->ti_pirecflag = 0;
	(void) sigemptyset(&ti_p->ti_pending);
	ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
}
#endif	/* _SYSCALL32 */
#pragma weak td_thr_get_info = __td_thr_get_info
/*
 * Fill in *ti_p with information about the thread identified by th_p.
 * The target is stopped for the duration.
 */
td_err_e
__td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	psaddr_t psaddr;

	if (ti_p == NULL)
		return (TD_ERR);
	(void) memset(ti_p, 0, sizeof (*ti_p));

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the ulwp struct from the target process and convert it.
	 * A dead, replaced thread is smaller than a full ulwp; if the
	 * full-size read fails, retry with the replacement size.
	 */
	psaddr = th_p->th_unique;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to(ta_p, psaddr, &ulwp, ti_p);
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t ulwp;

		if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
		    ((void) memset(&ulwp, 0, sizeof (ulwp)),
		    ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
		    PS_OK)
			return_val = TD_DBERR;
		else
			td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
#else
		return_val = TD_ERR;
#endif
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_ta_event_addr = __td_ta_event_addr
/*
 * Return in *notify_p the address of the event-reporting breakpoint
 * stub in the target for the given event.
 */
td_err_e
__td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
{
	int slot;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
		return (TD_NOEVENT);
	if (notify_p == NULL)
		return (TD_ERR);

	slot = event - TD_MIN_EVENT_NUM;
	notify_p->type = NOTIFY_BPT;
	notify_p->u.bptaddr = ta_p->tdb_events[slot];

	return (TD_OK);
}
/*
 * Add the events in event2_p to the set in event1_p (bitwise union).
 */
static void
eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int word = 0;

	while (word < TD_EVENTSIZE) {
		event1_p->event_bits[word] |= event2_p->event_bits[word];
		word++;
	}
}
/*
 * Remove the events in event2_p from the set in event1_p.
 */
static void
eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
{
	int word = 0;

	while (word < TD_EVENTSIZE) {
		event1_p->event_bits[word] &= ~event2_p->event_bits[word];
		word++;
	}
}
/*
 * Add (onoff != 0) or delete (onoff == 0) the given event set in a
 * single thread's event mask in the target, then update the thread's
 * event-enable flag to match whether TD_EVENTS_ENABLE remains set.
 */
static td_err_e
mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	char enable;
	td_thr_events_t evset;
	psaddr_t psaddr_evset;
	psaddr_t psaddr_enab;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	/* compute target addresses of the event mask and enable flag */
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
		psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
#else
		ph_unlock(th_p->th_ta_p);
		return (TD_ERR);
#endif
	}
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	/* read-modify-write the thread's event mask */
	if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&evset, events);
		else
			eventsetdelset(&evset, events);
		if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
		    != PS_OK)
			return_val = TD_DBERR;
		else {
			/* enable reporting iff TD_EVENTS_ENABLE survives */
			enable = 0;
			if (td_eventismember(&evset, TD_EVENTS_ENABLE))
				enable = 1;
			if (ps_pdwrite(ph_p, psaddr_enab,
			    &enable, sizeof (enable)) != PS_OK)
				return_val = TD_DBERR;
		}
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
#pragma weak td_thr_event_enable = __td_thr_event_enable
/*
 * Turn the TD_EVENTS_ENABLE event on or off for a single thread.
 */
td_err_e
__td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
{
	td_thr_events_t set;

	td_event_emptyset(&set);
	td_event_addset(&set, TD_EVENTS_ENABLE);
	return (mod_eventset(th_p, &set, onoff));
}
#pragma weak td_thr_set_event = __td_thr_set_event
/*
 * Turn on the given events in a single thread's event mask.
 */
td_err_e
__td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 1));
}
/*
 * Add (onoff != 0) or delete (onoff == 0) the given events in the
 * process-global event mask in the target.
 */
static td_err_e
td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
{
	struct ps_prochandle *ph_p;
	td_thr_events_t targ_eventset;
	td_err_e return_val;

	if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	/* read-modify-write the global event mask */
	if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
	    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
		return_val = TD_DBERR;
	else {
		if (onoff)
			eventsetaddset(&targ_eventset, events);
		else
			eventsetdelset(&targ_eventset, events);
		if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
		    &targ_eventset, sizeof (targ_eventset)) != PS_OK)
			return_val = TD_DBERR;
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_ta_set_event = __td_ta_set_event
/*
 * Turn on the given events in the process-global event mask.
 */
td_err_e
__td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 1));
}
#pragma weak td_thr_clear_event = __td_thr_clear_event
/*
 * Turn off the given events in a single thread's event mask.
 */
td_err_e
__td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
{
	return (mod_eventset(th_p, events, 0));
}
#pragma weak td_ta_clear_event = __td_ta_clear_event
/*
 * Turn off the given events in the process-global event mask.
 */
td_err_e
__td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
{
	return (td_ta_mod_event(ta_p, events, 0));
}
#pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
/*
 * Retrieve and consume a thread's pending event message, if any.
 * Returns TD_NOEVENT if no event is pending.  On success the event
 * buffer in the target is cleared, so each event is reported once.
 */
td_err_e
__td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	psaddr_t psaddr;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_BADTA);
	}
	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		td_evbuf_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "consume" the message in the target */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = NULL;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		td_evbuf32_t evbuf;

		psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
		if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
			return_val = TD_DBERR;
		} else if (evbuf.eventnum == TD_EVENT_NONE) {
			return_val = TD_NOEVENT;
		} else {
			msg->event = evbuf.eventnum;
			msg->th_p = (td_thrhandle_t *)th_p;
			msg->msg.data = (uintptr_t)evbuf.eventdata;
			/* "consume" the message in the target */
			evbuf.eventnum = TD_EVENT_NONE;
			evbuf.eventdata = 0;
			if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
			    != PS_OK)
				return_val = TD_DBERR;
		}
#else
		return_val = TD_ERR;
#endif
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
/*
 * Iterator callback for __td_ta_event_getmsg(): capture the first
 * thread found with a pending event message and stop.
 * NOTE: the returned msg->th_p points at a static buffer and is
 * valid only until the next call through this function.
 */
static int
event_msg_cb(const td_thrhandle_t *th_p, void *arg)
{
	static td_thrhandle_t th;
	td_event_msg_t *msg = arg;

	if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
		/* preserve the thread handle for the caller */
		th = *th_p;
		msg->th_p = &th;
		return (1);
	}
	return (0);
}
#pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
/*
 * Return the first pending event message from any thread in the
 * process, or TD_NOEVENT if no thread has one pending.
 */
td_err_e
__td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
{
	td_err_e return_val;

	if (ta_p == NULL)
		return (TD_BADTA);
	if (ta_p->ph_p == NULL)
		return (TD_BADPH);
	if (msg == NULL)
		return (TD_ERR);
	msg->event = TD_EVENT_NONE;
	if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
	    TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
	    TD_THR_ANY_USER_FLAGS)) != TD_OK)
		return (return_val);
	/* the iterator found no thread with a pending message */
	if (msg->event == TD_EVENT_NONE)
		return (TD_NOEVENT);
	return (TD_OK);
}
/*
 * Read a thread's lwpid out of the target; returns 0 on any read
 * failure.  A stored value of 0 is reported as 1 -- presumably the
 * initial thread before its id is recorded; TODO confirm.
 */
static lwpid_t
thr_to_lwpid(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
	lwpid_t lwpid;

	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
		    &lwpid, sizeof (lwpid)) != PS_OK)
			lwpid = 0;
		else if (lwpid == 0)
			lwpid = 1;
#else
		lwpid = 0;
#endif
	}
	return (lwpid);
}
#pragma weak td_thr_dbsuspend = __td_thr_dbsuspend

/*
 * Suspend a thread by stopping its underlying LWP.
 */
td_err_e
__td_thr_dbsuspend(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	ph_p = ph_lock_th(th_p, &rv);
	if (ph_p == NULL)
		return (rv);
	if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		rv = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_dbresume = __td_thr_dbresume

/*
 * Resume a suspended thread by continuing its underlying LWP.
 */
td_err_e
__td_thr_dbresume(const td_thrhandle_t *th_p)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	ph_p = ph_lock_th(th_p, &rv);
	if (ph_p == NULL)
		return (rv);
	if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
		rv = TD_DBERR;
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_sigsetmask = __td_thr_sigsetmask

/*
 * Set a thread's signal mask.  Not supported by this implementation.
 */
/* ARGSUSED */
td_err_e
__td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
{
	return (TD_NOCAPAB);
}
#pragma weak td_thr_setsigpending = __td_thr_setsigpending

/*
 * Set a thread's pending signal set.  Not supported by this
 * implementation.
 */
/* ARGSUSED */
td_err_e
__td_thr_setsigpending(const td_thrhandle_t *th_p,
    uchar_t ti_pending_flag, const sigset_t ti_pending)
{
	return (TD_NOCAPAB);
}
#pragma weak td_thr_getgregs = __td_thr_getgregs

/*
 * Get a thread's general-purpose register set.  Stops the process
 * around the read for a consistent snapshot.
 */
td_err_e
__td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_setgregs = __td_thr_setgregs

/*
 * Set a thread's general-purpose register set.  Stops the process
 * around the write.
 */
td_err_e
__td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_getfpregs = __td_thr_getfpregs

/*
 * Get a thread's floating-point register set.  Stops the process
 * around the read for a consistent snapshot.
 */
td_err_e
__td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_setfpregs = __td_thr_setfpregs

/*
 * Set a thread's floating-point register set.  Stops the process
 * around the write.
 */
td_err_e
__td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_getxregsize = __td_thr_getxregsize

/*
 * Get the size of the architecture-dependent extra state registers.
 * Returns TD_NOXREGS when the architecture has none.
 */
td_err_e
__td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val;

	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	/*
	 * Examine *xregsize only when the call succeeded; on failure
	 * ps_lgetxregsize() may not have written to it at all, so the
	 * previous unconditional check read an indeterminate value.
	 */
	if (ps_lgetxregsize(ph_p, thr_to_lwpid(th_p), xregsize) != PS_OK)
		return_val = TD_DBERR;
	else if (*xregsize == 0)
		return_val = TD_NOXREGS;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
#pragma weak td_thr_getxregs = __td_thr_getxregs

/*
 * Get a thread's extra state register set.
 */
td_err_e
__td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;
	ps_err_e err;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	err = ps_lgetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset);
	if (err == PS_NOXREGS)
		rv = TD_NOXREGS;	/* no extra state on this arch */
	else if (err != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
#pragma weak td_thr_setxregs = __td_thr_setxregs

/*
 * Set a thread's extra state register set.
 */
td_err_e
__td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if ((ph_p = ph_lock_th(th_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(th_p->th_ta_p);
		return (TD_DBERR);
	}

	if (ps_lsetxregs(ph_p, thr_to_lwpid(th_p), (caddr_t)xregset) != PS_OK)
		rv = TD_DBERR;

	(void) ps_pcontinue(ph_p);
	ph_unlock(th_p->th_ta_p);
	return (rv);
}
/*
 * Search context for td_searcher(): used by __td_thr_validate()
 * to look for a specific thread address in the thread list.
 */
struct searcher {
	psaddr_t addr;	/* thread (ulwp) address being searched for */
	int status;	/* set to 1 when the address is found */
};
/*
 * __td_ta_thr_iter() callback: flag a match when the iterated
 * thread's address equals the address being searched for.
 */
static int
td_searcher(const td_thrhandle_t *th_p, void *data)
{
	struct searcher *sd = (struct searcher *)data;

	if (sd->addr != th_p->th_unique)
		return (0);	/* no match; continue the iteration */

	sd->status = 1;
	return (1);	/* found it; terminate the iteration */
}
#pragma weak td_thr_validate = __td_thr_validate
td_err_e
__td_thr_validate(const td_thrhandle_t *th_p)
{
td_err_e return_val;
struct searcher searcher_data = {0, 0};
if (th_p == NULL)
return (TD_BADTH);
if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
return (TD_BADTH);
searcher_data.addr = th_p->th_unique;
return_val = __td_ta_thr_iter(th_p->th_ta_p,
td_searcher, &searcher_data,
TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
if (return_val == TD_OK && searcher_data.status == 0)
return_val = TD_NOTHR;
return (return_val);
}
#pragma weak td_thr_tsd = __td_thr_tsd

/*
 * Get a thread's thread-specific data (TSD) value for the given key.
 * Stops the process, reads the process-wide TSD metadata and the
 * thread's TSD array from the target, then fetches the value.
 * On success with no entry for the key, *data_pp is left NULL.
 */
td_err_e
__td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	int maxkey;		/* number of TSD keys in use process-wide */
	int nkey;		/* number of slots this thread has */
	psaddr_t tsd_paddr;	/* target address of the thread's TSD slots */
	if (data_pp == NULL)
		return (TD_ERR);
	*data_pp = NULL;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so all reads see a consistent state. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tsd_metadata_t tsdm;
		tsd_t stsd;
		/* Read the process-wide TSD metadata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		/* Read the thread's pointer to its allocated TSD block. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
			return_val = TD_DBERR;
		/* If it has one, read its header (for tsd_nalloc). */
		else if (tsd_paddr != 0 &&
		    ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
			/* Fast keys live directly in the ulwp (ul_ftsd). */
			if (key < TSD_NFAST)
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same logic for a 32-bit target process. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tsd_metadata32_t tsdm;
		tsd32_t stsd;
		caddr32_t addr;
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
		    &tsdm, sizeof (tsdm)) != PS_OK)
			return_val = TD_DBERR;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
		    &addr, sizeof (addr)) != PS_OK)
			return_val = TD_DBERR;
		else if (addr != 0 &&
		    ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
			return_val = TD_DBERR;
		else {
			maxkey = tsdm.tsdm_nused;
			nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
			if (key < TSD_NFAST) {
				tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
			} else {
				tsd_paddr = addr;
			}
		}
#else
		/* 32-bit build cannot examine a non-native target. */
		return_val = TD_ERR;
#endif
	}
	/* Key 0 is never valid; maxkey bounds the keys ever created. */
	if (return_val == TD_OK && (key < 1 || key >= maxkey))
		return_val = TD_NOTSD;
	/* Valid key with no slot in this thread: *data_pp stays NULL. */
	if (return_val != TD_OK || key >= nkey) {
		(void) ps_pcontinue(ph_p);
		ph_unlock(ta_p);
		return (return_val);
	}
	/* Fetch the slot value, sized for the target's data model. */
	if (ta_p->model == PR_MODEL_NATIVE) {
		void *value;
		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
		    &value, sizeof (value)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = value;
#if defined(_LP64) && defined(_SYSCALL32)
	} else {
		caddr32_t value32;
		if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
		    &value32, sizeof (value32)) != PS_OK)
			return_val = TD_DBERR;
		else
			*data_pp = (void *)(uintptr_t)value32;
#endif
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_thr_tlsbase = __td_thr_tlsbase

/*
 * Get the base address of a thread's thread-local storage (TLS) block
 * for the module identified by 'moduleid'.  Returns TD_NOTLS if the
 * module has no TLS, TD_TLSDEFER if the thread has not yet allocated
 * its dynamic TLS for the module.
 */
td_err_e
__td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	if (base == NULL)
		return (TD_ERR);
	*base = 0;
	if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
		return (return_val);
	ta_p = th_p->th_ta_p;
	/* Stop the process so all reads see a consistent state. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		tls_metadata_t tls_metadata;
		TLS_modinfo tlsmod;
		tls_t tls;
		/* Read the process-wide TLS metadata. */
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		/* Module id out of range of the module table? */
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		/* Read this module's TLS_modinfo entry. */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo *)
		    tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		/* Module contributes no TLS at all. */
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		/* Static TLS lives at a fixed offset below the ulwp. */
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		/* Otherwise consult the thread's dynamic TLS vector. */
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls_t *)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;	/* not yet allocated */
		else
			*base = (psaddr_t)tls.tls_data;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* Same logic for a 32-bit target process. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		tls_metadata32_t tls_metadata;
		TLS_modinfo32 tlsmod;
		tls32_t tls;
		if (ps_pdread(ph_p,
		    ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
		    &tls_metadata, sizeof (tls_metadata)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
			return_val = TD_NOTLS;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((TLS_modinfo32 *)
		    (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
		    &tlsmod, sizeof (tlsmod)) != PS_OK)
			return_val = TD_DBERR;
		else if (tlsmod.tm_memsz == 0)
			return_val = TD_NOTLS;
		else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
			*base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
		else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (moduleid >= tls.tls_size)
			return_val = TD_TLSDEFER;
		else if (ps_pdread(ph_p,
		    (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
		    &tls, sizeof (tls)) != PS_OK)
			return_val = TD_DBERR;
		else if (tls.tls_size == 0)
			return_val = TD_TLSDEFER;
		else
			*base = (psaddr_t)tls.tls_data;
#else
		/* 32-bit build cannot examine a non-native target. */
		return_val = TD_ERR;
#endif
	}
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_thr_setprio = __td_thr_setprio

/*
 * Change a thread's priority.  Not supported by this implementation.
 */
/* ARGSUSED */
td_err_e
__td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
{
	return (TD_NOCAPAB);
}
/*
 * Control block passed to lowner_cb() while iterating over all
 * synchronization objects on behalf of __td_thr_lockowner().
 */
typedef struct {
	td_sync_iter_f *owner_cb;	/* caller's per-lock callback */
	void *owner_cb_arg;		/* opaque argument for owner_cb */
	td_thrhandle_t *th_p;		/* thread whose locks are sought */
} lowner_cb_ctl_t;
/*
 * Sync-object iterator callback for __td_thr_lockowner(): invoke the
 * caller's callback if the object is a mutex (or the mutex embedded
 * in a rwlock) owned by the thread of interest.
 */
static int
lowner_cb(const td_synchandle_t *sh_p, void *arg)
{
	lowner_cb_ctl_t *ocb = arg;
	int trunc = 0;
	union {
		rwlock_t rwl;
		mutex_t mx;
	} rw_m;
	/*
	 * Try to read the object as the full (larger) union; if that
	 * fails, retry with just the mutex_t portion and remember via
	 * 'trunc' that the rwlock view is unavailable.
	 */
	if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
	    &rw_m, sizeof (rw_m)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
		    &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
			return (0);
	}
	/* A mutex owned by the thread of interest? */
	if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
	    rw_m.mx.mutex_owner == ocb->th_p->th_unique)
		return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	/* A rwlock whose embedded mutex is owned by the thread? */
	if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
		mutex_t *rwlock = &rw_m.rwl.mutex;
		if (rwlock->mutex_owner == ocb->th_p->th_unique)
			return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
	}
	return (0);
}
#pragma weak td_thr_lockowner = __td_thr_lockowner

/*
 * Iterate over the set of locks owned by a specified thread.
 * The handle is only sanity-checked here; __td_ta_sync_iter()
 * performs its own locking during the iteration.
 */
td_err_e
__td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
    void *cb_data)
{
	td_thragent_t *ta_p;
	td_err_e rv;
	lowner_cb_ctl_t lcb;

	if (ph_lock_th((td_thrhandle_t *)th_p, &rv) == NULL)
		return (rv);
	ta_p = th_p->th_ta_p;
	ph_unlock(ta_p);

	lcb.owner_cb = cb;
	lcb.owner_cb_arg = cb_data;
	lcb.th_p = (td_thrhandle_t *)th_p;
	return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
}
#pragma weak td_thr_sleepinfo = __td_thr_sleepinfo

/*
 * Return the synchronization handle for the object on which a thread
 * is blocked (its wait channel), or TD_ERR if it is not sleeping.
 */
td_err_e
__td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
{
	struct ps_prochandle *ph_p;
	td_err_e return_val = TD_OK;
	uintptr_t wchan = 0;

	if (sh_p == NULL)
		return (TD_ERR);
	if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
		return (return_val);

	if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK)
			return_val = TD_DBERR;
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;

		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK)
			return_val = TD_DBERR;
		else
			/*
			 * Widen only on success; on failure wchan32 is
			 * indeterminate and must not be copied.
			 */
			wchan = wchan32;
#else
		return_val = TD_ERR;
#endif
	}

	if (return_val != TD_OK || wchan == 0) {
		/* Not blocked on a synchronization object. */
		sh_p->sh_ta_p = NULL;
		sh_p->sh_unique = 0;
		if (return_val == TD_OK)
			return_val = TD_ERR;
	} else {
		sh_p->sh_ta_p = th_p->th_ta_p;
		sh_p->sh_unique = (psaddr_t)wchan;
	}

	ph_unlock(th_p->th_ta_p);
	return (return_val);
}
#pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr

/*
 * Map an LWP id to a thread handle.
 * Simply delegates to __td_ta_map_id2thr().
 */
td_err_e
__td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
    td_thrhandle_t *th_p)
{
	return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
}
/*
 * Common body of __td_sync_get_info() and __td_sync_get_stats():
 * read a synchronization object from the target and fill in the
 * td_syncinfo_t describing its type, flags and current state.
 */
static td_err_e
sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
    td_syncinfo_t *si_p)
{
	int trunc = 0;
	td_so_un_t generic_so;
	/*
	 * Read the object as the full union.  If that fails (e.g. the
	 * object lies near the end of a mapping), retry with just the
	 * condition-variable portion and set 'trunc' so the larger
	 * per-type reads below are redone for mutex/sema/rwlock.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique,
	    &generic_so, sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK)
			return (TD_DBERR);
	}
	/*
	 * The magic number determines the object type; it is read via
	 * the cond_t view — this assumes all sync types keep the magic
	 * at the same offset (NOTE(review): verify against synch.h).
	 */
	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_MUTEX;
		si_p->si_shared_type =
		    (generic_so.lock.mutex_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
		    sizeof (generic_so.lock.mutex_flag));
		si_p->si_state.mutex_locked =
		    (generic_so.lock.mutex_lockw != 0);
		si_p->si_size = sizeof (generic_so.lock);
		si_p->si_has_waiters = generic_so.lock.mutex_waiters;
		si_p->si_rcount = generic_so.lock.mutex_rcount;
		si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
		if (si_p->si_state.mutex_locked) {
			/* Owner pid is meaningful only for shared mutexes. */
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.lock.mutex_ownerpid;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
		}
		break;
	case COND_MAGIC:
		si_p->si_type = TD_SYNC_COND;
		si_p->si_shared_type =
		    (generic_so.condition.cond_type & USYNC_PROCESS);
		(void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
		    sizeof (generic_so.condition.flags.flag));
		si_p->si_size = sizeof (generic_so.condition);
		/* Waiters in either user or kernel space count. */
		si_p->si_has_waiters =
		    (generic_so.condition.cond_waiters_user |
		    generic_so.condition.cond_waiters_kernel)? 1 : 0;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_SEMA;
		si_p->si_shared_type =
		    (generic_so.semaphore.type & USYNC_PROCESS);
		si_p->si_state.sem_count = generic_so.semaphore.count;
		si_p->si_size = sizeof (generic_so.semaphore);
		/*
		 * The waiters flag is viewed through the lwp_sema_t
		 * layout, where it sits in flags[7].
		 */
		si_p->si_has_waiters =
		    ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
		si_p->si_data = (psaddr_t)generic_so.semaphore.count;
		break;
	case RWL_MAGIC:
	{
		uint32_t rwstate;
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
			return (TD_DBERR);
		si_p->si_type = TD_SYNC_RWLOCK;
		si_p->si_shared_type =
		    (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
		si_p->si_size = sizeof (generic_so.rwlock);
		rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
		if (rwstate & URW_WRITE_LOCKED) {
			/* Write-locked: -1 readers, record the owner. */
			si_p->si_state.nreaders = -1;
			si_p->si_is_wlock = 1;
			si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
			si_p->si_owner.th_unique =
			    generic_so.rwlock.rwlock_owner;
			if (si_p->si_shared_type & USYNC_PROCESS)
				si_p->si_ownerpid =
				    generic_so.rwlock.rwlock_ownerpid;
		} else {
			si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
		}
		si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
		si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
		break;
	}
	default:
		return (TD_BADSH);	/* unrecognized magic number */
	}
	si_p->si_ta_p = sh_p->sh_ta_p;
	si_p->si_sv_addr = sh_p->sh_unique;
	return (TD_OK);
}
#pragma weak td_sync_get_info = __td_sync_get_info

/*
 * Fill in a td_syncinfo_t for the given synchronization object.
 * Stops the process around the read for a consistent snapshot.
 */
td_err_e
__td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
{
	struct ps_prochandle *ph_p;
	td_err_e rv;

	if (si_p == NULL)
		return (TD_ERR);
	(void) memset(si_p, 0, sizeof (*si_p));

	if ((ph_p = ph_lock_sh(sh_p, &rv)) == NULL)
		return (rv);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	rv = sync_get_info_common(sh_p, ph_p, si_p);

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (rv);
}
/*
 * Hash a 64-bit address by folding the upper bits onto the lower
 * bits, after discarding the (alignment) low 4 bits.
 */
static uint_t
tdb_addr_hash64(uint64_t addr)
{
	uint64_t fold60 = addr >> 4;
	uint32_t fold30 = (uint32_t)((fold60 >> 30) ^ (fold60 & 0x3fffffff));

	return ((fold30 >> 15) ^ (fold30 & 0x7fff));
}
/*
 * Hash a 32-bit address by folding the upper bits onto the lower
 * bits, after discarding the (alignment) low 2 bits.
 */
static uint_t
tdb_addr_hash32(uint64_t addr)
{
	uint32_t fold = (uint32_t)(addr >> 2);

	return ((fold >> 15) ^ (fold & 0x7fff));
}
/*
 * Look up the statistics record for the given synchronization object
 * in the target's sync-stats hash table.  If no record exists, the
 * caller's buffer is zeroed and TD_OK is returned.
 */
static td_err_e
read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
    psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
{
	psaddr_t desc;
	uint64_t head;
	uint_t ix;

	/* Select the bucket with the model-appropriate hash. */
	ix = (ta_p->model == PR_MODEL_LP64) ?
	    tdb_addr_hash64(sync_obj_addr) : tdb_addr_hash32(sync_obj_addr);

	if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
	    &head, sizeof (head)) != PS_OK)
		return (TD_DBERR);

	/* Walk the bucket's chain looking for a matching record. */
	desc = (psaddr_t)head;
	while (desc != 0) {
		if (ps_pdread(ta_p->ph_p, desc,
		    sync_stats, sizeof (*sync_stats)) != PS_OK)
			return (TD_DBERR);
		if (sync_stats->sync_addr == sync_obj_addr)
			return (TD_OK);
		desc = (psaddr_t)sync_stats->next;
	}

	/* No record for this object: report all-zero statistics. */
	(void) memset(sync_stats, 0, sizeof (*sync_stats));
	return (TD_OK);
}
#pragma weak td_sync_get_stats = __td_sync_get_stats

/*
 * Get the usage statistics that the process has accumulated for a
 * synchronization object since statistics tracking was enabled.
 */
td_err_e
__td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
{
	struct ps_prochandle *ph_p;
	td_thragent_t *ta_p;
	td_err_e return_val;
	register_sync_t enable;
	psaddr_t hashaddr;
	tdb_sync_stats_t sync_stats;
	size_t ix;

	if (ss_p == NULL)
		return (TD_ERR);
	(void) memset(ss_p, 0, sizeof (*ss_p));
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	ta_p = sh_p->sh_ta_p;
	/* Stop the process for a consistent snapshot. */
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(ta_p);
		return (TD_DBERR);
	}

	if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
	    != TD_OK) {
		if (return_val != TD_BADSH)
			goto out;
		/*
		 * TD_BADSH means the magic number was not recognized;
		 * report empty info but continue — statistics may still
		 * exist for the object's address.
		 */
		(void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
		ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
		ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
		return_val = TD_OK;
	}

	/* If statistics gathering is not enabled, we are done. */
	if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
	    &enable, sizeof (enable)) != PS_OK) {
		return_val = TD_DBERR;
		goto out;
	}
	if (enable != REGISTER_SYNC_ON)
		goto out;

	/* Read the address of the sync-stats hash table. */
	if (ta_p->model == PR_MODEL_NATIVE) {
		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
		    /* was sizeof (&hashaddr): size of a pointer, not */
		    /* of the object being read */
		    &hashaddr, sizeof (hashaddr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		caddr32_t addr;

		if (ps_pdread(ph_p, ta_p->uberdata_addr +
		    offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
		    &addr, sizeof (addr)) != PS_OK) {
			return_val = TD_DBERR;
			goto out;
		}
		hashaddr = addr;
#else
		return_val = TD_ERR;
		goto out;
#endif
	}

	if (hashaddr == 0)
		return_val = TD_BADSH;
	else
		return_val = read_sync_stats(ta_p, hashaddr,
		    sh_p->sh_unique, &sync_stats);
	if (return_val != TD_OK)
		goto out;

	/*
	 * Transfer the per-type statistics into the caller's
	 * td_syncstats_t structure.
	 */
	switch (sync_stats.un.type) {
	case TDB_MUTEX:
	{
		td_mutex_stats_t *msp = &ss_p->ss_un.mutex;

		ss_p->ss_info.si_type = TD_SYNC_MUTEX;
		ss_p->ss_info.si_size = sizeof (mutex_t);
		msp->mutex_lock =
		    sync_stats.un.mutex.mutex_lock;
		msp->mutex_sleep =
		    sync_stats.un.mutex.mutex_sleep;
		msp->mutex_sleep_time =
		    sync_stats.un.mutex.mutex_sleep_time;
		msp->mutex_hold_time =
		    sync_stats.un.mutex.mutex_hold_time;
		msp->mutex_try =
		    sync_stats.un.mutex.mutex_try;
		msp->mutex_try_fail =
		    sync_stats.un.mutex.mutex_try_fail;
		/*
		 * If the object is one of libc's internal hash-table
		 * locks, report its 1-origin bucket index.
		 */
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			msp->mutex_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_COND:
	{
		td_cond_stats_t *csp = &ss_p->ss_un.cond;

		ss_p->ss_info.si_type = TD_SYNC_COND;
		ss_p->ss_info.si_size = sizeof (cond_t);
		csp->cond_wait =
		    sync_stats.un.cond.cond_wait;
		csp->cond_timedwait =
		    sync_stats.un.cond.cond_timedwait;
		csp->cond_wait_sleep_time =
		    sync_stats.un.cond.cond_wait_sleep_time;
		csp->cond_timedwait_sleep_time =
		    sync_stats.un.cond.cond_timedwait_sleep_time;
		csp->cond_timedwait_timeout =
		    sync_stats.un.cond.cond_timedwait_timeout;
		csp->cond_signal =
		    sync_stats.un.cond.cond_signal;
		csp->cond_broadcast =
		    sync_stats.un.cond.cond_broadcast;
		/* See the TDB_MUTEX comment re: internal objects. */
		if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
		    (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
		    < ta_p->hash_size * sizeof (thr_hash_table_t))
			csp->cond_internal =
			    ix / sizeof (thr_hash_table_t) + 1;
		break;
	}
	case TDB_RWLOCK:
	{
		td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;

		ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
		ss_p->ss_info.si_size = sizeof (rwlock_t);
		rwsp->rw_rdlock =
		    sync_stats.un.rwlock.rw_rdlock;
		rwsp->rw_rdlock_try =
		    sync_stats.un.rwlock.rw_rdlock_try;
		rwsp->rw_rdlock_try_fail =
		    sync_stats.un.rwlock.rw_rdlock_try_fail;
		rwsp->rw_wrlock =
		    sync_stats.un.rwlock.rw_wrlock;
		rwsp->rw_wrlock_hold_time =
		    sync_stats.un.rwlock.rw_wrlock_hold_time;
		rwsp->rw_wrlock_try =
		    sync_stats.un.rwlock.rw_wrlock_try;
		rwsp->rw_wrlock_try_fail =
		    sync_stats.un.rwlock.rw_wrlock_try_fail;
		break;
	}
	case TDB_SEMA:
	{
		td_sema_stats_t *ssp = &ss_p->ss_un.sema;

		ss_p->ss_info.si_type = TD_SYNC_SEMA;
		ss_p->ss_info.si_size = sizeof (sema_t);
		ssp->sema_wait =
		    sync_stats.un.sema.sema_wait;
		ssp->sema_wait_sleep =
		    sync_stats.un.sema.sema_wait_sleep;
		ssp->sema_wait_sleep_time =
		    sync_stats.un.sema.sema_wait_sleep_time;
		ssp->sema_trywait =
		    sync_stats.un.sema.sema_trywait;
		ssp->sema_trywait_fail =
		    sync_stats.un.sema.sema_trywait_fail;
		ssp->sema_post =
		    sync_stats.un.sema.sema_post;
		ssp->sema_max_count =
		    sync_stats.un.sema.sema_max_count;
		ssp->sema_min_count =
		    sync_stats.un.sema.sema_min_count;
		break;
	}
	default:
		return_val = TD_BADSH;
		break;
	}

out:
	(void) ps_pcontinue(ph_p);
	ph_unlock(ta_p);
	return (return_val);
}
#pragma weak td_sync_setstate = __td_sync_setstate

/*
 * Change the state of a synchronization object: the lock word of a
 * mutex, the count of a semaphore, or the reader state of a rwlock.
 * Condition variables have no settable state (TD_ERR).
 */
td_err_e
__td_sync_setstate(const td_synchandle_t *sh_p, int value)
{
	struct ps_prochandle *ph_p;
	int trunc = 0;
	td_err_e return_val;
	td_so_un_t generic_so;
	uint32_t *rwstate;

	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	if (ps_pstop(ph_p) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}

	/*
	 * Read the synch object as the full union.  If that fails
	 * (the object may lie near the end of a mapping), retry with
	 * just the condition-variable portion and set 'trunc' so the
	 * larger per-type reads below are redone as needed.
	 */
	if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
	    sizeof (generic_so)) != PS_OK) {
		trunc = 1;
		if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
		    sizeof (generic_so.condition)) != PS_OK) {
			(void) ps_pcontinue(ph_p);
			ph_unlock(sh_p->sh_ta_p);
			return (TD_DBERR);
		}
	}

	/*
	 * Dispatch on the magic number via the cond_t view of the
	 * union (fixed: was 'mutex_magic', a mutex_t accessor applied
	 * to the cond_t member; 'cond_magic' matches the member's type
	 * and sync_get_info_common()).
	 */
	switch (generic_so.condition.cond_magic) {
	case MUTEX_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.lock.mutex_lockw = (uint8_t)value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
		    sizeof (generic_so.lock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case SEMA_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.semaphore, sizeof (generic_so.semaphore))
		    != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		generic_so.semaphore.count = value;
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
		    sizeof (generic_so.semaphore)) != PS_OK)
			return_val = TD_DBERR;
		break;
	case COND_MAGIC:
		/* Condition variables have no settable state. */
		return_val = TD_ERR;
		break;
	case RWL_MAGIC:
		if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
		    &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
			return_val = TD_DBERR;
			break;
		}
		/*
		 * Preserve only the waiters bit; negative values set
		 * write-locked, non-negative values set reader count.
		 */
		rwstate = (uint32_t *)&generic_so.rwlock.readers;
		*rwstate &= URW_HAS_WAITERS;
		if (value < 0)
			*rwstate |= URW_WRITE_LOCKED;
		else
			*rwstate |= (value & URW_READERS_MASK);
		if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
		    sizeof (generic_so.rwlock)) != PS_OK)
			return_val = TD_DBERR;
		break;
	default:
		return_val = TD_BADSH;
		break;
	}

	(void) ps_pcontinue(ph_p);
	ph_unlock(sh_p->sh_ta_p);
	return (return_val);
}
/*
 * Control block passed to waiters_cb() while iterating over all
 * threads on behalf of __td_sync_waiters().
 */
typedef struct {
	td_thr_iter_f *waiter_cb;	/* caller's per-waiter callback */
	psaddr_t sync_obj_addr;		/* sync object being examined */
	uint16_t sync_magic;		/* magic number read from it */
	void *waiter_cb_arg;		/* opaque argument for waiter_cb */
	td_err_e errcode;		/* error detected during iteration */
} waiter_cb_ctl_t;
/*
 * Thread-iterator callback for __td_sync_waiters(): invoke the
 * caller's callback for each thread whose wait channel matches
 * the synchronization object of interest.
 */
static int
waiters_cb(const td_thrhandle_t *th_p, void *arg)
{
	td_thragent_t *ta_p = th_p->th_ta_p;
	struct ps_prochandle *ph_p = ta_p->ph_p;
	waiter_cb_ctl_t *wcb = arg;
	caddr_t wchan;
	if (ta_p->model == PR_MODEL_NATIVE) {
		ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
		/* Read the thread's wait channel from the target. */
		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan, sizeof (wchan)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);	/* read error; stop the iteration */
		}
	} else {
#if defined(_LP64) && defined(_SYSCALL32)
		/* 64-bit debugger examining a 32-bit target. */
		ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
		caddr32_t wchan32;
		if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
		    &wchan32, sizeof (wchan32)) != PS_OK) {
			wcb->errcode = TD_DBERR;
			return (1);	/* read error; stop the iteration */
		}
		wchan = (caddr_t)(uintptr_t)wchan32;
#else
		/* 32-bit build cannot examine a non-native target. */
		wcb->errcode = TD_ERR;
		return (1);
#endif
	}
	if (wchan == NULL)
		return (0);	/* not sleeping; keep iterating */
	if (wchan == (caddr_t)wcb->sync_obj_addr)
		return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
	return (0);
}
#pragma weak td_sync_waiters = __td_sync_waiters

/*
 * Iterate over all threads blocked on a synchronization object,
 * calling cb(thread, cb_data) for each one.
 */
td_err_e
__td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
{
	struct ps_prochandle *ph_p;
	waiter_cb_ctl_t wcb;
	td_err_e return_val;
	if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
		return (return_val);
	/*
	 * Read the object's magic number through the mutex_t view;
	 * the switch below assumes all sync types keep the magic at
	 * the same offset (NOTE(review): verify against synch.h).
	 */
	if (ps_pdread(ph_p,
	    (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
	    (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
		ph_unlock(sh_p->sh_ta_p);
		return (TD_DBERR);
	}
	ph_unlock(sh_p->sh_ta_p);
	switch (wcb.sync_magic) {
	case MUTEX_MAGIC:
	case COND_MAGIC:
	case SEMA_MAGIC:
	case RWL_MAGIC:
		break;
	default:
		return (TD_BADSH);	/* not a known sync object */
	}
	wcb.waiter_cb = cb;
	wcb.sync_obj_addr = sh_p->sh_unique;
	wcb.waiter_cb_arg = cb_data;
	wcb.errcode = TD_OK;
	/* Only threads in the SLEEP state can be waiting on it. */
	return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
	    TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
	    TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
	if (return_val != TD_OK)
		return (return_val);
	return (wcb.errcode);
}