#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/inttypes.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/ksynch.h>
#include <sys/systm.h>
#include <sys/kcpc.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/atomic.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cap_util.h>
#if defined(__x86)
#include <asm/clock.h>
#include <sys/xc_levels.h>
#endif
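
/*
 * All CPC contexts in the system are kept on a hash table keyed by the
 * context's address (CPC_HASH_CTX()); kcpc_ctx_llock[i] protects bucket
 * kcpc_ctx_list[i].
 */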
static kmutex_t kcpc_ctx_llock[CPC_HASH_BUCKETS];
static kcpc_ctx_t *kcpc_ctx_list[CPC_HASH_BUCKETS];
krwlock_t	kcpc_cpuctx_lock;	/* lock for 'kcpc_cpuctx' below */
int		kcpc_cpuctx;		/* number of cpu-bound contexts */
int		kcpc_counts_include_idle = 1;	/* tunable: count the idle thread too? */
uint_t		cpc_ncounters = 0;	/* # of counters; set when the PCBE loads */
pcbe_ops_t	*pcbe_ops = NULL;	/* PCBE entry points; set when the PCBE loads */
static uint32_t	kcpc_intrctx_count;	/* # overflows in an interrupt thread */
static uint32_t	kcpc_nullctx_count;	/* # overflows in a thread with no ctx */
static int	kcpc_nullctx_panic = 0;	/* panic (vs. continue) on a null ctx */
static void kcpc_save(void *);
static void kcpc_restore(void *);
static void kcpc_lwp_create(void *, void *);
static void kcpc_free(void *, int);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
static kcpc_set_t *kcpc_set_create(kcpc_request_t *reqs, int nreqs,
int set_flags, int kmem_flags);
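
/*
 * Macros to manipulate context flags. All flag updates should use one of
 * these two macros.
 *
 * Flags should always be updated atomically since some of the updates are
 * not protected by locks.
 */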
#define KCPC_CTX_FLAG_SET(ctx, flag) atomic_or_uint(&(ctx)->kc_flags, (flag))
#define KCPC_CTX_FLAG_CLR(ctx, flag) atomic_and_uint(&(ctx)->kc_flags, ~(flag))
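
/*
 * IS_HIPIL() verifies that we are executing either from a cross-call or
 * from high-PIL interrupt context. It is only meaningful on DEBUG kernels,
 * where it is used under ASSERT().
 */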
#ifdef DEBUG
#define IS_HIPIL() (getpil() >= XCALL_PIL)
#else
#define IS_HIPIL()
#endif
extern int kcpc_hw_load_pcbe(void);
static int kcpc_pcbe_error = 0;	/* non-zero if the platform PCBE failed to load */
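
/*
 * Context ops installed on threads that have a bound counter set, so that
 * the counters follow the thread across context switches, are inherited
 * at lwp creation, and are torn down at thread exit.
 */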
static const struct ctxop_template kcpc_ctxop_tpl = {
.ct_rev = CTXOP_TPL_REV,
.ct_save = kcpc_save,
.ct_restore = kcpc_restore,
.ct_lwp_create = kcpc_lwp_create,
.ct_free = kcpc_free,
};
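
/*
 * One-time framework initialization: create the context hash table locks
 * and load the platform's Performance Counter BackEnd (PCBE) module.
 * Returns 0 on success and -1 if no PCBE could be loaded.
 */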
int
kcpc_init(void)
{
long hash;
static uint32_t kcpc_initialized = 0;
if (kcpc_pcbe_error != 0)
return (-1);
if (atomic_cas_32(&kcpc_initialized, 0, 1) != 0)
return (0);
rw_init(&kcpc_cpuctx_lock, NULL, RW_DEFAULT, NULL);
for (hash = 0; hash < CPC_HASH_BUCKETS; hash++)
mutex_init(&kcpc_ctx_llock[hash],
NULL, MUTEX_DRIVER, (void *)(uintptr_t)15);
kcpc_pcbe_error = kcpc_hw_load_pcbe();
return (kcpc_pcbe_error == 0 ? 0 : -1);
}
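
/*
 * Called by the PCBE module at load time to publish its ops vector and
 * the number of hardware counters it supports.
 */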
void
kcpc_register_pcbe(pcbe_ops_t *ops)
{
pcbe_ops = ops;
cpc_ncounters = pcbe_ops->pcbe_ncounters();
}
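
/*
 * Register/unregister the overflow handler used by the DTrace cpc
 * provider.
 */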
void
kcpc_register_dcpc(void (*func)(uint64_t))
{
dtrace_cpc_fire = func;
}
void
kcpc_unregister_dcpc(void)
{
dtrace_cpc_fire = NULL;
}
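
/*
 * Bind a set to a CPU. The caller must already be bound to the target CPU
 * (we check t_bind_cpu below); on success the set is programmed onto the
 * hardware before returning. EAGAIN means the CPU went away or was
 * otherwise unavailable, and the caller may retry.
 */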
int
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
{
cpu_t *cp;
kcpc_ctx_t *ctx;
int error;
int save_spl;
ctx = kcpc_ctx_alloc(KM_SLEEP);
if (kcpc_assign_reqs(set, ctx) != 0) {
kcpc_ctx_free(ctx);
*subcode = CPC_RESOURCE_UNAVAIL;
return (EINVAL);
}
ctx->kc_cpuid = cpuid;
ctx->kc_thread = curthread;
set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);
if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
kcpc_ctx_free(ctx);
return (error);
}
set->ks_ctx = ctx;
ctx->kc_set = set;
mutex_enter(&cpu_lock);
cp = cpu_get(cpuid);
if (cp == NULL)
goto unbound;
mutex_enter(&cp->cpu_cpc_ctxlock);
kpreempt_disable();
save_spl = spl_xcall();
if (cp->cpu_cpc_ctx != NULL && !CU_CPC_ON(cp)) {
splx(save_spl);
kpreempt_enable();
mutex_exit(&cp->cpu_cpc_ctxlock);
goto unbound;
}
if (curthread->t_bind_cpu != cpuid) {
splx(save_spl);
kpreempt_enable();
mutex_exit(&cp->cpu_cpc_ctxlock);
goto unbound;
}
kcpc_program(ctx, B_FALSE, B_TRUE);
splx(save_spl);
kpreempt_enable();
mutex_exit(&cp->cpu_cpc_ctxlock);
mutex_exit(&cpu_lock);
mutex_enter(&set->ks_lock);
set->ks_state |= KCPC_SET_BOUND;
cv_signal(&set->ks_condv);
mutex_exit(&set->ks_lock);
return (0);
unbound:
mutex_exit(&cpu_lock);
set->ks_ctx = NULL;
kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
kcpc_ctx_free(ctx);
return (EAGAIN);
}
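
/*
 * Bind a set to a thread. If the target is the calling thread, program
 * the hardware immediately; otherwise leave the context unfrozen so that
 * it is programmed via the context ops when the target thread next runs.
 */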
int
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
{
kcpc_ctx_t *ctx;
int error;
if (t->t_cpc_ctx != NULL)
return (EEXIST);
ctx = kcpc_ctx_alloc(KM_SLEEP);
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
ctx->kc_hrtime = gethrtime();
if (kcpc_assign_reqs(set, ctx) != 0) {
kcpc_ctx_free(ctx);
*subcode = CPC_RESOURCE_UNAVAIL;
return (EINVAL);
}
ctx->kc_cpuid = -1;
if (set->ks_flags & CPC_BIND_LWP_INHERIT)
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_LWPINHERIT);
ctx->kc_thread = t;
t->t_cpc_ctx = ctx;
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_NONPRIV);
	set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);
if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
kcpc_ctx_free(ctx);
t->t_cpc_ctx = NULL;
return (error);
}
set->ks_ctx = ctx;
ctx->kc_set = set;
ctxop_install(t, &kcpc_ctxop_tpl, ctx);
if (t == curthread) {
int save_spl;
kpreempt_disable();
save_spl = spl_xcall();
kcpc_program(ctx, B_TRUE, B_TRUE);
splx(save_spl);
kpreempt_enable();
} else {
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
}
mutex_enter(&set->ks_lock);
set->ks_state |= KCPC_SET_BOUND;
cv_signal(&set->ks_condv);
mutex_exit(&set->ks_lock);
return (0);
}
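
/*
 * Walk through each request in the set and ask the PCBE to configure a
 * corresponding counter.
 */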
int
kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)
{
int i;
int ret;
kcpc_request_t *rp;
for (i = 0; i < set->ks_nreqs; i++) {
int n;
rp = &set->ks_req[i];
n = rp->kr_picnum;
ASSERT(n >= 0 && n < cpc_ncounters);
ASSERT(ctx->kc_pics[n].kp_req == NULL);
if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) {
if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT)
== 0) {
*subcode = -1;
return (ENOTSUP);
}
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_SIGOVF);
}
rp->kr_config = NULL;
if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event,
rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr,
&(rp->kr_config), (void *)ctx)) != 0) {
kcpc_free_configs(set);
*subcode = ret;
switch (ret) {
case CPC_ATTR_REQUIRES_PRIVILEGE:
case CPC_HV_NO_ACCESS:
return (EACCES);
default:
return (EINVAL);
}
}
ctx->kc_pics[n].kp_req = rp;
rp->kr_picp = &ctx->kc_pics[n];
rp->kr_data = set->ks_data + rp->kr_index;
*rp->kr_data = rp->kr_preset;
}
return (0);
}
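
/*
 * Release every PCBE configuration in the set.
 */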
void
kcpc_free_configs(kcpc_set_t *set)
{
int i;
for (i = 0; i < set->ks_nreqs; i++)
if (set->ks_req[i].kr_config != NULL)
pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
}
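
/*
 * Sample the set's counters and copy the values, the sampling timestamp,
 * and the virtualized tick count out to the caller's user-level buffers.
 */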
int
kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
{
kcpc_ctx_t *ctx = set->ks_ctx;
int save_spl;
mutex_enter(&set->ks_lock);
if ((set->ks_state & KCPC_SET_BOUND) == 0) {
mutex_exit(&set->ks_lock);
return (EINVAL);
}
mutex_exit(&set->ks_lock);
kpreempt_disable();
save_spl = spl_xcall();
if (ctx->kc_flags & KCPC_CTX_INVALID) {
splx(save_spl);
kpreempt_enable();
return (EAGAIN);
}
if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
if (ctx->kc_cpuid != -1) {
if (curthread->t_bind_cpu != ctx->kc_cpuid) {
splx(save_spl);
kpreempt_enable();
return (EAGAIN);
}
}
if (ctx->kc_thread == curthread) {
uint64_t curtick = KCPC_GET_TICK();
ctx->kc_hrtime = gethrtime_waitfree();
pcbe_ops->pcbe_sample(ctx);
ctx->kc_vtick += curtick - ctx->kc_rawtick;
ctx->kc_rawtick = curtick;
}
if (ctx->kc_flags & KCPC_CTX_INVALID) {
splx(save_spl);
kpreempt_enable();
return (EAGAIN);
}
}
splx(save_spl);
kpreempt_enable();
if (copyout(set->ks_data, buf,
set->ks_nreqs * sizeof (uint64_t)) == -1)
return (EFAULT);
if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1)
return (EFAULT);
if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1)
return (EFAULT);
return (0);
}
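
/*
 * Stop the counters on the CPU this CPU-bound context is running on.
 */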
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
cpu_t *cp;
kpreempt_disable();
if (ctx->kc_cpuid == CPU->cpu_id) {
cp = CPU;
} else {
cp = cpu_get(ctx->kc_cpuid);
}
ASSERT(cp != NULL && cp->cpu_cpc_ctx == ctx);
kcpc_cpu_stop(cp, B_FALSE);
kpreempt_enable();
}
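
/*
 * Tear down a bound set. We may race with the thread that is still
 * binding the set, so we first wait for the bind to complete, then
 * invalidate the context and stop the hardware as appropriate for
 * thread-bound and CPU-bound sets.
 */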
int
kcpc_unbind(kcpc_set_t *set)
{
kcpc_ctx_t *ctx;
kthread_t *t;
mutex_enter(&set->ks_lock);
while ((set->ks_state & KCPC_SET_BOUND) == 0)
cv_wait(&set->ks_condv, &set->ks_lock);
mutex_exit(&set->ks_lock);
ctx = set->ks_ctx;
mutex_enter(&ctx->kc_lock);
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
mutex_exit(&ctx->kc_lock);
if (ctx->kc_cpuid == -1) {
t = ctx->kc_thread;
if (t == curthread) {
int save_spl;
kpreempt_disable();
save_spl = spl_xcall();
if (!(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED))
kcpc_unprogram(ctx, B_TRUE);
splx(save_spl);
kpreempt_enable();
}
VERIFY3U(ctxop_remove(t, &kcpc_ctxop_tpl, ctx), !=, 0);
t->t_cpc_set = NULL;
t->t_cpc_ctx = NULL;
} else {
cpu_t *cp;
mutex_enter(&cpu_lock);
cp = cpu_get(ctx->kc_cpuid);
if (cp != NULL) {
mutex_enter(&cp->cpu_cpc_ctxlock);
if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
kcpc_stop_hw(ctx);
ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
mutex_exit(&cp->cpu_cpc_ctxlock);
}
mutex_exit(&cpu_lock);
if (ctx->kc_thread == curthread) {
kcpc_free(ctx, 0);
curthread->t_cpc_set = NULL;
}
}
return (0);
}
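
/*
 * Update the preset of the request with the given index in a thread-bound
 * set owned by the calling thread.
 */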
int
kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
{
int i;
ASSERT(set != NULL);
ASSERT(set->ks_state & KCPC_SET_BOUND);
ASSERT(set->ks_ctx->kc_thread == curthread);
ASSERT(set->ks_ctx->kc_cpuid == -1);
if (index < 0 || index >= set->ks_nreqs)
return (EINVAL);
for (i = 0; i < set->ks_nreqs; i++)
if (set->ks_req[i].kr_index == index)
break;
ASSERT(i != set->ks_nreqs);
set->ks_req[i].kr_preset = preset;
return (0);
}
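
/*
 * Restart a frozen thread-bound set from its presets on the calling
 * thread.
 */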
int
kcpc_restart(kcpc_set_t *set)
{
kcpc_ctx_t *ctx = set->ks_ctx;
int i;
int save_spl;
ASSERT(set->ks_state & KCPC_SET_BOUND);
ASSERT(ctx->kc_thread == curthread);
ASSERT(ctx->kc_cpuid == -1);
for (i = 0; i < set->ks_nreqs; i++) {
*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
0, 0, NULL, &set->ks_req[i].kr_config, NULL);
}
kpreempt_disable();
save_spl = spl_xcall();
if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
pcbe_ops->pcbe_allstop();
ctx->kc_rawtick = KCPC_GET_TICK();
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
pcbe_ops->pcbe_program(ctx);
splx(save_spl);
kpreempt_enable();
return (0);
}
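
/*
 * Service the CPC_ENABLE/CPC_DISABLE/CPC_USR_EVENTS/CPC_SYS_EVENTS
 * commands for a thread-bound context. Toggling user/system counting
 * requires rebinding a duplicate of the set with updated request flags.
 * Caller must hold kcpc_cpuctx_lock as reader.
 */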
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
kcpc_ctx_t *ctx = t->t_cpc_ctx;
kcpc_set_t *set = t->t_cpc_set;
kcpc_set_t *newset;
int i;
int flag;
int err;
ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));
if (ctx == NULL) {
ASSERT(t->t_cpc_set != NULL);
ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
return (EINVAL);
} else if (ctx->kc_flags & KCPC_CTX_INVALID)
return (EAGAIN);
if (cmd == CPC_ENABLE) {
if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
return (EINVAL);
kpreempt_disable();
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
kcpc_restore(ctx);
kpreempt_enable();
} else if (cmd == CPC_DISABLE) {
if (ctx->kc_flags & KCPC_CTX_FREEZE)
return (EINVAL);
kpreempt_disable();
kcpc_save(ctx);
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
kpreempt_enable();
} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
flag = (cmd == CPC_USR_EVENTS) ?
CPC_COUNT_USER: CPC_COUNT_SYSTEM;
kpreempt_disable();
KCPC_CTX_FLAG_SET(ctx,
KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
pcbe_ops->pcbe_allstop();
kpreempt_enable();
for (i = 0; i < set->ks_nreqs; i++) {
set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
if (enable)
set->ks_req[i].kr_flags |= flag;
else
set->ks_req[i].kr_flags &= ~flag;
}
newset = kcpc_dup_set(set);
if (kcpc_unbind(set) != 0)
return (EINVAL);
t->t_cpc_set = newset;
if (kcpc_bind_thread(newset, t, &err) != 0) {
t->t_cpc_set = NULL;
kcpc_free_set(newset);
return (EINVAL);
}
} else
return (EINVAL);
return (0);
}
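
/*
 * Provide PCBEs with a way of obtaining the configs of every counter that
 * will be programmed together.
 *
 * If current is NULL, provide the first config.
 * If data != NULL, the caller wants to know where the counter's current
 * value is stored.
 */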
void *
kcpc_next_config(void *token, void *current, uint64_t **data)
{
int i;
kcpc_pic_t *pic;
kcpc_ctx_t *ctx = (kcpc_ctx_t *)token;
if (current == NULL) {
for (i = 0; i < cpc_ncounters; i++)
if (ctx->kc_pics[i].kp_req != NULL)
break;
if (i == cpc_ncounters)
return (NULL);
} else {
for (i = 0; i < cpc_ncounters; i++) {
pic = &ctx->kc_pics[i];
if (pic->kp_req != NULL &&
current == pic->kp_req->kr_config)
break;
}
for (i++; i < cpc_ncounters; i++) {
pic = &ctx->kc_pics[i];
if (pic->kp_req != NULL)
break;
}
if (i == cpc_ncounters)
return (NULL);
}
if (data != NULL) {
*data = ctx->kc_pics[i].kp_req->kr_data;
}
return (ctx->kc_pics[i].kp_req->kr_config);
}
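
/*
 * Allocate a new context, hook it onto its hash bucket, and allocate its
 * per-counter pic array. Note that the pic array is always allocated
 * KM_SLEEP regardless of kmem_flags.
 */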
kcpc_ctx_t *
kcpc_ctx_alloc(int kmem_flags)
{
kcpc_ctx_t *ctx;
long hash;
ctx = (kcpc_ctx_t *)kmem_zalloc(sizeof (kcpc_ctx_t), kmem_flags);
if (ctx == NULL)
return (NULL);
hash = CPC_HASH_CTX(ctx);
mutex_enter(&kcpc_ctx_llock[hash]);
ctx->kc_next = kcpc_ctx_list[hash];
kcpc_ctx_list[hash] = ctx;
mutex_exit(&kcpc_ctx_llock[hash]);
ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
cpc_ncounters, KM_SLEEP);
ctx->kc_cpuid = -1;
return (ctx);
}
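
/*
 * Copy the set from ctx into the child context cctx, provided the parent
 * set was created with CPC_BIND_LWP_INHERIT.
 */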
static void
kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
{
kcpc_set_t *ks = ctx->kc_set, *cks;
int i, j;
int code;
ASSERT(ks != NULL);
if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)
return;
cks = kmem_zalloc(sizeof (*cks), KM_SLEEP);
cks->ks_state &= ~KCPC_SET_BOUND;
cctx->kc_set = cks;
cks->ks_flags = ks->ks_flags;
cks->ks_nreqs = ks->ks_nreqs;
cks->ks_req = kmem_alloc(cks->ks_nreqs *
sizeof (kcpc_request_t), KM_SLEEP);
cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t),
KM_SLEEP);
cks->ks_ctx = cctx;
for (i = 0; i < cks->ks_nreqs; i++) {
cks->ks_req[i].kr_index = ks->ks_req[i].kr_index;
cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum;
(void) strncpy(cks->ks_req[i].kr_event,
ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset;
cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags;
cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs;
if (ks->ks_req[i].kr_nattrs > 0) {
cks->ks_req[i].kr_attr =
kmem_alloc(ks->ks_req[i].kr_nattrs *
sizeof (kcpc_attr_t), KM_SLEEP);
}
for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) {
(void) strncpy(cks->ks_req[i].kr_attr[j].ka_name,
ks->ks_req[i].kr_attr[j].ka_name,
CPC_MAX_ATTR_LEN);
cks->ks_req[i].kr_attr[j].ka_val =
ks->ks_req[i].kr_attr[j].ka_val;
}
}
if (kcpc_configure_reqs(cctx, cks, &code) != 0)
kcpc_invalidate_config(cctx);
mutex_enter(&cks->ks_lock);
cks->ks_state |= KCPC_SET_BOUND;
cv_signal(&cks->ks_condv);
mutex_exit(&cks->ks_lock);
}
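
/*
 * Unlink the context from its hash bucket and free it along with its pic
 * array.
 */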
void
kcpc_ctx_free(kcpc_ctx_t *ctx)
{
kcpc_ctx_t **loc;
long hash = CPC_HASH_CTX(ctx);
mutex_enter(&kcpc_ctx_llock[hash]);
loc = &kcpc_ctx_list[hash];
ASSERT(*loc != NULL);
while (*loc != ctx)
loc = &(*loc)->kc_next;
*loc = ctx->kc_next;
mutex_exit(&kcpc_ctx_llock[hash]);
kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
cv_destroy(&ctx->kc_condv);
mutex_destroy(&ctx->kc_lock);
kmem_free(ctx, sizeof (*ctx));
}
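
/*
 * Generic overflow handler: identify the context whose counters
 * overflowed and post an ast to the owning lwp, or return a CPU-bound
 * context for the caller to sample and restart directly.
 *
 * Note: executed at high-level interrupt context!
 */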
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
kcpc_ctx_t *ctx;
kthread_t *t = curthread;
int i;
if (t->t_flag & T_INTR_THREAD) {
klwp_t *lwp;
atomic_inc_32(&kcpc_intrctx_count);
ctx = NULL;
if ((lwp = t->t_lwp) != NULL) {
t = lwptot(lwp);
ctx = t->t_cpc_ctx;
}
} else
ctx = t->t_cpc_ctx;
if (ctx == NULL) {
ctx = curthread->t_cpu->cpu_cpc_ctx;
if (ctx != NULL) {
return (ctx);
}
if (kcpc_nullctx_panic)
panic("null cpc context, thread %p", (void *)t);
#ifdef DEBUG
cmn_err(CE_NOTE,
"null cpc context found in overflow handler!\n");
#endif
atomic_inc_32(&kcpc_nullctx_count);
} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
for (i = 0; i < cpc_ncounters; i++) {
if (ctx->kc_pics[i].kp_req != NULL &&
bitmap & (1 << i) &&
ctx->kc_pics[i].kp_req->kr_flags &
CPC_OVF_NOTIFY_EMT) {
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
atomic_or_uint(&ctx->kc_pics[i].kp_flags,
KCPC_PIC_OVERFLOWED);
}
}
aston(t);
} else if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
return (curthread->t_cpu->cpu_cpc_ctx);
}
return (NULL);
}
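
/*
 * The hardware overflow interrupt entry point, also executed at
 * high-level interrupt context. If the DTrace cpc provider is in use,
 * hand the overflow to it and reconfigure the overflowed counters here;
 * otherwise identify the owning context via kcpc_overflow_intr() and
 * sample and restart it.
 */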
uint_t
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)
{
kcpc_ctx_t *ctx;
uint64_t bitmap;
uint8_t *state;
int save_spl;
if (pcbe_ops == NULL ||
(bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
return (DDI_INTR_UNCLAIMED);
pcbe_ops->pcbe_allstop();
if (dtrace_cpc_in_use) {
state = &cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state;
if (atomic_cas_8(state, DCPC_INTR_FREE,
DCPC_INTR_PROCESSING) == DCPC_INTR_FREE) {
int i;
kcpc_request_t req;
ASSERT(dtrace_cpc_fire != NULL);
(*dtrace_cpc_fire)(bitmap);
ctx = curthread->t_cpu->cpu_cpc_ctx;
if (ctx == NULL) {
#ifdef DEBUG
cmn_err(CE_NOTE, "null cpc context in"
"hardware overflow handler!\n");
#endif
return (DDI_INTR_CLAIMED);
}
for (i = 0; i < ctx->kc_set->ks_nreqs; i++) {
req = ctx->kc_set->ks_req[i];
if (bitmap & (1 << req.kr_picnum)) {
pcbe_ops->pcbe_configure(req.kr_picnum,
req.kr_event, req.kr_preset,
req.kr_flags, req.kr_nattrs,
req.kr_attr, &(req.kr_config),
(void *)ctx);
}
}
pcbe_ops->pcbe_program(ctx);
cpu_core[CPU->cpu_id].cpuc_dcpc_intr_state =
DCPC_INTR_FREE;
membar_producer();
}
return (DDI_INTR_CLAIMED);
}
save_spl = spl_xcall();
if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) {
uint64_t curtick = KCPC_GET_TICK();
ctx->kc_hrtime = gethrtime_waitfree();
ctx->kc_vtick += curtick - ctx->kc_rawtick;
ctx->kc_rawtick = curtick;
pcbe_ops->pcbe_sample(ctx);
pcbe_ops->pcbe_program(ctx);
}
splx(save_spl);
return (DDI_INTR_CLAIMED);
}
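
/*
 * Called from trap() when processing the ast posted by the high-level
 * interrupt handler. Returns 1 if an overflow signal should be delivered
 * because a pic counting a CPC_OVF_NOTIFY_EMT request overflowed.
 */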
int
kcpc_overflow_ast()
{
kcpc_ctx_t *ctx = curthread->t_cpc_ctx;
int i;
int found = 0;
uint64_t curtick = KCPC_GET_TICK();
ASSERT(ctx != NULL);
kpreempt_disable();
ctx->kc_hrtime = gethrtime_waitfree();
pcbe_ops->pcbe_sample(ctx);
kpreempt_enable();
ctx->kc_vtick += curtick - ctx->kc_rawtick;
for (i = 0; i < cpc_ncounters; i++) {
if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
atomic_and_uint(&ctx->kc_pics[i].kp_flags,
~KCPC_PIC_OVERFLOWED);
found = 1;
}
}
if (found)
return (1);
kpreempt_disable();
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
pcbe_ops->pcbe_program(ctx);
kpreempt_enable();
return (0);
}
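
/*
 * Context op: the thread owning this context is being switched off CPU.
 * Stop the counters and sample them into the virtualized 64-bit values,
 * then let capacity/utilization (CU) reclaim the hardware.
 */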
static void
kcpc_save(void *arg)
{
kcpc_ctx_t *ctx = arg;
int err;
int save_spl;
kpreempt_disable();
save_spl = spl_xcall();
if (ctx->kc_flags & KCPC_CTX_INVALID) {
if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) {
splx(save_spl);
kpreempt_enable();
return;
}
kcpc_unprogram(ctx, B_TRUE);
splx(save_spl);
kpreempt_enable();
return;
}
pcbe_ops->pcbe_allstop();
if (ctx->kc_flags & KCPC_CTX_FREEZE) {
splx(save_spl);
kpreempt_enable();
return;
}
ctx->kc_hrtime = gethrtime_waitfree();
ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick;
pcbe_ops->pcbe_sample(ctx);
ASSERT(ctx->kc_cpuid == -1);
cu_cpc_program(CPU, &err);
splx(save_spl);
kpreempt_enable();
}
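
/*
 * Context op: the thread owning this context is being switched onto a
 * CPU. Reprogram the hardware unless the context is invalid or frozen;
 * KCPC_CTX_RESTORE synchronizes us against kcpc_free().
 */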
static void
kcpc_restore(void *arg)
{
kcpc_ctx_t *ctx = arg;
int save_spl;
mutex_enter(&ctx->kc_lock);
if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) ==
KCPC_CTX_INVALID) {
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);
}
if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) {
mutex_exit(&ctx->kc_lock);
return;
}
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_RESTORE);
mutex_exit(&ctx->kc_lock);
kpreempt_disable();
save_spl = spl_xcall();
kcpc_program(ctx, B_TRUE, B_TRUE);
splx(save_spl);
kpreempt_enable();
mutex_enter(&ctx->kc_lock);
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_RESTORE);
cv_signal(&ctx->kc_condv);
mutex_exit(&ctx->kc_lock);
}
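
/*
 * Context ops for the idle thread, installed when counts should not
 * include idle time: the counters are stopped when the idle thread comes
 * on-CPU (restore) and reprogrammed when it is switched off (save).
 */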
static void
kcpc_idle_save(void *arg)
{
struct cpu *cp = arg;
ASSERT(CPU == cp);
mutex_enter(&cp->cpu_cpc_ctxlock);
if ((cp->cpu_cpc_ctx == NULL) ||
(cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
mutex_exit(&cp->cpu_cpc_ctxlock);
return;
}
pcbe_ops->pcbe_program(cp->cpu_cpc_ctx);
mutex_exit(&cp->cpu_cpc_ctxlock);
}
static void
kcpc_idle_restore(void *arg)
{
struct cpu *cp = arg;
ASSERT(CPU == cp);
mutex_enter(&cp->cpu_cpc_ctxlock);
if ((cp->cpu_cpc_ctx == NULL) ||
(cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
mutex_exit(&cp->cpu_cpc_ctxlock);
return;
}
pcbe_ops->pcbe_allstop();
mutex_exit(&cp->cpu_cpc_ctxlock);
}
static const struct ctxop_template kcpc_idle_ctxop_tpl = {
.ct_rev = CTXOP_TPL_REV,
.ct_save = kcpc_idle_save,
.ct_restore = kcpc_idle_restore,
};
void
kcpc_idle_ctxop_install(kthread_t *t, struct cpu *cp)
{
ctxop_install(t, &kcpc_idle_ctxop_tpl, cp);
}
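
/*
 * Context op: a new lwp was created in a process whose parent lwp has an
 * inheritable CPC context. Clone the context onto the child and, if the
 * parent requested overflow signals, arrange for the child to receive one
 * immediately.
 */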
static void
kcpc_lwp_create(void *parent, void *child)
{
kthread_t *t = parent, *ct = child;
kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx;
int i;
if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0)
return;
rw_enter(&kcpc_cpuctx_lock, RW_READER);
if (ctx->kc_flags & KCPC_CTX_INVALID) {
rw_exit(&kcpc_cpuctx_lock);
return;
}
cctx = kcpc_ctx_alloc(KM_SLEEP);
kcpc_ctx_clone(ctx, cctx);
rw_exit(&kcpc_cpuctx_lock);
KCPC_CTX_FLAG_SET(cctx, ctx->kc_flags);
cctx->kc_thread = ct;
cctx->kc_cpuid = -1;
ct->t_cpc_set = cctx->kc_set;
ct->t_cpc_ctx = cctx;
if (cctx->kc_flags & KCPC_CTX_SIGOVF) {
kcpc_set_t *ks = cctx->kc_set;
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_FREEZE);
for (i = 0; i < ks->ks_nreqs; i++) {
kcpc_request_t *kr = &ks->ks_req[i];
if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) {
*(kr->kr_data) = UINT64_MAX;
atomic_or_uint(&kr->kr_picp->kp_flags,
KCPC_PIC_OVERFLOWED);
}
}
ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
aston(ct);
}
ctxop_install(ct, &kcpc_ctxop_tpl, cctx);
}
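
/*
 * Context op, also called directly for CPU-bound contexts: invalidate the
 * context, stop the hardware if the thread is exec()ing, and free the
 * context, its configurations, and its set.
 */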
void
kcpc_free(void *arg, int isexec)
{
kcpc_ctx_t *ctx = arg;
int i;
kcpc_set_t *set = ctx->kc_set;
ASSERT(set != NULL);
mutex_enter(&ctx->kc_lock);
while (ctx->kc_flags & KCPC_CTX_RESTORE)
cv_wait(&ctx->kc_condv, &ctx->kc_lock);
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
mutex_exit(&ctx->kc_lock);
if (isexec) {
if (ctx->kc_cpuid != -1) {
cpu_t *cp;
mutex_enter(&cpu_lock);
cp = cpu_get(ctx->kc_cpuid);
if (cp != NULL) {
mutex_enter(&cp->cpu_cpc_ctxlock);
kcpc_stop_hw(ctx);
mutex_exit(&cp->cpu_cpc_ctxlock);
}
mutex_exit(&cpu_lock);
ASSERT(curthread->t_cpc_ctx == NULL);
} else {
int save_spl;
kpreempt_disable();
save_spl = spl_xcall();
kcpc_unprogram(ctx, B_TRUE);
curthread->t_cpc_ctx = NULL;
splx(save_spl);
kpreempt_enable();
}
ASSERT(ctx->kc_thread == curthread);
curthread->t_cpc_set = NULL;
}
for (i = 0; i < set->ks_nreqs; i++) {
if (set->ks_req[i].kr_config != NULL)
pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
}
kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
kcpc_ctx_free(ctx);
kcpc_free_set(set);
}
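
/*
 * Free the memory associated with a CPU-bound counter context.
 */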
void
kcpc_free_cpu(kcpc_ctx_t *ctx)
{
kcpc_free(ctx, 0);
}
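
/*
 * Free the memory associated with a request set.
 */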
void
kcpc_free_set(kcpc_set_t *set)
{
int i;
kcpc_request_t *req;
ASSERT(set->ks_req != NULL);
for (i = 0; i < set->ks_nreqs; i++) {
req = &set->ks_req[i];
if (req->kr_nattrs != 0) {
kmem_free(req->kr_attr,
req->kr_nattrs * sizeof (kcpc_attr_t));
}
}
kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
cv_destroy(&set->ks_condv);
mutex_destroy(&set->ks_lock);
kmem_free(set, sizeof (kcpc_set_t));
}
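
/*
 * Grab every existing context and mark it as invalid.
 */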
void
kcpc_invalidate_all(void)
{
kcpc_ctx_t *ctx;
long hash;
for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) {
mutex_enter(&kcpc_ctx_llock[hash]);
for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next)
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
mutex_exit(&kcpc_ctx_llock[hash]);
}
}
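
/*
 * Interface for PCBEs to signal that an existing configuration has
 * suddenly become invalid.
 */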
void
kcpc_invalidate_config(void *token)
{
kcpc_ctx_t *ctx = token;
ASSERT(ctx != NULL);
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
}
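
/*
 * Called when the calling thread is giving up its counter context (e.g.
 * at exit): unbind a CPU-bound set, or stop and invalidate a thread-bound
 * context.
 */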
void
kcpc_passivate(void)
{
kcpc_ctx_t *ctx = curthread->t_cpc_ctx;
kcpc_set_t *set = curthread->t_cpc_set;
int save_spl;
if (set == NULL)
return;
if (ctx == NULL) {
(void) kcpc_unbind(set);
ASSERT(curthread->t_cpc_set == NULL);
return;
}
kpreempt_disable();
save_spl = spl_xcall();
curthread->t_cpc_set = NULL;
if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
kcpc_unprogram(ctx, B_TRUE);
KCPC_CTX_FLAG_SET(ctx,
KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
}
curthread->t_cpc_ctx = NULL;
splx(save_spl);
kpreempt_enable();
}
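
/*
 * Try to assign each request in the set to a physical counter (pic),
 * retrying the assignment from each possible starting request. Returns 0
 * on success or -1 if no valid assignment exists.
 */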
int
kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx)
{
int i;
int *picnum_save;
ASSERT(set->ks_nreqs <= cpc_ncounters);
picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP);
for (i = 0; i < set->ks_nreqs; i++)
if (kcpc_tryassign(set, i, picnum_save) == 0)
break;
kmem_free(picnum_save, set->ks_nreqs * sizeof (int));
if (i == set->ks_nreqs)
return (-1);
return (0);
}
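
/*
 * Attempt one assignment pass starting from starting_req, respecting
 * requests that were explicitly bound to a pic (kr_picnum != -1) and the
 * per-event coverage masks reported by the PCBE. On failure, restore the
 * original picnums from scratch.
 */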
static int
kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch)
{
int i;
int j;
uint64_t bitmap = 0, resmap = 0;
uint64_t ctrmap;
for (i = 0; i < set->ks_nreqs; i++) {
scratch[i] = set->ks_req[i].kr_picnum;
if (set->ks_req[i].kr_picnum != -1)
resmap |= (1 << set->ks_req[i].kr_picnum);
}
i = starting_req;
do {
if (set->ks_req[i].kr_picnum != -1) {
ASSERT((bitmap & (1 << set->ks_req[i].kr_picnum)) == 0);
bitmap |= (1 << set->ks_req[i].kr_picnum);
if (++i == set->ks_nreqs)
i = 0;
continue;
}
ctrmap = pcbe_ops->pcbe_event_coverage(set->ks_req[i].kr_event);
for (j = 0; j < cpc_ncounters; j++) {
if (ctrmap & (1 << j) && (bitmap & (1 << j)) == 0 &&
(resmap & (1 << j)) == 0) {
bitmap |= (1 << j);
break;
}
}
if (j == cpc_ncounters) {
for (i = 0; i < set->ks_nreqs; i++)
set->ks_req[i].kr_picnum = scratch[i];
return (-1);
}
set->ks_req[i].kr_picnum = j;
if (++i == set->ks_nreqs)
i = 0;
} while (i != starting_req);
return (0);
}
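
/*
 * Make a fresh, unbound copy of the given set, duplicating its requests
 * and attributes.
 */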
kcpc_set_t *
kcpc_dup_set(kcpc_set_t *set)
{
kcpc_set_t *new;
int i;
int j;
new = kmem_zalloc(sizeof (*new), KM_SLEEP);
new->ks_state &= ~KCPC_SET_BOUND;
new->ks_flags = set->ks_flags;
new->ks_nreqs = set->ks_nreqs;
new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t),
KM_SLEEP);
new->ks_data = NULL;
new->ks_ctx = NULL;
for (i = 0; i < new->ks_nreqs; i++) {
new->ks_req[i].kr_config = NULL;
new->ks_req[i].kr_index = set->ks_req[i].kr_index;
new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum;
new->ks_req[i].kr_picp = NULL;
new->ks_req[i].kr_data = NULL;
(void) strncpy(new->ks_req[i].kr_event, set->ks_req[i].kr_event,
CPC_MAX_EVENT_LEN);
new->ks_req[i].kr_preset = set->ks_req[i].kr_preset;
new->ks_req[i].kr_flags = set->ks_req[i].kr_flags;
new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs;
new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs *
sizeof (kcpc_attr_t), KM_SLEEP);
for (j = 0; j < new->ks_req[i].kr_nattrs; j++) {
new->ks_req[i].kr_attr[j].ka_val =
set->ks_req[i].kr_attr[j].ka_val;
(void) strncpy(new->ks_req[i].kr_attr[j].ka_name,
set->ks_req[i].kr_attr[j].ka_name,
CPC_MAX_ATTR_LEN);
}
}
return (new);
}
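
/*
 * Return non-zero if the context permits non-privileged access to the
 * counters.
 */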
int
kcpc_allow_nonpriv(void *token)
{
return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV);
}
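
/*
 * Invalidate a thread's counter context, if it has one.
 */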
void
kcpc_invalidate(kthread_t *t)
{
kcpc_ctx_t *ctx = t->t_cpc_ctx;
if (ctx != NULL)
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID);
}
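
/*
 * Attempt to load a PCBE module matching the given name prefix and the
 * three numeric qualifiers (e.g. processor vendor, family, and model),
 * via modload_qualified().
 */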
int
kcpc_pcbe_tryload(const char *prefix, uint_t first, uint_t second, uint_t third)
{
uint_t s[3];
s[0] = first;
s[1] = second;
s[2] = third;
return (modload_qualified("pcbe",
"pcbe", prefix, ".", s, 3, NULL) < 0 ? -1 : 0);
}
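
/*
 * Create one or more CPC contexts for the given CPU from the specified
 * list of counter event requests. Requests that cannot all be programmed
 * simultaneously are split across multiple contexts; events that cannot
 * be assigned or configured at all are skipped. Returns the number of
 * contexts created (handing back the context pointer array through
 * ctx_ptr_array/ctx_ptr_array_sz), or a negative value on failure.
 */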
int
kcpc_cpu_ctx_create(cpu_t *cp, kcpc_request_list_t *req_list, int kmem_flags,
kcpc_ctx_t ***ctx_ptr_array, size_t *ctx_ptr_array_sz)
{
kcpc_ctx_t **ctx_ptrs;
int nctx;
int nctx_ptrs;
int nreqs;
kcpc_request_t *reqs;
if (cp == NULL || ctx_ptr_array == NULL || ctx_ptr_array_sz == NULL ||
req_list == NULL || req_list->krl_cnt < 1)
return (-1);
nreqs = req_list->krl_cnt;
nctx_ptrs = (nreqs + cpc_ncounters - 1) / cpc_ncounters;
ctx_ptrs = kmem_zalloc(nctx_ptrs * sizeof (kcpc_ctx_t *), kmem_flags);
if (ctx_ptrs == NULL)
return (-2);
nctx = 0;
reqs = req_list->krl_list;
while (nreqs > 0) {
kcpc_ctx_t *ctx;
kcpc_set_t *set;
int subcode;
ctx = kcpc_ctx_alloc(kmem_flags);
set = kcpc_set_create(reqs, nreqs, 0, kmem_flags);
if (set == NULL) {
kcpc_ctx_free(ctx);
break;
}
if (kcpc_assign_reqs(set, ctx) != 0) {
kcpc_free_set(set);
set = kcpc_set_create(reqs, 1, 0, kmem_flags);
if (set == NULL) {
kcpc_ctx_free(ctx);
break;
}
if (kcpc_assign_reqs(set, ctx) != 0) {
#ifdef DEBUG
cmn_err(CE_NOTE, "!kcpc_cpu_ctx_create: can't "
"assign counter event %s!\n",
set->ks_req->kr_event);
#endif
kcpc_free_set(set);
kcpc_ctx_free(ctx);
reqs++;
nreqs--;
continue;
}
}
set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t),
kmem_flags);
if (set->ks_data == NULL) {
kcpc_free_set(set);
kcpc_ctx_free(ctx);
break;
}
if (kcpc_configure_reqs(ctx, set, &subcode) != 0) {
#ifdef DEBUG
cmn_err(CE_NOTE,
"!kcpc_cpu_ctx_create: can't configure "
"set of counter event requests!\n");
#endif
reqs += set->ks_nreqs;
nreqs -= set->ks_nreqs;
kmem_free(set->ks_data,
set->ks_nreqs * sizeof (uint64_t));
kcpc_free_set(set);
kcpc_ctx_free(ctx);
continue;
}
set->ks_ctx = ctx;
ctx->kc_set = set;
ctx->kc_cpuid = cp->cpu_id;
ctx->kc_thread = curthread;
ctx_ptrs[nctx] = ctx;
reqs += set->ks_nreqs;
nreqs -= set->ks_nreqs;
nctx++;
if (nctx >= nctx_ptrs) {
kcpc_ctx_t **new;
int new_cnt;
new_cnt = nctx_ptrs +
((nreqs + cpc_ncounters - 1) / cpc_ncounters);
new = kmem_zalloc(new_cnt * sizeof (kcpc_ctx_t *),
kmem_flags);
if (new == NULL)
break;
bcopy(ctx_ptrs, new,
nctx_ptrs * sizeof (kcpc_ctx_t *));
kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *));
ctx_ptrs = new;
nctx_ptrs = new_cnt;
}
}
if (nctx == 0) {
kmem_free(ctx_ptrs, nctx_ptrs * sizeof (kcpc_ctx_t *));
*ctx_ptr_array = NULL;
*ctx_ptr_array_sz = 0;
return (-2);
}
*ctx_ptr_array = ctx_ptrs;
*ctx_ptr_array_sz = nctx_ptrs * sizeof (kcpc_ctx_t *);
return (nctx);
}
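
/*
 * Return whether the current PCBE supports the specified counter event.
 */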
boolean_t
kcpc_event_supported(char *event)
{
if (pcbe_ops == NULL || pcbe_ops->pcbe_event_coverage(event) == 0)
return (B_FALSE);
return (B_TRUE);
}
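
/*
 * Program the calling CPU with the given CPC context. Must be called at
 * high PIL with preemption disabled. When cu_interpose is false we are
 * programming a capacity/utilization (CU) context, so each request is
 * restarted from its preset rather than from saved state.
 */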
void
kcpc_program(kcpc_ctx_t *ctx, boolean_t for_thread, boolean_t cu_interpose)
{
int error;
ASSERT(IS_HIPIL());
ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
ctx->kc_cpuid == -1) && curthread->t_preempt > 0);
if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
ctx->kc_cpuid != -1) || curthread->t_preempt < 1)
return;
if (cu_interpose == B_TRUE) {
cu_cpc_unprogram(CPU, &error);
} else {
kcpc_set_t *set = ctx->kc_set;
int i;
ASSERT(set != NULL);
for (i = 0; i < set->ks_nreqs; i++) {
*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
pcbe_ops->pcbe_configure(0, NULL,
set->ks_req[i].kr_preset,
0, 0, NULL, &set->ks_req[i].kr_config, NULL);
}
}
ctx->kc_rawtick = KCPC_GET_TICK();
pcbe_ops->pcbe_program(ctx);
if (for_thread == B_TRUE)
KCPC_CTX_FLAG_CLR(ctx, KCPC_CTX_FREEZE);
else
CPU->cpu_cpc_ctx = ctx;
}
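
/*
 * Unprogram the counters on the calling CPU, optionally letting
 * capacity/utilization (CU) reprogram them for its own use.
 */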
void
kcpc_unprogram(kcpc_ctx_t *ctx, boolean_t cu_interpose)
{
int error;
ASSERT(IS_HIPIL());
ASSERT(ctx != NULL && (ctx->kc_cpuid == CPU->cpu_id ||
ctx->kc_cpuid == -1) && curthread->t_preempt > 0);
if (ctx == NULL || (ctx->kc_cpuid != CPU->cpu_id &&
ctx->kc_cpuid != -1) || curthread->t_preempt < 1 ||
(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) != 0) {
return;
}
ASSERT(CPU->cpu_cpc_ctx == ctx || curthread->t_cpc_ctx == ctx);
pcbe_ops->pcbe_allstop();
KCPC_CTX_FLAG_SET(ctx, KCPC_CTX_INVALID_STOPPED);
if (cu_interpose == B_TRUE)
cu_cpc_program(CPU, &error);
}
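
/*
 * Read the counters on the current CPU and invoke the given update
 * routine with the current value of each counter event programmed on it.
 */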
int
kcpc_read(kcpc_update_func_t update_func)
{
kcpc_ctx_t *ctx;
int i;
kcpc_request_t *req;
int retval;
kcpc_set_t *set;
ASSERT(IS_HIPIL());
kpreempt_disable();
ctx = CPU->cpu_cpc_ctx;
if (ctx == NULL) {
kpreempt_enable();
return (0);
}
pcbe_ops->pcbe_sample(ctx);
set = ctx->kc_set;
if (set == NULL || set->ks_req == NULL) {
kpreempt_enable();
return (0);
}
req = set->ks_req;
retval = 0;
for (i = 0; i < set->ks_nreqs; i++) {
int ret;
if (req[i].kr_data == NULL)
break;
ret = update_func(req[i].kr_ptr, *req[i].kr_data);
if (ret < 0)
retval = ret;
}
kpreempt_enable();
return (retval);
}
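
/*
 * Initialize a list capable of holding nreqs counter event requests.
 */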
kcpc_request_list_t *
kcpc_reqs_init(int nreqs, int kmem_flags)
{
kcpc_request_list_t *req_list;
kcpc_request_t *reqs;
if (nreqs < 1)
return (NULL);
req_list = kmem_zalloc(sizeof (kcpc_request_list_t), kmem_flags);
if (req_list == NULL)
return (NULL);
reqs = kmem_zalloc(nreqs * sizeof (kcpc_request_t), kmem_flags);
if (reqs == NULL) {
kmem_free(req_list, sizeof (kcpc_request_list_t));
return (NULL);
}
req_list->krl_list = reqs;
req_list->krl_cnt = 0;
req_list->krl_max = nreqs;
return (req_list);
}
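
/*
 * Add a counter event request to the given request list, growing the list
 * if it is full.
 */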
int
kcpc_reqs_add(kcpc_request_list_t *req_list, char *event, uint64_t preset,
uint_t flags, uint_t nattrs, kcpc_attr_t *attr, void *ptr, int kmem_flags)
{
kcpc_request_t *req;
if (req_list == NULL || req_list->krl_list == NULL)
return (-1);
ASSERT(req_list->krl_max != 0);
	/*
	 * Grow the list when it is full, preserving the requests already
	 * added (>= so that we never write one entry past the end below).
	 */
	if (req_list->krl_cnt >= req_list->krl_max) {
		kcpc_request_t *new;
		kcpc_request_t *old;

		old = req_list->krl_list;
		new = kmem_zalloc((req_list->krl_max +
		    cpc_ncounters) * sizeof (kcpc_request_t), kmem_flags);
		if (new == NULL)
			return (-2);

		req_list->krl_list = new;
		bcopy(old, req_list->krl_list,
		    req_list->krl_cnt * sizeof (kcpc_request_t));
		kmem_free(old, req_list->krl_max * sizeof (kcpc_request_t));
		req_list->krl_max += cpc_ncounters;
	}
req = &req_list->krl_list[req_list->krl_cnt];
req->kr_config = NULL;
req->kr_picnum = -1;
req->kr_index = -1;
req->kr_data = NULL;
(void) strcpy(req->kr_event, event);
req->kr_preset = preset;
req->kr_flags = flags;
req->kr_nattrs = nattrs;
req->kr_attr = attr;
req->kr_ptr = ptr;
req_list->krl_cnt++;
return (0);
}
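
/*
 * Reset a request list so its space can be reused for another set of
 * requests.
 */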
int
kcpc_reqs_reset(kcpc_request_list_t *req_list)
{
if (req_list == NULL || req_list->krl_list == NULL ||
req_list->krl_max <= 0)
return (-1);
bzero(req_list->krl_list, req_list->krl_max * sizeof (kcpc_request_t));
req_list->krl_cnt = 0;
return (0);
}
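
/*
 * Free a request list and its contents.
 */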
int
kcpc_reqs_fini(kcpc_request_list_t *req_list)
{
kmem_free(req_list->krl_list,
req_list->krl_max * sizeof (kcpc_request_t));
kmem_free(req_list, sizeof (kcpc_request_list_t));
return (0);
}
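
/*
 * Create a set from the given requests, capping the set's size at the
 * number of hardware counters and assigning each request its index within
 * the set.
 */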
static kcpc_set_t *
kcpc_set_create(kcpc_request_t *reqs, int nreqs, int set_flags, int kmem_flags)
{
int i;
kcpc_set_t *set;
set = kmem_zalloc(sizeof (kcpc_set_t), kmem_flags);
if (set == NULL)
return (NULL);
if (nreqs < cpc_ncounters)
set->ks_nreqs = nreqs;
else
set->ks_nreqs = cpc_ncounters;
set->ks_flags = set_flags;
set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
set->ks_nreqs, kmem_flags);
if (set->ks_req == NULL) {
kmem_free(set, sizeof (kcpc_set_t));
return (NULL);
}
bcopy(reqs, set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
for (i = 0; i < set->ks_nreqs; i++)
set->ks_req[i].kr_index = i;
return (set);
}
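
/*
 * Runs on the target CPU (via cpu_call()): stop the counters. If
 * preserve_context is false and capacity/utilization (CU) is not using
 * the counters, also clear the CPU's CPC context.
 */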
static void
kcpc_cpustop_func(uintptr_t arg1, uintptr_t arg2 __unused)
{
boolean_t preserve_context;
kpreempt_disable();
preserve_context = (boolean_t)arg1;
if (CPU->cpu_cpc_ctx == NULL) {
kpreempt_enable();
return;
}
kcpc_unprogram(CPU->cpu_cpc_ctx, B_TRUE);
ASSERT(!preserve_context || !CU_CPC_ON(CPU));
if (!preserve_context && CPU->cpu_cpc_ctx != NULL && !CU_CPC_ON(CPU))
CPU->cpu_cpc_ctx = NULL;
kpreempt_enable();
}
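
/*
 * Stop the counters on the given CPU via cpu_call().
 */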
void
kcpc_cpu_stop(cpu_t *cp, boolean_t preserve_context)
{
cpu_call(cp, kcpc_cpustop_func, preserve_context, 0);
}
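
/*
 * Runs on the target CPU (via cpu_call()): program the given context.
 */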
static void
kcpc_remoteprogram_func(uintptr_t arg1, uintptr_t arg2)
{
kcpc_ctx_t *ctx = (kcpc_ctx_t *)arg1;
boolean_t for_thread = (boolean_t)arg2;
ASSERT(ctx != NULL);
kpreempt_disable();
kcpc_program(ctx, for_thread, B_TRUE);
kpreempt_enable();
}
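
/*
 * Program the given CPU-bound context on its CPU via cpu_call().
 */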
void
kcpc_cpu_program(cpu_t *cp, kcpc_ctx_t *ctx)
{
cpu_call(cp, kcpc_remoteprogram_func, (uintptr_t)ctx,
(uintptr_t)B_FALSE);
}
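
/*
 * Thin wrappers exporting PCBE information: the supported attribute list,
 * per-pic event lists, capability flags, and whether a PCBE is loaded at
 * all.
 */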
char *
kcpc_list_attrs(void)
{
ASSERT(pcbe_ops != NULL);
return (pcbe_ops->pcbe_list_attrs());
}
char *
kcpc_list_events(uint_t pic)
{
ASSERT(pcbe_ops != NULL);
return (pcbe_ops->pcbe_list_events(pic));
}
uint_t
kcpc_pcbe_capabilities(void)
{
ASSERT(pcbe_ops != NULL);
return (pcbe_ops->pcbe_caps);
}
int
kcpc_pcbe_loaded(void)
{
return (pcbe_ops == NULL ? -1 : 0);
}