#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/debug.h>
#include <sys/bofi.h>
#include <sys/bofi_impl.h>
#ifdef __sparc
#include <sys/dvma.h>
#endif
extern void *bp_mapin_common(struct buf *, int);
static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;
static struct bofi_link bofi_link_array[BOFI_NLINKS], *bofi_link_freelist;
#define LLSZMASK (sizeof (uint64_t)-1)
#define HDL_HASH_TBL_SIZE 64
static struct bofi_shadow hhash_table[HDL_HASH_TBL_SIZE];
static struct bofi_shadow dhash_table[HDL_HASH_TBL_SIZE];
#define HDL_DHASH(x) \
(&dhash_table[((uintptr_t)(x) >> 3) & (HDL_HASH_TBL_SIZE-1)])
#define HDL_HHASH(x) \
(&hhash_table[((uintptr_t)(x) >> 5) & (HDL_HASH_TBL_SIZE-1)])
static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;
static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];
static int initialized = 0;
#define NCLONES 2560
static int clone_tab[NCLONES];
static dev_info_t *our_dip;
static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static uint_t bofi_signal(caddr_t arg);
static int bofi_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int bofi_attach(dev_info_t *, ddi_attach_cmd_t);
static int bofi_detach(dev_info_t *, ddi_detach_cmd_t);
static int bofi_open(dev_t *, int, int, cred_t *);
static int bofi_close(dev_t, int, int, cred_t *);
static int bofi_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int bofi_errdef_alloc(struct bofi_errdef *, char *,
struct bofi_errent *);
static int bofi_errdef_free(struct bofi_errent *);
static void bofi_start(struct bofi_errctl *, char *);
static void bofi_stop(struct bofi_errctl *, char *);
static void bofi_broadcast(struct bofi_errctl *, char *);
static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void bofi_clear_errors(struct bofi_errctl *, char *);
static void bofi_clear_errdefs(struct bofi_errctl *, char *);
static int bofi_errdef_check(struct bofi_errstate *,
struct acc_log_elem **);
static int bofi_errdef_check_w(struct bofi_errstate *,
struct acc_log_elem **);
static int bofi_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
off_t, off_t, caddr_t *);
static int bofi_dma_allochdl(dev_info_t *, dev_info_t *,
ddi_dma_attr_t *, int (*)(caddr_t), caddr_t,
ddi_dma_handle_t *);
static int bofi_dma_freehdl(dev_info_t *, dev_info_t *,
ddi_dma_handle_t);
static int bofi_dma_bindhdl(dev_info_t *, dev_info_t *,
ddi_dma_handle_t, struct ddi_dma_req *, ddi_dma_cookie_t *,
uint_t *);
static int bofi_dma_unbindhdl(dev_info_t *, dev_info_t *,
ddi_dma_handle_t);
static int bofi_dma_flush(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
off_t, size_t, uint_t);
static int bofi_dma_ctl(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
enum ddi_dma_ctlops, off_t *, size_t *, caddr_t *, uint_t);
static int bofi_dma_win(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
static int bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip,
ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp,
void *result);
static int bofi_fm_ereport_callback(sysevent_t *ev, void *cookie);
evchan_t *bofi_error_chan;
#define FM_SIMULATED_DMA "simulated.dma"
#define FM_SIMULATED_PIO "simulated.pio"
#if defined(__sparc)
static void bofi_dvma_kaddr_load(ddi_dma_handle_t, caddr_t, uint_t,
uint_t, ddi_dma_cookie_t *);
static void bofi_dvma_unload(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_sync(ddi_dma_handle_t, uint_t, uint_t);
static void bofi_dvma_reserve(dev_info_t *, ddi_dma_handle_t);
#endif
static int driver_under_test(dev_info_t *);
static int bofi_check_acc_hdl(ddi_acc_impl_t *);
static int bofi_check_dma_hdl(ddi_dma_impl_t *);
static int bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
ddi_eventcookie_t eventhdl, void *impl_data);
/*
 * Bus ops vector that bofi splices into the nexus driver under test
 * (see modify_bus_ops()).  Only the operations bofi intercepts are
 * filled in; the remaining slots are NULL/0.
 */
static struct bus_ops bofi_bus_ops = {
	BUSO_REV,
	bofi_map,		/* bus_map */
	NULL,			/* bus_get_intrspec (obsolete) */
	NULL,			/* bus_add_intrspec (obsolete) */
	NULL,			/* bus_remove_intrspec (obsolete) */
	i_ddi_map_fault,	/* bus_map_fault */
	NULL,			/* bus_dma_map (obsolete) */
	bofi_dma_allochdl,	/* bus_dma_allochdl */
	bofi_dma_freehdl,	/* bus_dma_freehdl */
	bofi_dma_bindhdl,	/* bus_dma_bindhdl */
	bofi_dma_unbindhdl,	/* bus_dma_unbindhdl */
	bofi_dma_flush,		/* bus_dma_flush */
	bofi_dma_win,		/* bus_dma_win */
	bofi_dma_ctl,		/* bus_dma_ctl */
	NULL,			/* bus_ctl */
	ddi_bus_prop_op,	/* bus_prop_op */
	ndi_busop_get_eventcookie,	/* bus_get_eventcookie */
	ndi_busop_add_eventcall,	/* bus_add_eventcall */
	ndi_busop_remove_eventcall,	/* bus_remove_eventcall */
	bofi_post_event,	/* bus_post_event */
	NULL,			/* bus_intr_ctl (obsolete) */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	0,			/* bus_fm_init */
	0,			/* bus_fm_fini */
	0,			/* bus_fm_access_enter */
	0,			/* bus_fm_access_exit */
	0,			/* bus_power */
	bofi_intr_ops		/* bus_intr_op */
};
/*
 * Character device entry points for the bofi control device.  Only
 * open/close/ioctl are implemented; everything else is nodev.
 */
static struct cb_ops bofi_cb_ops = {
	bofi_open,		/* cb_open */
	bofi_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	bofi_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str - not a STREAMS driver */
	D_MP,			/* cb_flag - MT-safe driver */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
/*
 * Device operations for the bofi pseudo driver itself.
 */
static struct dev_ops bofi_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	bofi_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	bofi_attach,		/* devo_attach */
	bofi_detach,		/* devo_detach */
	nulldev,		/* devo_reset */
	&bofi_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops - bofi is not a nexus */
	nulldev,		/* devo_power */
	ddi_quiesce_not_needed,	/* devo_quiesce */
};
/* soft state anchor for per-clone bofi_errent allocations */
static void *statep;

/* loadable module linkage */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops - this is a device driver */
	"bofi driver",		/* drv_linkinfo */
	&bofi_ops		/* drv_dev_ops */
};
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0
};
/* original nexus bus_ops, saved by modify_bus_ops() for use at detach */
static struct bus_ops save_bus_ops;

#if defined(__sparc)
/* DVMA interposition vector (sparc only) */
static struct dvma_ops bofi_dvma_ops = {
	DVMAO_REV,
	bofi_dvma_kaddr_load,
	bofi_dvma_unload,
	bofi_dvma_sync
};
#endif
/*
 * Map a virtual address range for DMA access by faking up a buf(9S)
 * with B_PHYS set and handing it to bp_mapin_common().  Returns the
 * kernel-accessible address.
 */
static caddr_t
dmareq_mapin(offset_t len, caddr_t addr, struct as *as, int flag)
{
	struct proc prc;
	struct buf bp;

	prc.p_as = as;
	bp.b_flags = B_PHYS;
	bp.b_un.b_addr = addr;
	bp.b_bcount = (size_t)len;
	bp.b_proc = &prc;
	return (bp_mapin_common(&bp, flag));
}
/*
 * Map a DMA request expressed as a page list: build a B_PAGEIO
 * buf(9S) and let bp_mapin_common() establish the mapping.
 */
static caddr_t
dmareq_pp_mapin(offset_t len, uint_t offset, page_t *pp, int flag)
{
	struct buf bp;

	bp.b_flags = B_PAGEIO;
	bp.b_un.b_addr = (caddr_t)(uintptr_t)offset;
	bp.b_bcount = (size_t)len;
	bp.b_pages = pp;
	return (bp_mapin_common(&bp, flag));
}
/*
 * Map a DMA request backed by a shadow page list (B_PHYS|B_SHADOW)
 * via bp_mapin_common().
 */
static caddr_t
dmareq_pplist_mapin(uint_t len, caddr_t addr, page_t **pplist, struct as *as,
    int flag)
{
	struct proc prc;
	struct buf bp;

	prc.p_as = as;
	bp.b_flags = B_PHYS | B_SHADOW;
	bp.b_un.b_addr = addr;
	bp.b_bcount = len;
	bp.b_shadow = pplist;
	bp.b_proc = &prc;
	return (bp_mapin_common(&bp, flag));
}
/*
 * Produce a kernel mapping for the memory object of a DMA request.
 * On return *lenp is the object size.  *mapaddrp is set to the
 * address that must later be passed to ddi_dmareq_mapout(), or NULL
 * when no mapping was created (kernel / as-less objects, which are
 * already addressable).  The returned value is the usable address.
 */
static caddr_t
ddi_dmareq_mapin(struct ddi_dma_req *dmareqp, caddr_t *mapaddrp,
    offset_t *lenp)
{
	int wflag;

	wflag = (dmareqp->dmar_fp == DDI_DMA_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
	*lenp = dmareqp->dmar_object.dmao_size;
	if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
		/* page-list object */
		*mapaddrp = dmareq_pp_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_offset,
		    dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp, wflag);
		return (*mapaddrp);
	}
	if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
		/* virtual object with a shadow page list */
		*mapaddrp = dmareq_pplist_mapin(dmareqp->dmar_object.dmao_size,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_priv,
		    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, wflag);
		return (*mapaddrp);
	}
	if (dmareqp->dmar_object.dmao_obj.virt_obj.v_as == &kas ||
	    dmareqp->dmar_object.dmao_obj.virt_obj.v_as == NULL) {
		/* kernel address - already mapped, nothing to undo later */
		*mapaddrp = NULL;
		return (dmareqp->dmar_object.dmao_obj.virt_obj.v_addr);
	}
	/* user address in some other address space */
	*mapaddrp = dmareq_mapin(dmareqp->dmar_object.dmao_size,
	    dmareqp->dmar_object.dmao_obj.virt_obj.v_addr,
	    dmareqp->dmar_object.dmao_obj.virt_obj.v_as, wflag);
	return (*mapaddrp);
}
/*
 * Undo a mapping created by ddi_dmareq_mapin().  A NULL addr means
 * no mapping was made, so there is nothing to tear down.
 */
static void
ddi_dmareq_mapout(caddr_t addr, offset_t len, int map_flags, page_t *pp,
    page_t **pplist)
{
	struct buf bp;

	if (addr == NULL)
		return;
	bp.b_flags = map_flags | B_REMAPPED;
	bp.b_un.b_addr = addr;
	bp.b_bcount = (size_t)len;
	bp.b_pages = pp;
	bp.b_shadow = pplist;
	bp_mapout(&bp);
}
/*
 * Return the current wall-clock time in whole seconds, from the
 * high-resolution clock.
 *
 * Fix: the definition used an old-style empty parameter list
 * (bofi_gettime()); the rest of the file uses full prototypes, so
 * declare it (void).
 */
static time_t
bofi_gettime(void)
{
	timestruc_t ts;

	gethrestime(&ts);
	return (ts.tv_sec);
}
/*
 * Restore selected bus_ops entries of the named (nexus) module from
 * *bop - the vector previously saved by modify_bus_ops().  Walks the
 * kernel module list under mod_lock and patches the live bus_ops
 * structure in place.  Returns 1 on success, 0 on failure.
 */
static int
reset_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	mutex_enter(&mod_lock);
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			/* first linkage entry is taken to be the driver */
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			/* refuse to reset while the driver is still held */
			if (ops->devo_refcnt > 0) {
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi reset bus_ops for %s",
			    mp->drv_linkinfo);
			/* patch each intercepted entry back */
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}
/*
 * Splice bofi's interception entries from *bop into the live bus_ops
 * of the named nexus module, saving the original vector in
 * save_bus_ops so reset_bus_ops() can restore it.  Returns 1 on
 * success, 0 on failure.
 */
static int
modify_bus_ops(char *name, struct bus_ops *bop)
{
	struct modctl *modp;
	struct modldrv *mp;
	struct bus_ops *bp;
	struct dev_ops *ops;

	/* bail out early if no such driver is installed */
	if (ddi_name_to_major(name) == -1)
		return (0);
	mutex_enter(&mod_lock);
	modp = &modules;
	do {
		if (strcmp(name, modp->mod_modname) == 0) {
			if (!modp->mod_linkage) {
				mutex_exit(&mod_lock);
				return (0);
			}
			/* first linkage entry is taken to be the driver */
			mp = modp->mod_linkage->ml_linkage[0];
			if (!mp || !mp->drv_dev_ops) {
				mutex_exit(&mod_lock);
				return (0);
			}
			ops = mp->drv_dev_ops;
			bp = ops->devo_bus_ops;
			if (!bp) {
				mutex_exit(&mod_lock);
				return (0);
			}
			/*
			 * Unlike reset_bus_ops(), require the nexus to be
			 * held (attached) before splicing into it.
			 */
			if (ops->devo_refcnt == 0) {
				mutex_exit(&mod_lock);
				return (0);
			}
			cmn_err(CE_NOTE, "bofi modify bus_ops for %s",
			    mp->drv_linkinfo);
			/* save the whole vector so detach can restore it */
			save_bus_ops = *bp;
			bp->bus_intr_op = bop->bus_intr_op;
			bp->bus_post_event = bop->bus_post_event;
			bp->bus_map = bop->bus_map;
			bp->bus_dma_map = bop->bus_dma_map;
			bp->bus_dma_allochdl = bop->bus_dma_allochdl;
			bp->bus_dma_freehdl = bop->bus_dma_freehdl;
			bp->bus_dma_bindhdl = bop->bus_dma_bindhdl;
			bp->bus_dma_unbindhdl = bop->bus_dma_unbindhdl;
			bp->bus_dma_flush = bop->bus_dma_flush;
			bp->bus_dma_win = bop->bus_dma_win;
			bp->bus_dma_ctl = bop->bus_dma_ctl;
			mutex_exit(&mod_lock);
			return (1);
		}
	} while ((modp = modp->mod_next) != &modules);
	mutex_exit(&mod_lock);
	return (0);
}
/*
 * Module load entry point: set up soft state, then install the
 * module (backing out the soft state on failure).
 */
int
_init(void)
{
	int err;

	err = ddi_soft_state_init(&statep, sizeof (struct bofi_errent), 1);
	if (err != 0)
		return (err);
	err = mod_install(&modlinkage);
	if (err != 0)
		ddi_soft_state_fini(&statep);
	return (err);
}
/*
 * Module unload entry point: only tear down soft state once
 * mod_remove() has succeeded.
 */
int
_fini(void)
{
	int err = mod_remove(&modlinkage);

	if (err == 0)
		ddi_soft_state_fini(&statep);
	return (err);
}
/*
 * Module info entry point - report via the module linkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * Autoconfiguration attach entry point (single instance only).  On
 * first attach: create the control minor node, read the configuration
 * properties (bofi-nexus, bofi-range-check, bofi-ddi-check,
 * bofi-sync-check, bofi-to-test), initialize the handle hash tables,
 * link freelist and mutexes, splice bofi's bus_ops into the nexus
 * named by bofi-nexus, and subscribe to FM ereports.
 *
 * Fix: ddi_prop_free(ptr) was previously called even when the
 * corresponding ddi_prop_lookup_string() failed, freeing an
 * uninitialized pointer.  Each free now happens only on success.
 */
static int
bofi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	char *name;
	char buf[80];
	int i;
	int s, ss;
	int size = NAMESIZE;
	int new_string;
	char *ptr;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	if (ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);
	if (!initialized) {
		if ((name = ddi_get_name(dip)) == NULL)
			return (DDI_FAILURE);
		(void) snprintf(buf, sizeof (buf), "%s,ctl", name);
		if (ddi_create_minor_node(dip, buf, S_IFCHR, 0,
		    DDI_PSEUDO, 0) == DDI_FAILURE)
			return (DDI_FAILURE);
		if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_MED,
		    &bofi_low_cookie) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE);
		}
		/* the nexus whose bus_ops we take over is mandatory */
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-nexus", nexus_name, &size) != DDI_PROP_SUCCESS) {
			ddi_remove_minor_node(dip, buf);
			return (DDI_FAILURE);
		}
		/* optional tuning properties - free only on success */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-range-check", &ptr) == DDI_PROP_SUCCESS) {
			if (strcmp(ptr, "panic") == 0)
				bofi_range_check = 2;
			else if (strcmp(ptr, "warn") == 0)
				bofi_range_check = 1;
			else
				bofi_range_check = 0;
			ddi_prop_free(ptr);
		} else {
			bofi_range_check = 0;
		}
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-ddi-check", &ptr) == DDI_PROP_SUCCESS) {
			bofi_ddi_check = (strcmp(ptr, "on") == 0);
			ddi_prop_free(ptr);
		} else {
			bofi_ddi_check = 0;
		}
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
		    "bofi-sync-check", &ptr) == DDI_PROP_SUCCESS) {
			bofi_sync_check = (strcmp(ptr, "on") == 0);
			ddi_prop_free(ptr);
		} else {
			bofi_sync_check = 0;
		}
		size = NAMESIZE;
		if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 0,
		    "bofi-to-test", driver_list, &size) != DDI_PROP_SUCCESS)
			driver_list[0] = 0;
		/*
		 * Convert the space-separated driver list into a sequence
		 * of nul-separated names.  driver_list_neg remains set only
		 * if every name begins with '!' (pure exclusion list).
		 */
		driver_list_neg = 1;
		new_string = 1;
		driver_list_size = strlen(driver_list);
		for (i = 0; i < driver_list_size; i++) {
			if (driver_list[i] == ' ') {
				driver_list[i] = '\0';
				new_string = 1;
			} else if (new_string) {
				if (driver_list[i] != '!')
					driver_list_neg = 0;
				new_string = 0;
			}
		}
		mutex_init(&clone_tab_mutex, NULL, MUTEX_DRIVER,
		    NULL);
		/*
		 * Sample the spl value corresponding to spl8() to use as
		 * the interrupt block level for the spin mutex.
		 */
		ss = spl8();
		s = spl8();
		splx(ss);
		mutex_init(&bofi_mutex, NULL, MUTEX_SPIN, (void *)(uintptr_t)s);
		mutex_init(&bofi_low_mutex, NULL, MUTEX_DRIVER,
		    (void *)bofi_low_cookie);
		/* empty circular lists: each head points at itself */
		shadow_list.next = &shadow_list;
		shadow_list.prev = &shadow_list;
		for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
			hhash_table[i].hnext = &hhash_table[i];
			hhash_table[i].hprev = &hhash_table[i];
			dhash_table[i].dnext = &dhash_table[i];
			dhash_table[i].dprev = &dhash_table[i];
		}
		/* chain the link array into a freelist (entry 0 unused) */
		for (i = 1; i < BOFI_NLINKS; i++)
			bofi_link_array[i].link = &bofi_link_array[i-1];
		bofi_link_freelist = &bofi_link_array[BOFI_NLINKS - 1];
		if (modify_bus_ops(nexus_name, &bofi_bus_ops) == 0) {
			ddi_remove_minor_node(dip, buf);
			mutex_destroy(&clone_tab_mutex);
			mutex_destroy(&bofi_mutex);
			mutex_destroy(&bofi_low_mutex);
			return (DDI_FAILURE);
		}
		/* FM ereport subscription is best-effort */
		if (sysevent_evc_bind(FM_ERROR_CHAN, &bofi_error_chan, 0) == 0)
			(void) sysevent_evc_subscribe(bofi_error_chan, "bofi",
			    EC_FM, bofi_fm_ereport_callback, NULL, 0);
		our_dip = dip;
		ddi_report_dev(dip);
		initialized = 1;
	}
	return (DDI_SUCCESS);
}
/*
 * Autoconfiguration detach: refuse while any shadow handles or error
 * definitions are outstanding, restore the nexus bus_ops, then tear
 * down the event channel, mutexes and minor node.
 */
static int
bofi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	char minorbuf[80];
	char *drvname;

	if (cmd != DDI_DETACH || ddi_get_instance(dip) > 0)
		return (DDI_FAILURE);
	drvname = ddi_get_name(dip);
	if (drvname == NULL)
		return (DDI_FAILURE);
	(void) snprintf(minorbuf, sizeof (minorbuf), "%s,ctl", drvname);
	/* busy if any shadow handles or errdefs still exist */
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	if (shadow_list.next != &shadow_list || errent_listp != NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	/* give the nexus its original bus_ops back */
	if (reset_bus_ops(nexus_name, &save_bus_ops) == 0)
		return (DDI_FAILURE);
	(void) sysevent_evc_unbind(bofi_error_chan);
	mutex_destroy(&clone_tab_mutex);
	mutex_destroy(&bofi_mutex);
	mutex_destroy(&bofi_low_mutex);
	ddi_remove_minor_node(dip, minorbuf);
	our_dip = NULL;
	initialized = 0;
	return (DDI_SUCCESS);
}
/*
 * getinfo(9E): map a dev_t back to our dev_info / instance.  Only
 * minor 0 (the control node) maps to a devinfo; the instance is
 * always 0.
 */
static int
bofi_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int minor = (int)getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (minor == 0 && our_dip != NULL) {
			*result = (void *)our_dip;
			return (DDI_SUCCESS);
		}
		*result = (void *)NULL;
		return (DDI_FAILURE);
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}
/*
 * open(9E) on the control node: implement clone-open semantics by
 * allocating a free slot from clone_tab, creating per-clone soft
 * state (an empty circular errdef list), and returning a new dev_t
 * with that slot as the minor number.
 */
static int
bofi_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct bofi_errent *softc;
	int slot;

	/* only the control minor (0) may be opened, and only once set up */
	if ((int)getminor(*devp) != 0 || !initialized)
		return (ENXIO);
	/* grab the first free clone slot (slot 0 is reserved) */
	mutex_enter(&clone_tab_mutex);
	for (slot = 1; slot < NCLONES; slot++) {
		if (clone_tab[slot] == 0) {
			clone_tab[slot] = 1;
			break;
		}
	}
	mutex_exit(&clone_tab_mutex);
	if (slot == NCLONES)
		return (EAGAIN);
	if (ddi_soft_state_zalloc(statep, slot) != DDI_SUCCESS) {
		/* release the slot we just claimed */
		mutex_enter(&clone_tab_mutex);
		clone_tab[slot] = 0;
		mutex_exit(&clone_tab_mutex);
		return (EAGAIN);
	}
	softc = ddi_get_soft_state(statep, slot);
	/* empty circular list of errdefs owned by this clone */
	softc->cnext = softc;
	softc->cprev = softc;
	*devp = makedevice(getmajor(*devp), slot);
	return (0);
}
/*
 * close(9E): free every errdef still chained to this clone, then
 * release the clone slot and its soft state.
 */
static int
bofi_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	int slot = (int)getminor(dev);
	struct bofi_errent *softc;
	struct bofi_errent *ep;

	softc = ddi_get_soft_state(statep, slot);
	if (softc == NULL)
		return (ENXIO);
	/* walk the circular list; save the next pointer before freeing */
	ep = softc->cnext;
	while (ep != softc) {
		struct bofi_errent *nextp = ep->cnext;

		(void) bofi_errdef_free(ep);
		ep = nextp;
	}
	mutex_enter(&clone_tab_mutex);
	clone_tab[slot] = 0;
	mutex_exit(&clone_tab_mutex);
	ddi_soft_state_free(statep, slot);
	return (0);
}
static int
bofi_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
int *rvalp)
{
struct bofi_errent *softc;
int minor = (int)getminor(dev);
struct bofi_errdef errdef;
struct bofi_errctl errctl;
struct bofi_errstate errstate;
void *ed_handle;
struct bofi_get_handles get_handles;
struct bofi_get_hdl_info hdl_info;
struct handle_info *hdlip;
struct handle_info *hib;
char *buffer;
char *bufptr;
char *endbuf;
int req_count, count, err;
char *namep;
struct bofi_shadow *hp;
int retval;
struct bofi_shadow *hhashp;
int i;
switch (cmd) {
case BOFI_ADD_DEF:
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errdef32 errdef_32;
if (ddi_copyin((void *)arg, &errdef_32,
sizeof (struct bofi_errdef32), mode)) {
return (EFAULT);
}
errdef.namesize = errdef_32.namesize;
(void) strncpy(errdef.name, errdef_32.name, NAMESIZE);
errdef.instance = errdef_32.instance;
errdef.rnumber = errdef_32.rnumber;
errdef.offset = errdef_32.offset;
errdef.len = errdef_32.len;
errdef.access_type = errdef_32.access_type;
errdef.access_count = errdef_32.access_count;
errdef.fail_count = errdef_32.fail_count;
errdef.acc_chk = errdef_32.acc_chk;
errdef.optype = errdef_32.optype;
errdef.operand = errdef_32.operand;
errdef.log.logsize = errdef_32.log.logsize;
errdef.log.entries = errdef_32.log.entries;
errdef.log.flags = errdef_32.log.flags;
errdef.log.wrapcnt = errdef_32.log.wrapcnt;
errdef.log.start_time = errdef_32.log.start_time;
errdef.log.stop_time = errdef_32.log.stop_time;
errdef.log.logbase =
(caddr_t)(uintptr_t)errdef_32.log.logbase;
errdef.errdef_handle = errdef_32.errdef_handle;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &errdef,
sizeof (struct bofi_errdef), mode))
return (EFAULT);
break;
}
#else
if (ddi_copyin((void *)arg, &errdef,
sizeof (struct bofi_errdef), mode) != 0)
return (EFAULT);
#endif
if (errdef.fail_count == 0)
errdef.optype = 0;
if (errdef.optype != 0) {
if (errdef.access_type & BOFI_INTR &&
errdef.optype != BOFI_DELAY_INTR &&
errdef.optype != BOFI_LOSE_INTR &&
errdef.optype != BOFI_EXTRA_INTR)
return (EINVAL);
if ((errdef.access_type & (BOFI_DMA_RW|BOFI_PIO_R)) &&
errdef.optype == BOFI_NO_TRANSFER)
return (EINVAL);
if ((errdef.access_type & (BOFI_PIO_RW)) &&
errdef.optype != BOFI_EQUAL &&
errdef.optype != BOFI_OR &&
errdef.optype != BOFI_XOR &&
errdef.optype != BOFI_AND &&
errdef.optype != BOFI_NO_TRANSFER)
return (EINVAL);
}
softc = ddi_get_soft_state(statep, minor);
if (softc == NULL)
return (ENXIO);
if (errdef.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errdef.namesize+1, KM_SLEEP);
(void) strncpy(namep, errdef.name, errdef.namesize);
if (bofi_errdef_alloc(&errdef, namep, softc) != DDI_SUCCESS) {
(void) bofi_errdef_free((struct bofi_errent *)
(uintptr_t)errdef.errdef_handle);
kmem_free(namep, errdef.namesize+1);
return (EINVAL);
}
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errdef32 errdef_32;
errdef_32.namesize = errdef.namesize;
(void) strncpy(errdef_32.name, errdef.name, NAMESIZE);
errdef_32.instance = errdef.instance;
errdef_32.rnumber = errdef.rnumber;
errdef_32.offset = errdef.offset;
errdef_32.len = errdef.len;
errdef_32.access_type = errdef.access_type;
errdef_32.access_count = errdef.access_count;
errdef_32.fail_count = errdef.fail_count;
errdef_32.acc_chk = errdef.acc_chk;
errdef_32.optype = errdef.optype;
errdef_32.operand = errdef.operand;
errdef_32.log.logsize = errdef.log.logsize;
errdef_32.log.entries = errdef.log.entries;
errdef_32.log.flags = errdef.log.flags;
errdef_32.log.wrapcnt = errdef.log.wrapcnt;
errdef_32.log.start_time = errdef.log.start_time;
errdef_32.log.stop_time = errdef.log.stop_time;
errdef_32.log.logbase =
(caddr32_t)(uintptr_t)errdef.log.logbase;
errdef_32.errdef_handle = errdef.errdef_handle;
if (ddi_copyout(&errdef_32, (void *)arg,
sizeof (struct bofi_errdef32), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
errdef.errdef_handle);
kmem_free(namep, errdef.namesize+1);
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout(&errdef, (void *)arg,
sizeof (struct bofi_errdef), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
errdef.errdef_handle);
kmem_free(namep, errdef.namesize+1);
return (EFAULT);
}
break;
}
#else
if (ddi_copyout(&errdef, (void *)arg,
sizeof (struct bofi_errdef), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
(uintptr_t)errdef.errdef_handle);
kmem_free(namep, errdef.namesize+1);
return (EFAULT);
}
#endif
return (0);
case BOFI_DEL_DEF:
if (ddi_copyin((void *)arg, &ed_handle,
sizeof (void *), mode) != 0)
return (EFAULT);
return (bofi_errdef_free((struct bofi_errent *)ed_handle));
case BOFI_START:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_start(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_STOP:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_stop(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_BROADCAST:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_broadcast(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_CLEAR_ACC_CHK:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_clear_acc_chk(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_CLEAR_ERRORS:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_clear_errors(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_CLEAR_ERRDEFS:
if (ddi_copyin((void *)arg, &errctl,
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
if (errctl.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(errctl.namesize+1, KM_SLEEP);
(void) strncpy(namep, errctl.name, errctl.namesize);
bofi_clear_errdefs(&errctl, namep);
kmem_free(namep, errctl.namesize+1);
return (0);
case BOFI_CHK_STATE:
{
struct acc_log_elem *klg;
size_t uls;
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errstate32 errstate_32;
if (ddi_copyin((void *)arg, &errstate_32,
sizeof (struct bofi_errstate32), mode) != 0) {
return (EFAULT);
}
errstate.fail_time = errstate_32.fail_time;
errstate.msg_time = errstate_32.msg_time;
errstate.access_count = errstate_32.access_count;
errstate.fail_count = errstate_32.fail_count;
errstate.acc_chk = errstate_32.acc_chk;
errstate.errmsg_count = errstate_32.errmsg_count;
(void) strncpy(errstate.buffer, errstate_32.buffer,
ERRMSGSIZE);
errstate.severity = errstate_32.severity;
errstate.log.logsize = errstate_32.log.logsize;
errstate.log.entries = errstate_32.log.entries;
errstate.log.flags = errstate_32.log.flags;
errstate.log.wrapcnt = errstate_32.log.wrapcnt;
errstate.log.start_time = errstate_32.log.start_time;
errstate.log.stop_time = errstate_32.log.stop_time;
errstate.log.logbase =
(caddr_t)(uintptr_t)errstate_32.log.logbase;
errstate.errdef_handle = errstate_32.errdef_handle;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &errstate,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else
if (ddi_copyin((void *)arg, &errstate,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif
if ((retval = bofi_errdef_check(&errstate, &klg)) == EINVAL)
return (EINVAL);
uls = errstate.log.logsize;
if (errstate.log.entries > uls && uls)
errstate.log.entries = uls;
if (errstate.log.stop_time == 0ul)
(void) drv_getparm(TIME, &(errstate.log.stop_time));
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errstate32 errstate_32;
errstate_32.fail_time = errstate.fail_time;
errstate_32.msg_time = errstate.msg_time;
errstate_32.access_count = errstate.access_count;
errstate_32.fail_count = errstate.fail_count;
errstate_32.acc_chk = errstate.acc_chk;
errstate_32.errmsg_count = errstate.errmsg_count;
(void) strncpy(errstate_32.buffer, errstate.buffer,
ERRMSGSIZE);
errstate_32.severity = errstate.severity;
errstate_32.log.logsize = errstate.log.logsize;
errstate_32.log.entries = errstate.log.entries;
errstate_32.log.flags = errstate.log.flags;
errstate_32.log.wrapcnt = errstate.log.wrapcnt;
errstate_32.log.start_time = errstate.log.start_time;
errstate_32.log.stop_time = errstate.log.stop_time;
errstate_32.log.logbase =
(caddr32_t)(uintptr_t)errstate.log.logbase;
errstate_32.errdef_handle = errstate.errdef_handle;
if (ddi_copyout(&errstate_32, (void *)arg,
sizeof (struct bofi_errstate32), mode) != 0)
return (EFAULT);
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout(&errstate, (void *)arg,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else
if (ddi_copyout(&errstate, (void *)arg,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif
if (uls && errstate.log.entries &&
ddi_copyout(klg, errstate.log.logbase,
errstate.log.entries * sizeof (struct acc_log_elem),
mode) != 0) {
return (EFAULT);
}
return (retval);
}
case BOFI_CHK_STATE_W:
{
struct acc_log_elem *klg;
size_t uls;
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errstate32 errstate_32;
if (ddi_copyin((void *)arg, &errstate_32,
sizeof (struct bofi_errstate32), mode) != 0) {
return (EFAULT);
}
errstate.fail_time = errstate_32.fail_time;
errstate.msg_time = errstate_32.msg_time;
errstate.access_count = errstate_32.access_count;
errstate.fail_count = errstate_32.fail_count;
errstate.acc_chk = errstate_32.acc_chk;
errstate.errmsg_count = errstate_32.errmsg_count;
(void) strncpy(errstate.buffer, errstate_32.buffer,
ERRMSGSIZE);
errstate.severity = errstate_32.severity;
errstate.log.logsize = errstate_32.log.logsize;
errstate.log.entries = errstate_32.log.entries;
errstate.log.flags = errstate_32.log.flags;
errstate.log.wrapcnt = errstate_32.log.wrapcnt;
errstate.log.start_time = errstate_32.log.start_time;
errstate.log.stop_time = errstate_32.log.stop_time;
errstate.log.logbase =
(caddr_t)(uintptr_t)errstate_32.log.logbase;
errstate.errdef_handle = errstate_32.errdef_handle;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &errstate,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else
if (ddi_copyin((void *)arg, &errstate,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif
if ((retval = bofi_errdef_check_w(&errstate, &klg)) == EINVAL)
return (EINVAL);
uls = errstate.log.logsize;
uls = errstate.log.logsize;
if (errstate.log.entries > uls && uls)
errstate.log.entries = uls;
if (errstate.log.stop_time == 0ul)
(void) drv_getparm(TIME, &(errstate.log.stop_time));
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_errstate32 errstate_32;
errstate_32.fail_time = errstate.fail_time;
errstate_32.msg_time = errstate.msg_time;
errstate_32.access_count = errstate.access_count;
errstate_32.fail_count = errstate.fail_count;
errstate_32.acc_chk = errstate.acc_chk;
errstate_32.errmsg_count = errstate.errmsg_count;
(void) strncpy(errstate_32.buffer, errstate.buffer,
ERRMSGSIZE);
errstate_32.severity = errstate.severity;
errstate_32.log.logsize = errstate.log.logsize;
errstate_32.log.entries = errstate.log.entries;
errstate_32.log.flags = errstate.log.flags;
errstate_32.log.wrapcnt = errstate.log.wrapcnt;
errstate_32.log.start_time = errstate.log.start_time;
errstate_32.log.stop_time = errstate.log.stop_time;
errstate_32.log.logbase =
(caddr32_t)(uintptr_t)errstate.log.logbase;
errstate_32.errdef_handle = errstate.errdef_handle;
if (ddi_copyout(&errstate_32, (void *)arg,
sizeof (struct bofi_errstate32), mode) != 0)
return (EFAULT);
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout(&errstate, (void *)arg,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else
if (ddi_copyout(&errstate, (void *)arg,
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif
if (uls && errstate.log.entries &&
ddi_copyout(klg, errstate.log.logbase,
errstate.log.entries * sizeof (struct acc_log_elem),
mode) != 0) {
return (EFAULT);
}
return (retval);
}
case BOFI_GET_HANDLES:
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_get_handles32 get_handles_32;
if (ddi_copyin((void *)arg, &get_handles_32,
sizeof (get_handles_32), mode) != 0) {
return (EFAULT);
}
get_handles.namesize = get_handles_32.namesize;
(void) strncpy(get_handles.name, get_handles_32.name,
NAMESIZE);
get_handles.instance = get_handles_32.instance;
get_handles.count = get_handles_32.count;
get_handles.buffer =
(caddr_t)(uintptr_t)get_handles_32.buffer;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &get_handles,
sizeof (get_handles), mode) != 0)
return (EFAULT);
break;
}
#else
if (ddi_copyin((void *)arg, &get_handles,
sizeof (get_handles), mode) != 0)
return (EFAULT);
#endif
if (get_handles.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(get_handles.namesize+1, KM_SLEEP);
(void) strncpy(namep, get_handles.name, get_handles.namesize);
req_count = get_handles.count;
bufptr = buffer = kmem_zalloc(req_count, KM_SLEEP);
endbuf = bufptr + req_count;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
hhashp = &hhash_table[i];
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
if (!driver_under_test(hp->dip))
continue;
if (ddi_name_to_major(ddi_get_name(hp->dip)) !=
ddi_name_to_major(namep))
continue;
if (hp->instance != get_handles.instance)
continue;
(void) snprintf(bufptr, (size_t)(endbuf-bufptr),
" %s %d %s ", hp->name, hp->instance,
(hp->type == BOFI_INT_HDL) ? "INTR" :
(hp->type == BOFI_ACC_HDL) ? "PIO" :
(hp->type == BOFI_DMA_HDL) ? "DMA" :
(hp->hparrayp != NULL) ? "DVMA" : "DMA*");
bufptr += strlen(bufptr);
if (hp->type == BOFI_ACC_HDL) {
if (hp->len == INT_MAX - hp->offset)
(void) snprintf(bufptr,
(size_t)(endbuf-bufptr),
"reg set %d off 0x%llx\n",
hp->rnumber, hp->offset);
else
(void) snprintf(bufptr,
(size_t)(endbuf-bufptr),
"reg set %d off 0x%llx"
" len 0x%llx\n",
hp->rnumber, hp->offset,
hp->len);
} else if (hp->type == BOFI_DMA_HDL)
(void) snprintf(bufptr,
(size_t)(endbuf-bufptr),
"handle no %d len 0x%llx"
" addr 0x%p\n", hp->rnumber,
hp->len, (void *)hp->addr);
else if (hp->type == BOFI_NULL &&
hp->hparrayp == NULL)
(void) snprintf(bufptr,
(size_t)(endbuf-bufptr),
"handle no %d\n", hp->rnumber);
else
(void) snprintf(bufptr,
(size_t)(endbuf-bufptr), "\n");
bufptr += strlen(bufptr);
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
err = ddi_copyout(buffer, get_handles.buffer, req_count, mode);
kmem_free(namep, get_handles.namesize+1);
kmem_free(buffer, req_count);
if (err != 0)
return (EFAULT);
else
return (0);
case BOFI_GET_HANDLE_INFO:
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_get_hdl_info32 hdl_info_32;
if (ddi_copyin((void *)arg, &hdl_info_32,
sizeof (hdl_info_32), mode)) {
return (EFAULT);
}
hdl_info.namesize = hdl_info_32.namesize;
(void) strncpy(hdl_info.name, hdl_info_32.name,
NAMESIZE);
hdl_info.count = hdl_info_32.count;
hdl_info.hdli = (caddr_t)(uintptr_t)hdl_info_32.hdli;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &hdl_info,
sizeof (hdl_info), mode))
return (EFAULT);
break;
}
#else
if (ddi_copyin((void *)arg, &hdl_info,
sizeof (hdl_info), mode))
return (EFAULT);
#endif
if (hdl_info.namesize > NAMESIZE)
return (EINVAL);
namep = kmem_zalloc(hdl_info.namesize + 1, KM_SLEEP);
(void) strncpy(namep, hdl_info.name, hdl_info.namesize);
req_count = hdl_info.count;
count = hdl_info.count = 0;
if (req_count > 0) {
hib = hdlip =
kmem_zalloc(req_count * sizeof (struct handle_info),
KM_SLEEP);
} else {
hib = hdlip = 0;
req_count = hdl_info.count = 0;
}
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
hhashp = &hhash_table[i];
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
if (!driver_under_test(hp->dip) ||
ddi_name_to_major(ddi_get_name(hp->dip)) !=
ddi_name_to_major(namep) ||
++(hdl_info.count) > req_count ||
count == req_count)
continue;
hdlip->instance = hp->instance;
hdlip->rnumber = hp->rnumber;
switch (hp->type) {
case BOFI_ACC_HDL:
hdlip->access_type = BOFI_PIO_RW;
hdlip->offset = hp->offset;
hdlip->len = hp->len;
break;
case BOFI_DMA_HDL:
hdlip->access_type = 0;
if (hp->flags & DDI_DMA_WRITE)
hdlip->access_type |=
BOFI_DMA_W;
if (hp->flags & DDI_DMA_READ)
hdlip->access_type |=
BOFI_DMA_R;
hdlip->len = hp->len;
hdlip->addr_cookie =
(uint64_t)(uintptr_t)hp->addr;
break;
case BOFI_INT_HDL:
hdlip->access_type = BOFI_INTR;
break;
default:
hdlip->access_type = 0;
break;
}
hdlip++;
count++;
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
err = 0;
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32:
{
struct bofi_get_hdl_info32 hdl_info_32;
hdl_info_32.namesize = hdl_info.namesize;
(void) strncpy(hdl_info_32.name, hdl_info.name,
NAMESIZE);
hdl_info_32.count = hdl_info.count;
hdl_info_32.hdli = (caddr32_t)(uintptr_t)hdl_info.hdli;
if (ddi_copyout(&hdl_info_32, (void *)arg,
sizeof (hdl_info_32), mode) != 0) {
kmem_free(namep, hdl_info.namesize+1);
if (req_count > 0)
kmem_free(hib,
req_count * sizeof (*hib));
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout(&hdl_info, (void *)arg,
sizeof (hdl_info), mode) != 0) {
kmem_free(namep, hdl_info.namesize+1);
if (req_count > 0)
kmem_free(hib,
req_count * sizeof (*hib));
return (EFAULT);
}
break;
}
#else
if (ddi_copyout(&hdl_info, (void *)arg,
sizeof (hdl_info), mode) != 0) {
kmem_free(namep, hdl_info.namesize+1);
if (req_count > 0)
kmem_free(hib, req_count * sizeof (*hib));
return (EFAULT);
}
#endif
if (count > 0) {
if (ddi_copyout(hib, hdl_info.hdli,
count * sizeof (*hib), mode) != 0) {
kmem_free(namep, hdl_info.namesize+1);
if (req_count > 0)
kmem_free(hib,
req_count * sizeof (*hib));
return (EFAULT);
}
}
kmem_free(namep, hdl_info.namesize+1);
if (req_count > 0)
kmem_free(hib, req_count * sizeof (*hib));
return (err);
default:
return (ENOTTY);
}
}
/*
 * Create a new errdef from the user-supplied definition.
 *
 * The new bofi_errent is placed on the global errdef list and on the
 * per-open-instance chain hanging off "softc" (so it can be reclaimed on
 * close).  Every shadow handle on shadow_list that the errdef could apply
 * to - same driver major and instance, matching access type, matching
 * register set, overlapping range - then gets a bofi_link from the
 * freelist pointing back at this errdef, so the access interposers will
 * consult it.  Finally a soft interrupt is registered so that log-full
 * events can be signalled back to a waiting user thread (bofi_signal).
 *
 * Ownership of namep passes to the errdef; it is freed by
 * bofi_errdef_free().  errdefp->errdef_handle is set to the kernel
 * address of the new errdef, used as an opaque handle by the user.
 * Returns the result of ddi_add_softintr().
 */
static int
bofi_errdef_alloc(struct bofi_errdef *errdefp, char *namep,
    struct bofi_errent *softc)
{
	struct bofi_errent *ep;
	struct bofi_shadow *hp;
	struct bofi_link *lp;

	ep = kmem_zalloc(sizeof (struct bofi_errent), KM_SLEEP);
	ep->errdef = *errdefp;
	ep->name = namep;
	/* the kernel address of the errdef doubles as its user handle */
	ep->errdef.errdef_handle = (uint64_t)(uintptr_t)ep;
	ep->errstate.severity = DDI_SERVICE_RESTORED;
	ep->errstate.errdef_handle = (uint64_t)(uintptr_t)ep;
	cv_init(&ep->cv, NULL, CV_DRIVER, NULL);
	/* start with an empty access log */
	ep->errdef.log.entries = 0;
	ep->errdef.log.wrapcnt = 0;
	if (ep->errdef.access_type & BOFI_LOG)
		ep->logbase = kmem_alloc(sizeof (struct acc_log_elem) *
		    ep->errdef.log.logsize, KM_SLEEP);
	else
		ep->logbase = NULL;
	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/* head of the global singly-linked errdef list */
	ep->next = errent_listp;
	errent_listp = ep;
	/* and the clone's doubly-linked chain, for cleanup on close */
	ep->cnext = softc->cnext;
	softc->cnext->cprev = ep;
	ep->cprev = softc;
	softc->cnext = ep;
	/*
	 * Link the errdef onto every existing handle it may affect.
	 * For DMA handles the range test additionally requires that at
	 * least one 64-bit-aligned word lies inside the errdef window
	 * (the DMA corruption routine works on aligned uint64_t words).
	 */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		if (ddi_name_to_major(hp->name) == ddi_name_to_major(namep) &&
		    hp->instance == errdefp->instance &&
		    (((errdefp->access_type & BOFI_DMA_RW) &&
		    (ep->errdef.rnumber == -1 ||
		    hp->rnumber == ep->errdef.rnumber) &&
		    hp->type == BOFI_DMA_HDL &&
		    (((uintptr_t)(hp->addr + ep->errdef.offset +
		    ep->errdef.len) & ~LLSZMASK) >
		    ((uintptr_t)((hp->addr + ep->errdef.offset) +
		    LLSZMASK) & ~LLSZMASK))) ||
		    ((errdefp->access_type & BOFI_INTR) &&
		    hp->type == BOFI_INT_HDL) ||
		    ((errdefp->access_type & BOFI_PIO_RW) &&
		    hp->type == BOFI_ACC_HDL &&
		    (errdefp->rnumber == -1 ||
		    hp->rnumber == errdefp->rnumber) &&
		    (errdefp->len == 0 ||
		    hp->offset < errdefp->offset + errdefp->len) &&
		    hp->offset + hp->len > errdefp->offset))) {
			lp = bofi_link_freelist;
			/* if the freelist is exhausted the handle is
			 * silently left unlinked from this errdef */
			if (lp != NULL) {
				bofi_link_freelist = lp->link;
				lp->errentp = ep;
				lp->link = hp->link;
				hp->link = lp;
			}
		}
	}
	errdefp->errdef_handle = (uint64_t)(uintptr_t)ep;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	ep->softintr_id = NULL;
	return (ddi_add_softintr(our_dip, DDI_SOFTINT_MED, &ep->softintr_id,
	    NULL, NULL, bofi_signal, (caddr_t)&ep->errdef));
}
/*
 * Tear down an errdef previously created by bofi_errdef_alloc().
 *
 * Validates that ep is still on the global errdef list (returns EINVAL
 * if not - ep arrived as an opaque user handle), unlinks it from that
 * list and from its clone chain, returns all bofi_links referencing it
 * to the freelist, then releases the name buffer, access log, soft
 * interrupt and finally the errdef itself.
 */
static int
bofi_errdef_free(struct bofi_errent *ep)
{
	struct bofi_errent *hep, *prev_hep;
	struct bofi_link *lp, *prev_lp, *next_lp;
	struct bofi_shadow *hp;

	mutex_enter(&bofi_low_mutex);
	mutex_enter(&bofi_mutex);
	/* confirm ep is a live errdef before dereferencing it further */
	prev_hep = NULL;
	for (hep = errent_listp; hep != NULL; ) {
		if (hep == ep)
			break;
		prev_hep = hep;
		hep = hep->next;
	}
	if (hep == NULL) {
		mutex_exit(&bofi_mutex);
		mutex_exit(&bofi_low_mutex);
		return (EINVAL);
	}
	/* unlink from the global singly-linked errdef list */
	if (prev_hep)
		prev_hep->next = hep->next;
	else
		errent_listp = hep->next;
	/* unlink from the clone's doubly-linked chain */
	hep->cnext->cprev = hep->cprev;
	hep->cprev->cnext = hep->cnext;
	/* strip every handle's link chain of references to this errdef */
	for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
		prev_lp = NULL;
		for (lp = hp->link; lp != NULL; ) {
			if (lp->errentp == ep) {
				if (prev_lp)
					prev_lp->link = lp->link;
				else
					hp->link = lp->link;
				next_lp = lp->link;
				/* recycle the link structure */
				lp->link = bofi_link_freelist;
				bofi_link_freelist = lp;
				lp = next_lp;
			} else {
				prev_lp = lp;
				lp = lp->link;
			}
		}
	}
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	cv_destroy(&ep->cv);
	kmem_free(ep->name, ep->errdef.namesize+1);
	if ((ep->errdef.access_type & BOFI_LOG) &&
	    ep->errdef.log.logsize && ep->logbase)
		kmem_free(ep->logbase,
		    sizeof (struct acc_log_elem) * ep->errdef.log.logsize);
	if (ep->softintr_id)
		ddi_remove_softintr(ep->softintr_id);
	kmem_free(ep, sizeof (struct bofi_errent));
	return (0);
}
/*
 * Activate every errdef matching the given driver name and instance.
 * Records an activation timestamp in each matching errdef's access log
 * and clears any previous stop time.
 */
static void
bofi_start(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		ep->state |= BOFI_DEV_ACTIVE;
		(void) drv_getparm(TIME, &ep->errdef.log.start_time);
		ep->errdef.log.stop_time = 0ul;
	}
	mutex_exit(&bofi_low_mutex);
}
/*
 * Deactivate every errdef matching the given driver name and instance,
 * stamping the log stop time if one is not already recorded.
 */
static void
bofi_stop(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		ep->state &= ~BOFI_DEV_ACTIVE;
		if (ep->errdef.log.stop_time == 0ul)
			(void) drv_getparm(TIME,
			    &ep->errdef.log.stop_time);
	}
	mutex_exit(&bofi_low_mutex);
}
/*
 * Soft interrupt handler registered per-errdef by bofi_errdef_alloc()
 * and triggered from log_acc_event() when an access log fills up.
 * Wakes any thread blocked on this errdef's condvar in
 * bofi_errdef_check_w().
 *
 * Returns DDI_INTR_UNCLAIMED if the errdef is no longer on the global
 * list (it may have been freed between trigger and delivery).
 */
static uint_t
bofi_signal(caddr_t arg)
{
	struct bofi_errdef *edp = (struct bofi_errdef *)arg;
	struct bofi_errent *hep;
	struct bofi_errent *ep =
	    (struct bofi_errent *)(uintptr_t)edp->errdef_handle;

	mutex_enter(&bofi_low_mutex);
	/* re-validate the errdef handle before touching it */
	for (hep = errent_listp; hep != NULL; ) {
		if (hep == ep)
			break;
		hep = hep->next;
	}
	if (hep == NULL) {
		mutex_exit(&bofi_low_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	/* log has filled - timestamp it and wake the waiter */
	if ((ep->errdef.access_type & BOFI_LOG) &&
	    (edp->log.flags & BOFI_LOG_FULL)) {
		edp->log.stop_time = bofi_gettime();
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	/*
	 * NOTE(review): msg_time != 0 presumably marks a pending message
	 * for the user daemon - confirm where errstate.msg_time is set.
	 */
	if (ep->errstate.msg_time != 0) {
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
	return (DDI_INTR_CLAIMED);
}
/*
 * Flag a new message on every errdef matching the given driver
 * name/instance and wake any thread blocked on it in
 * bofi_errdef_check_w().
 */
static void
bofi_broadcast(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
}
/*
 * Clear the acc_chk flag of every matching errdef whose access and fail
 * countdowns have both expired, then wake any waiter so it can observe
 * the change.
 */
static void
bofi_clear_acc_chk(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		mutex_enter(&bofi_mutex);
		if (ep->errdef.access_count == 0 &&
		    ep->errdef.fail_count == 0)
			ep->errdef.acc_chk = 0;
		mutex_exit(&bofi_mutex);
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
}
/*
 * For every matching errdef whose access countdown has expired, clear
 * its acc_chk and fail_count, stamp the log stop time if needed, then
 * wake any waiter so it can observe the change.
 */
static void
bofi_clear_errors(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		int expired;

		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		mutex_enter(&bofi_mutex);
		expired = (ep->errdef.access_count == 0);
		if (expired) {
			ep->errdef.acc_chk = 0;
			ep->errdef.fail_count = 0;
		}
		mutex_exit(&bofi_mutex);
		if (expired && ep->errdef.log.stop_time == 0ul)
			(void) drv_getparm(TIME,
			    &ep->errdef.log.stop_time);
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
}
/*
 * Unconditionally reset acc_chk and both countdowns of every matching
 * errdef, stamp the log stop time if needed, then wake any waiter.
 */
static void
bofi_clear_errdefs(struct bofi_errctl *errctlp, char *namep)
{
	struct bofi_errent *ep;

	mutex_enter(&bofi_low_mutex);
	for (ep = errent_listp; ep != NULL; ep = ep->next) {
		if (strncmp(namep, ep->name, NAMESIZE) != 0 ||
		    ep->errdef.instance != errctlp->instance)
			continue;
		mutex_enter(&bofi_mutex);
		ep->errdef.acc_chk = 0;
		ep->errdef.access_count = 0;
		ep->errdef.fail_count = 0;
		mutex_exit(&bofi_mutex);
		if (ep->errdef.log.stop_time == 0ul)
			(void) drv_getparm(TIME,
			    &ep->errdef.log.stop_time);
		ep->state |= BOFI_NEW_MESSAGE;
		if (ep->state & BOFI_MESSAGE_WAIT)
			cv_broadcast(&ep->cv);
		ep->state &= ~BOFI_MESSAGE_WAIT;
	}
	mutex_exit(&bofi_low_mutex);
}
static int
bofi_errdef_check(struct bofi_errstate *errstatep, struct acc_log_elem **logpp)
{
struct bofi_errent *hep;
struct bofi_errent *ep;
ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
mutex_enter(&bofi_low_mutex);
for (hep = errent_listp; hep != NULL; hep = hep->next)
if (hep == ep)
break;
if (hep == NULL) {
mutex_exit(&bofi_low_mutex);
return (EINVAL);
}
mutex_enter(&bofi_mutex);
ep->errstate.access_count = ep->errdef.access_count;
ep->errstate.fail_count = ep->errdef.fail_count;
ep->errstate.acc_chk = ep->errdef.acc_chk;
ep->errstate.log = ep->errdef.log;
*logpp = ep->logbase;
*errstatep = ep->errstate;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (0);
}
/*
 * Blocking variant of bofi_errdef_check(): wait until the errdef has a
 * new message (log full, broadcast, clear, ...) before snapshotting its
 * state for the caller.
 *
 * cv_wait_sig() returning 0 means a signal was taken; if no message has
 * arrived by then, EINTR will be returned - but the state is still
 * snapshotted after re-validating the handle.  Returns EINVAL if the
 * handle is not (or is no longer) on the errdef list.
 */
static int
bofi_errdef_check_w(struct bofi_errstate *errstatep,
    struct acc_log_elem **logpp)
{
	struct bofi_errent *hep;
	struct bofi_errent *ep;
	int rval = 0;

	ep = (struct bofi_errent *)(uintptr_t)errstatep->errdef_handle;
	mutex_enter(&bofi_low_mutex);
retry:
	/* the errdef may be freed while we slept - re-check every pass */
	for (hep = errent_listp; hep != NULL; hep = hep->next)
		if (hep == ep)
			break;
	if (hep == NULL) {
		mutex_exit(&bofi_low_mutex);
		return (EINVAL);
	}
	if (rval == 0 && !(ep->state & BOFI_NEW_MESSAGE)) {
		ep->state |= BOFI_MESSAGE_WAIT;
		if (cv_wait_sig(&ep->cv, &bofi_low_mutex) == 0) {
			/* interrupted by a signal with nothing pending */
			if (!(ep->state & BOFI_NEW_MESSAGE))
				rval = EINTR;
		}
		goto retry;
	}
	ep->state &= ~BOFI_NEW_MESSAGE;
	/* snapshot counters and log state under the hi-level mutex */
	mutex_enter(&bofi_mutex);
	ep->errstate.access_count = ep->errdef.access_count;
	ep->errstate.fail_count = ep->errdef.fail_count;
	ep->errstate.acc_chk = ep->errdef.acc_chk;
	ep->errstate.log = ep->errdef.log;
	*logpp = ep->logbase;
	*errstatep = ep->errstate;
	mutex_exit(&bofi_mutex);
	mutex_exit(&bofi_low_mutex);
	return (rval);
}
/*
 * Decide whether rdip's driver is one the user asked to test.
 *
 * driver_list holds NUL-separated driver names and driver_list_size is
 * the total bytes used.  With driver_list_neg == 0 the list is an
 * allow-list: return 1 on a major-number match, else 0.  Otherwise it
 * acts as a deny-list: return 0 on a match, else 1.
 *
 * NOTE(review): in the negated case the lookup uses &driver_list[i+1] -
 * presumably each entry carries a one-character prefix (e.g. '!') that
 * must be skipped; confirm against the ioctl code that fills
 * driver_list.
 */
static int
driver_under_test(dev_info_t *rdip)
{
	int i;
	char *rname;
	major_t rmaj;

	rname = ddi_get_name(rdip);
	rmaj = ddi_name_to_major(rname);
	/* step entry by entry: each is a NUL-terminated name */
	for (i = 0; i < driver_list_size; i += (1 + strlen(&driver_list[i]))) {
		if (driver_list_neg == 0) {
			if (rmaj == ddi_name_to_major(&driver_list[i]))
				return (1);
		} else {
			if (rmaj == ddi_name_to_major(&driver_list[i+1]))
				return (0);
		}
	}
	if (driver_list_neg == 0)
		return (0);
	else
		return (1);
}
/*
 * Record one access in the errdef's log.  Called from the access
 * interposers with bofi_mutex held.
 *
 * at       - access type bits (BOFI_PIO_R, BOFI_DMA_W, ...)
 * offset   - offset of the access within the handle
 * len      - access size in bytes
 * repcount - repeat count; 0 flags the non-first beats of a rep access
 * valuep   - value read/written, or NULL
 */
static void
log_acc_event(struct bofi_errent *ep, uint_t at, offset_t offset, off_t len,
    size_t repcount, uint64_t *valuep)
{
	struct bofi_errdef *edp = &(ep->errdef);
	struct acc_log *log = &edp->log;

	ASSERT(log != NULL);
	ASSERT(MUTEX_HELD(&bofi_mutex));
	if (log->flags & BOFI_LOG_REPIO)
		repcount = 1;	/* collapse a rep access to one entry */
	else if (repcount == 0 && edp->access_count > 0 &&
	    (log->flags & BOFI_LOG_FULL) == 0)
		/*
		 * NOTE(review): presumably rebates the per-beat countdown
		 * so a whole rep transfer counts as one access - confirm
		 * against the do_pior_corrupt()/do_piow_corrupt() callers.
		 */
		edp->access_count += 1;
	if (repcount && log->entries < log->logsize) {
		struct acc_log_elem *elem = ep->logbase + log->entries;

		if (log->flags & BOFI_LOG_TIMESTAMP)
			elem->access_time = bofi_gettime();
		elem->access_type = at;
		elem->offset = offset;
		elem->value = valuep ? *valuep : 0ll;
		elem->size = len;
		elem->repcount = repcount;
		++log->entries;
		if (log->entries == log->logsize) {
			/* log filled - signal the waiting user thread */
			log->flags |= BOFI_LOG_FULL;
			ddi_trigger_softintr(((struct bofi_errent *)
			    (uintptr_t)edp->errdef_handle)->softintr_id);
		}
	}
	if ((log->flags & BOFI_LOG_WRAP) && edp->access_count <= 1) {
		/* wrap mode: restart the log and re-arm the countdown */
		log->wrapcnt++;
		edp->access_count = log->logsize;
		log->entries = 0;
	}
}
/*
 * Apply a DMA errdef to a shadow handle at ddi_dma_sync() time.
 *
 * Called with bofi_mutex held.  If the synced range intersects the
 * errdef window, the access is logged.  Then, once the access countdown
 * has expired and while fail_count remains, the aligned 64-bit words of
 * the DMA buffer lying inside both the handle and the errdef window are
 * corrupted with the errdef's operand/operator.  acc_chk bit 1 (value 2)
 * additionally marks the DMA handle's FM error status and posts a
 * simulated-DMA ereport.
 */
static void
do_dma_corrupt(struct bofi_shadow *hp, struct bofi_errent *ep,
    uint_t synctype, off_t off, off_t length)
{
	uint64_t operand;
	int i;
	off_t len;
	caddr_t logaddr;
	uint64_t *addr;
	uint64_t *endaddr;
	ddi_dma_impl_t *hdlp;
	ndi_err_t *errp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	if ((ep->errdef.access_count ||
	    ep->errdef.fail_count) &&
	    (ep->errdef.access_type & BOFI_LOG)) {
		uint_t atype;

		/* classify the sync direction for the log record */
		if (synctype == DDI_DMA_SYNC_FORDEV)
			atype = BOFI_DMA_W;
		else if (synctype == DDI_DMA_SYNC_FORCPU ||
		    synctype == DDI_DMA_SYNC_FORKERNEL)
			atype = BOFI_DMA_R;
		else
			atype = 0;
		/* log only if the sync range touches the errdef window */
		if ((off <= ep->errdef.offset &&
		    off + length > ep->errdef.offset) ||
		    (off > ep->errdef.offset &&
		    off < ep->errdef.offset + ep->errdef.len)) {
			/* round up to the first aligned 64-bit word */
			logaddr = (caddr_t)((uintptr_t)(hp->addr +
			    off + LLSZMASK) & ~LLSZMASK);
			log_acc_event(ep, atype, logaddr - hp->addr,
			    length, 1, 0);
		}
	}
	if (ep->errdef.access_count > 1) {
		/* still counting down to the injected fault */
		ep->errdef.access_count--;
	} else if (ep->errdef.fail_count > 0) {
		ep->errdef.fail_count--;
		ep->errdef.access_count = 0;
		if (ep->errstate.fail_time == 0)
			ep->errstate.fail_time = bofi_gettime();
		/*
		 * Corrupt [addr, endaddr): the aligned uint64_t words in
		 * the intersection of the handle and the errdef window.
		 */
		addr = (uint64_t *)((uintptr_t)((hp->addr +
		    ep->errdef.offset) + LLSZMASK) & ~LLSZMASK);
		endaddr = (uint64_t *)((uintptr_t)(hp->addr + min(hp->len,
		    ep->errdef.offset + ep->errdef.len)) & ~LLSZMASK);
		len = endaddr - addr;
		operand = ep->errdef.operand;
		hdlp = (ddi_dma_impl_t *)(hp->hdl.dma_handle);
		errp = &hdlp->dmai_error;
		if (ep->errdef.acc_chk & 2) {
			uint64_t ena;
			char buf[FM_MAX_CLASS];

			/* flag the handle and post a simulated ereport */
			errp->err_status = DDI_FM_NONFATAL;
			(void) snprintf(buf, FM_MAX_CLASS, FM_SIMULATED_DMA);
			ena = fm_ena_generate(0, FM_ENA_FMT1);
			ddi_fm_ereport_post(hp->dip, buf, ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
			    FM_EREPORT_VERS0, NULL);
		}
		switch (ep->errdef.optype) {
		case BOFI_EQUAL :
			for (i = 0; i < len; i++)
				*(addr + i) = operand;
			break;
		case BOFI_AND :
			for (i = 0; i < len; i++)
				*(addr + i) &= operand;
			break;
		case BOFI_OR :
			for (i = 0; i < len; i++)
				*(addr + i) |= operand;
			break;
		case BOFI_XOR :
			for (i = 0; i < len; i++)
				*(addr + i) ^= operand;
			break;
		default:
			break;
		}
	}
}
static uint64_t do_bofi_rd8(struct bofi_shadow *, caddr_t);
static uint64_t do_bofi_rd16(struct bofi_shadow *, caddr_t);
static uint64_t do_bofi_rd32(struct bofi_shadow *, caddr_t);
static uint64_t do_bofi_rd64(struct bofi_shadow *, caddr_t);
/*
 * Core of the PIO-read interposers: perform a read of the given width
 * through "func", letting every active errdef linked to the handle
 * modify - or entirely suppress - the returned value.
 *
 * Called with bofi_mutex held.  The real hardware read is performed at
 * most once (latched via done_get); BOFI_NO_TRANSFER returns the errdef
 * operand without touching the device at all.  The unmodified hardware
 * value is what gets logged.  acc_chk bit 0 additionally marks the
 * access handle's FM error status and posts a simulated-PIO ereport.
 */
static uint64_t
do_pior_corrupt(struct bofi_shadow *hp, caddr_t addr,
    uint64_t (*func)(), size_t repcount, size_t accsize)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	uint64_t operand;
	uintptr_t minlen;
	intptr_t base;
	int done_get = 0;
	uint64_t get_val, gv;
	ddi_acc_impl_t *hdlp;
	ndi_err_t *errp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		/* effective length of this errdef's window */
		if (ep->errdef.len == 0)
			minlen = hp->len;
		else
			minlen = min(hp->len, ep->errdef.len);
		/* access position relative to the errdef window */
		base = addr - hp->addr - ep->errdef.offset + hp->offset;
		if ((ep->errdef.access_type & BOFI_PIO_R) &&
		    (ep->state & BOFI_DEV_ACTIVE) &&
		    base >= 0 && base < minlen) {
			if (ep->errdef.access_count > 1) {
				/* countdown phase: read and log only */
				ep->errdef.access_count--;
				if (done_get == 0) {
					done_get = 1;
					gv = get_val = func(hp, addr);
				}
				if (ep->errdef.access_type & BOFI_LOG) {
					log_acc_event(ep, BOFI_PIO_R,
					    addr - hp->addr,
					    accsize, repcount, &gv);
				}
			} else if (ep->errdef.fail_count > 0) {
				/* fault phase: corrupt the value */
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				operand = ep->errdef.operand;
				if (done_get == 0) {
					/* suppress the device read */
					if (ep->errdef.optype ==
					    BOFI_NO_TRANSFER)
						return (operand);
					done_get = 1;
					gv = get_val = func(hp, addr);
				}
				if (ep->errdef.access_type & BOFI_LOG) {
					log_acc_event(ep, BOFI_PIO_R,
					    addr - hp->addr,
					    accsize, repcount, &gv);
				}
				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
				errp = hdlp->ahi_err;
				if (ep->errdef.acc_chk & 1) {
					uint64_t ena;
					char buf[FM_MAX_CLASS];

					errp->err_status = DDI_FM_NONFATAL;
					(void) snprintf(buf, FM_MAX_CLASS,
					    FM_SIMULATED_PIO);
					ena = fm_ena_generate(0, FM_ENA_FMT1);
					ddi_fm_ereport_post(hp->dip, buf, ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
					    NULL);
				}
				switch (ep->errdef.optype) {
				case BOFI_EQUAL :
					get_val = operand;
					break;
				case BOFI_AND :
					get_val &= operand;
					break;
				case BOFI_OR :
					get_val |= operand;
					break;
				case BOFI_XOR :
					get_val ^= operand;
					break;
				default:
					break;
				}
			}
		}
	}
	/* no errdef forced a read - do the real one now */
	if (done_get == 0)
		return (func(hp, addr));
	else
		return (get_val);
}
/*
 * Core of the PIO-write interposers: give every active errdef linked to
 * this handle a chance to modify the value about to be written.
 *
 * Called with bofi_mutex held.  Returns 0 when the hardware write must
 * be suppressed entirely (BOFI_NO_TRANSFER), 1 when the caller should
 * proceed and write the (possibly modified) *valuep.  The caller's
 * original value is what gets logged.  acc_chk bit 0 additionally marks
 * the access handle's FM error status and posts a simulated-PIO ereport.
 */
static int
do_piow_corrupt(struct bofi_shadow *hp, caddr_t addr, uint64_t *valuep,
    size_t size, size_t repcount)
{
	struct bofi_errent *ep;
	struct bofi_link *lp;
	uintptr_t minlen;
	intptr_t base;
	uint64_t v = *valuep;
	ddi_acc_impl_t *hdlp;
	ndi_err_t *errp;

	ASSERT(MUTEX_HELD(&bofi_mutex));
	for (lp = hp->link; lp != NULL; lp = lp->link) {
		ep = lp->errentp;
		/* effective length of this errdef's window */
		if (ep->errdef.len == 0)
			minlen = hp->len;
		else
			minlen = min(hp->len, ep->errdef.len);
		/* access position relative to the errdef window */
		base = (caddr_t)addr - hp->addr - ep->errdef.offset +hp->offset;
		if ((ep->errdef.access_type & BOFI_PIO_W) &&
		    (ep->state & BOFI_DEV_ACTIVE) &&
		    base >= 0 && base < minlen) {
			if (ep->errdef.access_count > 1) {
				/* countdown phase: log only */
				ep->errdef.access_count--;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
			} else if (ep->errdef.fail_count > 0) {
				/* fault phase: corrupt or drop the write */
				ep->errdef.fail_count--;
				ep->errdef.access_count = 0;
				if (ep->errdef.access_type & BOFI_LOG)
					log_acc_event(ep, BOFI_PIO_W,
					    addr - hp->addr, size,
					    repcount, &v);
				if (ep->errstate.fail_time == 0)
					ep->errstate.fail_time = bofi_gettime();
				hdlp = (ddi_acc_impl_t *)(hp->hdl.acc_handle);
				errp = hdlp->ahi_err;
				if (ep->errdef.acc_chk & 1) {
					uint64_t ena;
					char buf[FM_MAX_CLASS];

					errp->err_status = DDI_FM_NONFATAL;
					(void) snprintf(buf, FM_MAX_CLASS,
					    FM_SIMULATED_PIO);
					ena = fm_ena_generate(0, FM_ENA_FMT1);
					ddi_fm_ereport_post(hp->dip, buf, ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
					    NULL);
				}
				switch (ep->errdef.optype) {
				case BOFI_EQUAL :
					*valuep = ep->errdef.operand;
					break;
				case BOFI_AND :
					*valuep &= ep->errdef.operand;
					break;
				case BOFI_OR :
					*valuep |= ep->errdef.operand;
					break;
				case BOFI_XOR :
					*valuep ^= ep->errdef.operand;
					break;
				case BOFI_NO_TRANSFER :
					/* tell caller to skip the write */
					return (0);
				default:
					break;
				}
			}
		}
	}
	return (1);
}
/* Invoke the saved (un-interposed) 8-bit get routine for this handle. */
static uint64_t
do_bofi_rd8(struct bofi_shadow *hp, caddr_t addr)
{
	ddi_acc_impl_t *real = &hp->save.acc;

	return (real->ahi_get8(real, (uint8_t *)addr));
}
/*
 * Common prologue for the PIO read interposers; picks up "addr" and
 * "hp" from the calling function's scope.  With bofi_ddi_check set, map
 * the faked address (handed out in bofi_map) back to the real mapping.
 * With bofi_range_check set, warn (or panic when it is 2) about
 * ddi_get*() accesses outside the handle and make the caller return 0.
 */
#define BOFI_READ_CHECKS(type) \
if (bofi_ddi_check) \
addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
if (bofi_range_check && ((caddr_t)addr < hp->addr || \
(caddr_t)addr - hp->addr >= hp->len)) { \
cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
"ddi_get() out of range addr %p not in %p/%llx", \
(void *)addr, (void *)hp->addr, hp->len); \
return (0); \
}
/*
 * ddi_get8() interposer: route the read through do_pior_corrupt() when
 * errdefs are linked to this handle and the bofi mutex is available;
 * otherwise fall through to the saved real access routine.
 */
static uint8_t
bofi_rd8(ddi_acc_impl_t *handle, uint8_t *addr)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint8_t result;

	BOFI_READ_CHECKS(uint8_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		result = (uint8_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd8, 1, 1);
		mutex_exit(&bofi_mutex);
		return (result);
	}
	return (hp->save.acc.ahi_get8(&hp->save.acc, addr));
}
/* Invoke the saved (un-interposed) 16-bit get routine for this handle. */
static uint64_t
do_bofi_rd16(struct bofi_shadow *hp, caddr_t addr)
{
	ddi_acc_impl_t *real = &hp->save.acc;

	return (real->ahi_get16(real, (uint16_t *)addr));
}
/*
 * ddi_get16() interposer: route the read through do_pior_corrupt() when
 * errdefs are linked to this handle and the bofi mutex is available;
 * otherwise fall through to the saved real access routine.
 */
static uint16_t
bofi_rd16(ddi_acc_impl_t *handle, uint16_t *addr)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint16_t result;

	BOFI_READ_CHECKS(uint16_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		result = (uint16_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd16, 1, 2);
		mutex_exit(&bofi_mutex);
		return (result);
	}
	return (hp->save.acc.ahi_get16(&hp->save.acc, addr));
}
/* Invoke the saved (un-interposed) 32-bit get routine for this handle. */
static uint64_t
do_bofi_rd32(struct bofi_shadow *hp, caddr_t addr)
{
	ddi_acc_impl_t *real = &hp->save.acc;

	return (real->ahi_get32(real, (uint32_t *)addr));
}
/*
 * ddi_get32() interposer: route the read through do_pior_corrupt() when
 * errdefs are linked to this handle and the bofi mutex is available;
 * otherwise fall through to the saved real access routine.
 */
static uint32_t
bofi_rd32(ddi_acc_impl_t *handle, uint32_t *addr)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint32_t result;

	BOFI_READ_CHECKS(uint32_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		result = (uint32_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd32, 1, 4);
		mutex_exit(&bofi_mutex);
		return (result);
	}
	return (hp->save.acc.ahi_get32(&hp->save.acc, addr));
}
/* Invoke the saved (un-interposed) 64-bit get routine for this handle. */
static uint64_t
do_bofi_rd64(struct bofi_shadow *hp, caddr_t addr)
{
	ddi_acc_impl_t *real = &hp->save.acc;

	return (real->ahi_get64(real, (uint64_t *)addr));
}
/*
 * ddi_get64() interposer: route the read through do_pior_corrupt() when
 * errdefs are linked to this handle and the bofi mutex is available;
 * otherwise fall through to the saved real access routine.
 */
static uint64_t
bofi_rd64(ddi_acc_impl_t *handle, uint64_t *addr)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t result;

	BOFI_READ_CHECKS(uint64_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		result = (uint64_t)do_pior_corrupt(hp, (caddr_t)addr,
		    do_bofi_rd64, 1, 8);
		mutex_exit(&bofi_mutex);
		return (result);
	}
	return (hp->save.acc.ahi_get64(&hp->save.acc, addr));
}
/*
 * Common prologue for the PIO write interposers; picks up "addr" and
 * "hp" from the calling function's scope.  With bofi_ddi_check set, map
 * the faked address (handed out in bofi_map) back to the real mapping.
 * With bofi_range_check set, warn (or panic when it is 2) about
 * ddi_put*() accesses outside the handle and make the caller return.
 */
#define BOFI_WRITE_TESTS(type) \
if (bofi_ddi_check) \
addr = (type *)((uintptr_t)addr - 64 + hp->addr); \
if (bofi_range_check && ((caddr_t)addr < hp->addr || \
(caddr_t)addr - hp->addr >= hp->len)) { \
cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
"ddi_put() out of range addr %p not in %p/%llx\n", \
(void *)addr, (void *)hp->addr, hp->len); \
return; \
}
/*
 * ddi_put8() interposer: let do_piow_corrupt() modify or suppress the
 * value when errdefs are linked to this handle and the bofi mutex is
 * available; otherwise write through the saved real access routine.
 */
static void
bofi_wr8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t value)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide = value;

	BOFI_WRITE_TESTS(uint8_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		if (do_piow_corrupt(hp, (caddr_t)addr, &wide, 1, 1))
			hp->save.acc.ahi_put8(&hp->save.acc, addr,
			    (uint8_t)wide);
		mutex_exit(&bofi_mutex);
		return;
	}
	hp->save.acc.ahi_put8(&hp->save.acc, addr, (uint8_t)wide);
}
/*
 * ddi_put16() interposer: let do_piow_corrupt() modify or suppress the
 * value when errdefs are linked to this handle and the bofi mutex is
 * available; otherwise write through the saved real access routine.
 */
static void
bofi_wr16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t value)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide = value;

	BOFI_WRITE_TESTS(uint16_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		if (do_piow_corrupt(hp, (caddr_t)addr, &wide, 2, 1))
			hp->save.acc.ahi_put16(&hp->save.acc, addr,
			    (uint16_t)wide);
		mutex_exit(&bofi_mutex);
		return;
	}
	hp->save.acc.ahi_put16(&hp->save.acc, addr, (uint16_t)wide);
}
/*
 * ddi_put32() interposer: let do_piow_corrupt() modify or suppress the
 * value when errdefs are linked to this handle and the bofi mutex is
 * available; otherwise write through the saved real access routine.
 */
static void
bofi_wr32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t value)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide = value;

	BOFI_WRITE_TESTS(uint32_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		if (do_piow_corrupt(hp, (caddr_t)addr, &wide, 4, 1))
			hp->save.acc.ahi_put32(&hp->save.acc, addr,
			    (uint32_t)wide);
		mutex_exit(&bofi_mutex);
		return;
	}
	hp->save.acc.ahi_put32(&hp->save.acc, addr, (uint32_t)wide);
}
/*
 * ddi_put64() interposer: let do_piow_corrupt() modify or suppress the
 * value when errdefs are linked to this handle and the bofi mutex is
 * available; otherwise write through the saved real access routine.
 */
static void
bofi_wr64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t value)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide = value;

	BOFI_WRITE_TESTS(uint64_t)
	if (hp->link != NULL && mutex_tryenter(&bofi_mutex)) {
		if (do_piow_corrupt(hp, (caddr_t)addr, &wide, 8, 1))
			hp->save.acc.ahi_put64(&hp->save.acc, addr,
			    (uint64_t)wide);
		mutex_exit(&bofi_mutex);
		return;
	}
	hp->save.acc.ahi_put64(&hp->save.acc, addr, (uint64_t)wide);
}
/*
 * Common prologue for the rep-read interposers; picks up "dev_addr",
 * "hp" and "repcount" from the calling function's scope.  Besides the
 * bofi_ddi_check address fix-up, the range check must cope with a
 * transfer that starts inside the handle but runs off the end: warn (or
 * panic when bofi_range_check is 2) and clamp repcount to what fits; if
 * even the start is out of range, abandon the transfer by returning.
 */
#define BOFI_REP_READ_TESTS(type) \
if (bofi_ddi_check) \
dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
(caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
"ddi_rep_get() out of range addr %p not in %p/%llx\n", \
(void *)dev_addr, (void *)hp->addr, hp->len); \
if ((caddr_t)dev_addr < hp->addr || \
(caddr_t)dev_addr - hp->addr >= hp->len) \
return; \
repcount = (type *)(hp->addr + hp->len) - dev_addr; \
}
/*
 * ddi_rep_get8() interposer: with errdefs linked and the bofi mutex
 * available, read each element through do_pior_corrupt() (repcount is
 * passed on the first beat only); otherwise use the saved rep routine.
 */
static void
bofi_rep_rd8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	int k;

	BOFI_REP_READ_TESTS(uint8_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint8_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		host_addr[k] = (uint8_t)do_pior_corrupt(hp, (caddr_t)target,
		    do_bofi_rd8, k ? 0 : repcount, 1);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_get16() interposer: with errdefs linked and the bofi mutex
 * available, read each element through do_pior_corrupt() (repcount is
 * passed on the first beat only); otherwise use the saved rep routine.
 */
static void
bofi_rep_rd16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	int k;

	BOFI_REP_READ_TESTS(uint16_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint16_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		host_addr[k] = (uint16_t)do_pior_corrupt(hp, (caddr_t)target,
		    do_bofi_rd16, k ? 0 : repcount, 2);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_get32() interposer: with errdefs linked and the bofi mutex
 * available, read each element through do_pior_corrupt() (repcount is
 * passed on the first beat only); otherwise use the saved rep routine.
 */
static void
bofi_rep_rd32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	int k;

	BOFI_REP_READ_TESTS(uint32_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint32_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		host_addr[k] = (uint32_t)do_pior_corrupt(hp, (caddr_t)target,
		    do_bofi_rd32, k ? 0 : repcount, 4);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_get64() interposer: with errdefs linked and the bofi mutex
 * available, read each element through do_pior_corrupt() (repcount is
 * passed on the first beat only); otherwise use the saved rep routine.
 */
static void
bofi_rep_rd64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	int k;

	BOFI_REP_READ_TESTS(uint64_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_get64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint64_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		host_addr[k] = (uint64_t)do_pior_corrupt(hp, (caddr_t)target,
		    do_bofi_rd64, k ? 0 : repcount, 8);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * Common prologue for the rep-write interposers; picks up "dev_addr",
 * "hp" and "repcount" from the calling function's scope.  Besides the
 * bofi_ddi_check address fix-up, the range check must cope with a
 * transfer that starts inside the handle but runs off the end: warn (or
 * panic when bofi_range_check is 2) and clamp repcount to what fits; if
 * even the start is out of range, abandon the transfer by returning.
 */
#define BOFI_REP_WRITE_TESTS(type) \
if (bofi_ddi_check) \
dev_addr = (type *)((uintptr_t)dev_addr - 64 + hp->addr); \
if (bofi_range_check && ((caddr_t)dev_addr < hp->addr || \
(caddr_t)(dev_addr + repcount) - hp->addr > hp->len)) { \
cmn_err((bofi_range_check == 2) ? CE_PANIC : CE_WARN, \
"ddi_rep_put() out of range addr %p not in %p/%llx\n", \
(void *)dev_addr, (void *)hp->addr, hp->len); \
if ((caddr_t)dev_addr < hp->addr || \
(caddr_t)dev_addr - hp->addr >= hp->len) \
return; \
repcount = (type *)(hp->addr + hp->len) - dev_addr; \
}
/*
 * ddi_rep_put8() interposer: with errdefs linked and the bofi mutex
 * available, pass each element through do_piow_corrupt() - which may
 * modify or suppress it - before the single-beat put routine; otherwise
 * use the saved rep routine.
 */
static void
bofi_rep_wr8(ddi_acc_impl_t *handle, uint8_t *host_addr, uint8_t *dev_addr,
    size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide;
	int k;

	BOFI_REP_WRITE_TESTS(uint8_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put8(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint8_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		wide = host_addr[k];
		if (do_piow_corrupt(hp, (caddr_t)target, &wide, 1,
		    k ? 0 : repcount))
			hp->save.acc.ahi_put8(&hp->save.acc, target,
			    (uint8_t)wide);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_put16() interposer: with errdefs linked and the bofi mutex
 * available, pass each element through do_piow_corrupt() - which may
 * modify or suppress it - before the single-beat put routine; otherwise
 * use the saved rep routine.
 */
static void
bofi_rep_wr16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide;
	int k;

	BOFI_REP_WRITE_TESTS(uint16_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put16(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint16_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		wide = host_addr[k];
		if (do_piow_corrupt(hp, (caddr_t)target, &wide, 2,
		    k ? 0 : repcount))
			hp->save.acc.ahi_put16(&hp->save.acc, target,
			    (uint16_t)wide);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_put32() interposer: with errdefs linked and the bofi mutex
 * available, pass each element through do_piow_corrupt() - which may
 * modify or suppress it - before the single-beat put routine; otherwise
 * use the saved rep routine.
 */
static void
bofi_rep_wr32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide;
	int k;

	BOFI_REP_WRITE_TESTS(uint32_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put32(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint32_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		wide = host_addr[k];
		if (do_piow_corrupt(hp, (caddr_t)target, &wide, 4,
		    k ? 0 : repcount))
			hp->save.acc.ahi_put32(&hp->save.acc, target,
			    (uint32_t)wide);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * ddi_rep_put64() interposer: with errdefs linked and the bofi mutex
 * available, pass each element through do_piow_corrupt() - which may
 * modify or suppress it - before the single-beat put routine; otherwise
 * use the saved rep routine.
 */
static void
bofi_rep_wr64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	struct bofi_shadow *hp = handle->ahi_common.ah_bus_private;
	uint64_t wide;
	int k;

	BOFI_REP_WRITE_TESTS(uint64_t)
	if (hp->link == NULL || !mutex_tryenter(&bofi_mutex)) {
		hp->save.acc.ahi_rep_put64(&hp->save.acc, host_addr, dev_addr,
		    repcount, flags);
		return;
	}
	for (k = 0; k < repcount; k++) {
		uint64_t *target = dev_addr;

		if (flags == DDI_DEV_AUTOINCR)
			target += k;
		wide = host_addr[k];
		if (do_piow_corrupt(hp, (caddr_t)target, &wide, 8,
		    k ? 0 : repcount))
			hp->save.acc.ahi_put64(&hp->save.acc, target,
			    (uint64_t)wide);
	}
	mutex_exit(&bofi_mutex);
}
/*
 * Our bus_map vector.
 * DDI_MO_MAP_LOCKED: let the real nexus bus_map() establish the mapping,
 * then (for drivers under test) build a bofi_shadow for the access handle
 * and swap all of the handle's access routines for the bofi_* intercepts.
 * DDI_MO_UNMAP: restore the saved handle state and tear down the shadow.
 * Anything else falls straight through to the saved bus_map().
 */
static int
bofi_map(dev_info_t *dip, dev_info_t *rdip,
ddi_map_req_t *reqp, off_t offset, off_t len, caddr_t *vaddrp)
{
ddi_acc_impl_t *ap;
struct bofi_shadow *hp;
struct bofi_errent *ep;
struct bofi_link *lp, *next_lp;
int retval;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
switch (reqp->map_op) {
case DDI_MO_MAP_LOCKED:
/* let the real nexus set up the mapping first */
retval = save_bus_ops.bus_map(dip, rdip, reqp, offset, len,
vaddrp);
if (retval != DDI_SUCCESS)
return (retval);
ap = (ddi_acc_impl_t *)reqp->map_handlep;
if (ap == NULL)
return (DDI_SUCCESS);
/* only shadow handles belonging to drivers under test */
if (!driver_under_test(ap->ahi_common.ah_dip))
return (DDI_SUCCESS);
hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
(void) strncpy(hp->name, ddi_get_name(ap->ahi_common.ah_dip),
NAMESIZE);
hp->instance = ddi_get_instance(ap->ahi_common.ah_dip);
hp->dip = ap->ahi_common.ah_dip;
/* remember the real mapped address; we may hand back a fake one */
hp->addr = *vaddrp;
/*
 * NOTE(review): with bofi_ddi_check set, a bogus kernel address is
 * returned - presumably so a driver that dereferences it directly
 * (bypassing the DDI access routines) faults immediately. Confirm.
 */
if (bofi_ddi_check)
*vaddrp = (caddr_t)64;
hp->rnumber = ((ddi_acc_hdl_t *)ap)->ah_rnumber;
hp->offset = offset;
/* len == 0 means "map to the end of the region" */
if (len == 0)
hp->len = INT_MAX - offset;
else
hp->len = min(len, INT_MAX - offset);
hp->hdl.acc_handle = (ddi_acc_handle_t)ap;
hp->link = NULL;
hp->type = BOFI_ACC_HDL;
/* save the original access vector, then point it at our intercepts */
hp->save.acc = *ap;
ap->ahi_get8 = bofi_rd8;
ap->ahi_get16 = bofi_rd16;
ap->ahi_get32 = bofi_rd32;
ap->ahi_get64 = bofi_rd64;
ap->ahi_put8 = bofi_wr8;
ap->ahi_put16 = bofi_wr16;
ap->ahi_put32 = bofi_wr32;
ap->ahi_put64 = bofi_wr64;
ap->ahi_rep_get8 = bofi_rep_rd8;
ap->ahi_rep_get16 = bofi_rep_rd16;
ap->ahi_rep_get32 = bofi_rep_rd32;
ap->ahi_rep_get64 = bofi_rep_rd64;
ap->ahi_rep_put8 = bofi_rep_wr8;
ap->ahi_rep_put16 = bofi_rep_wr16;
ap->ahi_rep_put32 = bofi_rep_wr32;
ap->ahi_rep_put64 = bofi_rep_wr64;
ap->ahi_fault_check = bofi_check_acc_hdl;
#if defined(__sparc)
#else
/* stop the framework doing direct loads/stores around our intercepts */
ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
#endif
ap->ahi_common.ah_bus_private = hp;
/* insert shadow on the global list and both hash chains */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->next = shadow_list.next;
shadow_list.next->prev = hp;
hp->prev = &shadow_list;
shadow_list.next = hp;
hhashp = HDL_HHASH(ap);
hp->hnext = hhashp->hnext;
hhashp->hnext->hprev = hp;
hp->hprev = hhashp;
hhashp->hnext = hp;
dhashp = HDL_DHASH(hp->dip);
hp->dnext = dhashp->dnext;
dhashp->dnext->dprev = hp;
hp->dprev = dhashp;
dhashp->dnext = hp;
/*
 * link this registration to every active PIO errdef whose
 * offset/len window overlaps it (errdef.len == 0 matches any
 * offset, errdef.rnumber == -1 matches any register set)
 */
for (ep = errent_listp; ep != NULL; ep = ep->next) {
if (ddi_name_to_major(hp->name) ==
ddi_name_to_major(ep->name) &&
hp->instance == ep->errdef.instance &&
(ep->errdef.access_type & BOFI_PIO_RW) &&
(ep->errdef.rnumber == -1 ||
hp->rnumber == ep->errdef.rnumber) &&
(ep->errdef.len == 0 ||
offset < ep->errdef.offset + ep->errdef.len) &&
offset + hp->len > ep->errdef.offset) {
lp = bofi_link_freelist;
if (lp != NULL) {
bofi_link_freelist = lp->link;
lp->errentp = ep;
lp->link = hp->link;
hp->link = lp;
}
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (DDI_SUCCESS);
case DDI_MO_UNMAP:
ap = (ddi_acc_impl_t *)reqp->map_handlep;
if (ap == NULL)
break;
/* look up the shadow for this access handle */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(ap);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.acc_handle == (ddi_acc_handle_t)ap)
break;
if (hp == hhashp) {
/* not one of ours - just pass the unmap through */
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
break;
}
/* restore the saved handle state and the real mapped address */
*ap = hp->save.acc;
*vaddrp = hp->addr;
/* unlink shadow from the hash chains and the global list */
hp->hnext->hprev = hp->hprev;
hp->hprev->hnext = hp->hnext;
hp->dnext->dprev = hp->dprev;
hp->dprev->dnext = hp->dnext;
hp->next->prev = hp->prev;
hp->prev->next = hp->next;
/* give any errdef links back to the freelist */
for (lp = hp->link; lp != NULL; ) {
next_lp = lp->link;
lp->link = bofi_link_freelist;
bofi_link_freelist = lp;
lp = next_lp;
}
hp->link = NULL;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
kmem_free(hp, sizeof (struct bofi_shadow));
break;
default:
break;
}
/* hand the (possibly restored) request on to the real bus_map() */
return (save_bus_ops.bus_map(dip, rdip, reqp, offset, len, vaddrp));
}
/*
 * Link a newly bound DMA shadow handle onto every matching errdef.
 * Must be called with bofi_mutex held (asserted below).
 * The LLSZMASK arithmetic checks that the errdef's offset/len window
 * still contains at least one complete 8-byte-aligned word of the
 * mapping - presumably because corruption is applied in 64-bit units;
 * confirm against do_dma_corrupt().
 * If the errdef corrupts DMA writes and this mapping is for write and
 * the errdef is active, the device-view corruption is applied right away.
 */
static void
chain_on_errdefs(struct bofi_shadow *hp)
{
struct bofi_errent *ep;
struct bofi_link *lp;
ASSERT(MUTEX_HELD(&bofi_mutex));
for (ep = errent_listp; ep != NULL; ep = ep->next) {
if (ddi_name_to_major(hp->name) ==
ddi_name_to_major(ep->name) &&
hp->instance == ep->errdef.instance &&
(ep->errdef.rnumber == -1 ||
hp->rnumber == ep->errdef.rnumber) &&
((ep->errdef.access_type & BOFI_DMA_RW) &&
(((uintptr_t)(hp->addr + ep->errdef.offset +
ep->errdef.len) & ~LLSZMASK) >
((uintptr_t)((hp->addr + ep->errdef.offset) +
LLSZMASK) & ~LLSZMASK)))) {
/* take a link from the freelist; silently skip if exhausted */
lp = bofi_link_freelist;
if (lp != NULL) {
bofi_link_freelist = lp->link;
lp->errentp = ep;
lp->link = hp->link;
hp->link = lp;
if ((ep->errdef.access_type & BOFI_DMA_W) &&
(hp->flags & DDI_DMA_WRITE) &&
(ep->state & BOFI_DEV_ACTIVE)) {
do_dma_corrupt(hp, ep,
DDI_DMA_SYNC_FORDEV,
0, hp->len);
}
}
}
}
}
/*
 * Copy 'len' bytes from 'from' to 'to', one byte at a time.
 * Takes a 64-bit length; no overlap handling (forward copy only).
 */
static void
xbcopy(void *from, void *to, u_longlong_t len)
{
	uchar_t *src = from;
	uchar_t *dst = to;
	u_longlong_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[i];
}
/*
 * Our bus_dma_allochdl vector.
 * For drivers under test: allocate a shadow for the new DMA handle,
 * hook the handle's fault-check routine, redirect the devinfo's
 * bind/unbind function pointers at our intercepts (only if they still
 * point at the saved originals), and insert the shadow on the global
 * list and both hash chains.  Honors the caller's sleep/nosleep choice
 * and the DDI "timeout the waitfp" retry convention on alloc failure.
 */
static int
bofi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
int retval = DDI_DMA_NORESOURCES;
struct bofi_shadow *hp, *xhp;
int maxrnumber = 0;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
ddi_dma_impl_t *mp;
/* not a driver under test - straight pass-through */
if (!driver_under_test(rdip))
return (save_bus_ops.bus_dma_allochdl(dip, rdip, attrp,
waitfp, arg, handlep));
hp = kmem_zalloc(sizeof (struct bofi_shadow),
((waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP));
if (hp == NULL) {
/* standard DDI retry: call waitfp again via timeout */
if (waitfp != DDI_DMA_DONTWAIT)
(void) timeout((void (*)())(uintptr_t)waitfp, arg, 10);
return (retval);
}
(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
hp->instance = ddi_get_instance(rdip);
hp->dip = rdip;
hp->link = NULL;
hp->type = BOFI_NULL;
retval = save_bus_ops.bus_dma_allochdl(dip, rdip, attrp, waitfp, arg,
handlep);
if (retval != DDI_SUCCESS) {
kmem_free(hp, sizeof (struct bofi_shadow));
return (retval);
}
hp->hdl.dma_handle = *handlep;
mp = (ddi_dma_impl_t *)*handlep;
mp->dmai_fault_check = bofi_check_dma_hdl;
/* redirect bind/unbind at our intercepts if not already redirected */
if (save_bus_ops.bus_dma_bindhdl == DEVI(rdip)->devi_bus_dma_bindfunc)
DEVI(rdip)->devi_bus_dma_bindfunc = bofi_dma_bindhdl;
if (save_bus_ops.bus_dma_unbindhdl ==
DEVI(rdip)->devi_bus_dma_unbindfunc)
DEVI(rdip)->devi_bus_dma_unbindfunc = bofi_dma_unbindhdl;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
/*
 * assign this shadow a synthetic "register number" one greater
 * than any existing DMA shadow for the same driver instance,
 * so errdefs can target individual DMA handles by rnumber
 */
dhashp = HDL_DHASH(hp->dip);
for (xhp = dhashp->dnext; xhp != dhashp; xhp = xhp->dnext)
if (ddi_name_to_major(xhp->name) ==
ddi_name_to_major(hp->name) &&
xhp->instance == hp->instance &&
(xhp->type == BOFI_DMA_HDL ||
xhp->type == BOFI_NULL))
if (xhp->rnumber >= maxrnumber) {
if (xhp->rnumber == INT_MAX)
maxrnumber = INT_MAX;
else
maxrnumber = xhp->rnumber + 1;
}
hp->rnumber = maxrnumber;
/* insert on the global shadow list and both hash chains */
hp->next = shadow_list.next;
shadow_list.next->prev = hp;
hp->prev = &shadow_list;
shadow_list.next = hp;
hhashp = HDL_HHASH(*handlep);
hp->hnext = hhashp->hnext;
hhashp->hnext->hprev = hp;
hp->hprev = hhashp;
hhashp->hnext = hp;
dhashp = HDL_DHASH(hp->dip);
hp->dnext = dhashp->dnext;
dhashp->dnext->dprev = hp;
hp->dprev = dhashp;
dhashp->dnext = hp;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
/*
 * Our bus_dma_freehdl vector.
 * Look up our shadow for the handle, let the real freehdl run, then
 * (if it succeeded and we had a shadow) unlink and free the shadow.
 * A shadow that is still bound (type != BOFI_NULL) means the driver
 * under test freed a bound handle - deliberately panic to flag it.
 */
static int
bofi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
retval = save_bus_ops.bus_dma_freehdl(dip, rdip, handle);
if (retval != DDI_SUCCESS) {
return (retval);
}
/* hp == hhashp means the handle had no shadow (not under test) */
if (hp == hhashp)
return (retval);
/*
 * NOTE(review): hp found above is re-used after the mutexes were
 * dropped and reacquired; this assumes no concurrent teardown of
 * the same handle - verify against the framework's locking rules.
 */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
if (hp->type != BOFI_NULL)
panic("driver freeing bound dma_handle");
/* unlink from hash chains and the global shadow list */
hp->hnext->hprev = hp->hprev;
hp->hprev->hnext = hp->hnext;
hp->dnext->dprev = hp->dprev;
hp->dprev->dnext = hp->dnext;
hp->next->prev = hp->prev;
hp->prev->next = hp->next;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
kmem_free(hp, sizeof (struct bofi_shadow));
return (retval);
}
/*
 * Our bus_dma_bindhdl intercept (installed via devi_bus_dma_bindfunc).
 * Maps the DMA object into kernel virtual space so corruption can be
 * applied.  With bofi_sync_check set, the object is double-buffered:
 * a kernel buffer is allocated with ddi_umem_alloc(), the caller's data
 * copied in (for writes), and the bind request rewritten to point at
 * the buffer - so every ddi_dma_sync() can be interposed exactly.
 * On success, DMP_NOSYNC is cleared so sync calls reach bofi_dma_flush,
 * the shadow becomes BOFI_DMA_HDL and is chained onto matching errdefs.
 * Failure paths follow the DDI "timeout the waitfp" retry convention.
 */
static int
bofi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
ddi_dma_handle_t handle, struct ddi_dma_req *dmareqp,
ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
int retval = DDI_DMA_NORESOURCES;
auto struct ddi_dma_req dmareq;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
ddi_dma_impl_t *mp;
unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
/* find our shadow for this handle */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (hp == hhashp) {
/* no shadow - not under test, pass straight through */
return save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
cookiep, ccountp);
}
/* already bound - the framework would reject this anyway */
if (hp->type != BOFI_NULL)
return (DDI_DMA_INUSE);
hp->flags = dmareqp->dmar_flags;
/* remember how the object was described so we can map it out later */
if (dmareqp->dmar_object.dmao_type == DMA_OTYP_PAGES) {
hp->map_flags = B_PAGEIO;
hp->map_pp = dmareqp->dmar_object.dmao_obj.pp_obj.pp_pp;
} else if (dmareqp->dmar_object.dmao_obj.virt_obj.v_priv != NULL) {
hp->map_flags = B_SHADOW;
hp->map_pplist = dmareqp->dmar_object.dmao_obj.virt_obj.v_priv;
} else {
hp->map_flags = 0;
}
/* map the object into kernel virtual space */
hp->addr = ddi_dmareq_mapin(dmareqp, &hp->mapaddr, &hp->len);
if (hp->addr == NULL)
goto error;
if (bofi_sync_check) {
/*
 * double-buffer: allocate a kernel buffer with the same
 * page offset as the original, copy in for writes, and
 * rewrite the request as a kernel-vaddr request on it
 */
hp->origaddr = hp->addr;
hp->allocaddr = ddi_umem_alloc(
((uintptr_t)hp->addr & pagemask) + hp->len,
(dmareqp->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP,
&hp->umem_cookie);
if (hp->allocaddr == NULL)
goto error;
hp->addr = hp->allocaddr + ((uintptr_t)hp->addr & pagemask);
if (dmareqp->dmar_flags & DDI_DMA_WRITE)
xbcopy(hp->origaddr, hp->addr, hp->len);
dmareq = *dmareqp;
dmareq.dmar_object.dmao_size = hp->len;
dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
dmareq.dmar_object.dmao_obj.virt_obj.v_as = &kas;
dmareq.dmar_object.dmao_obj.virt_obj.v_addr = hp->addr;
dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
dmareqp = &dmareq;
}
retval = save_bus_ops.bus_dma_bindhdl(dip, rdip, handle, dmareqp,
cookiep, ccountp);
if (retval != DDI_SUCCESS)
goto error2;
/* ensure ddi_dma_sync() calls are not optimized away */
mp = (ddi_dma_impl_t *)handle;
mp->dmai_rflags &= ~DMP_NOSYNC;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->type = BOFI_DMA_HDL;
chain_on_errdefs(hp);
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
error:
/* resource failure before the real bind - schedule a retry callback */
if (dmareqp->dmar_fp != DDI_DMA_DONTWAIT) {
(void) timeout((void (*)())(uintptr_t)dmareqp->dmar_fp,
dmareqp->dmar_arg, 10);
}
error2:
/* undo the mapin/double-buffer; shadow itself stays for freehdl */
if (hp) {
ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
hp->map_pp, hp->map_pplist);
if (bofi_sync_check && hp->allocaddr)
ddi_umem_free(hp->umem_cookie);
hp->mapaddr = NULL;
hp->allocaddr = NULL;
hp->origaddr = NULL;
}
return (retval);
}
/*
 * Our bus_dma_unbindhdl intercept.
 * Lets the real unbind run first; then, for a shadowed handle, applies
 * any pending DMA-read corruption (last chance to corrupt the CPU view),
 * returns the errdef links to the freelist, copies the double-buffer
 * back to the caller's memory when bofi_sync_check is set, and unmaps.
 */
static int
bofi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
struct bofi_link *lp, *next_lp;
struct bofi_errent *ep;
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
retval = save_bus_ops.bus_dma_unbindhdl(dip, rdip, handle);
if (retval != DDI_SUCCESS)
return (retval);
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle)
break;
if (hp == hhashp) {
/* no shadow - not a driver under test */
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
if (hp->type == BOFI_NULL)
panic("driver unbinding unbound dma_handle");
/*
 * apply any outstanding read-corruption, then give all errdef
 * links back to the freelist
 */
for (lp = hp->link; lp != NULL; ) {
next_lp = lp->link;
ep = lp->errentp;
if ((ep->errdef.access_type & BOFI_DMA_R) &&
(hp->flags & DDI_DMA_READ) &&
(ep->state & BOFI_DEV_ACTIVE)) {
do_dma_corrupt(hp, ep, DDI_DMA_SYNC_FORCPU, 0, hp->len);
}
lp->link = bofi_link_freelist;
bofi_link_freelist = lp;
lp = next_lp;
}
hp->link = NULL;
hp->type = BOFI_NULL;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
/* for reads, propagate the (possibly corrupted) buffer to the caller */
if (bofi_sync_check && (hp->flags & DDI_DMA_READ))
if (hp->allocaddr)
xbcopy(hp->addr, hp->origaddr, hp->len);
ddi_dmareq_mapout(hp->mapaddr, hp->len, hp->map_flags,
hp->map_pp, hp->map_pplist);
if (bofi_sync_check && hp->allocaddr)
ddi_umem_free(hp->umem_cookie);
hp->mapaddr = NULL;
hp->allocaddr = NULL;
hp->origaddr = NULL;
return (retval);
}
/*
 * Our bus_dma_flush (ddi_dma_sync) vector.
 * For CPU-bound views (FORCPU/FORKERNEL) the real flush runs first so the
 * CPU sees the device's data before we corrupt/copy it; for FORDEV the
 * real flush runs last so the device sees what we corrupted.  In between,
 * for a shadowed handle, the bofi_sync_check double-buffer is kept in
 * step with the caller's memory and any matching active errdefs get
 * do_dma_corrupt() applied over the synced range (len == 0 means "to the
 * end of the mapping").
 *
 * Fix: retval was returned uninitialized when 'flags' was none of
 * FORCPU/FORKERNEL/FORDEV; initialize it to DDI_SUCCESS.
 */
static int
bofi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
ddi_dma_handle_t handle, off_t off, size_t len, uint_t flags)
{
struct bofi_link *lp;
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval = DDI_SUCCESS;
if (flags == DDI_DMA_SYNC_FORCPU || flags == DDI_DMA_SYNC_FORKERNEL) {
/* sync for the CPU before we look at / copy the data */
retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
len, flags);
if (retval != DDI_SUCCESS)
return (retval);
}
/* find our shadow (only bound handles count) */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle &&
hp->type == BOFI_DMA_HDL)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (hp != hhashp) {
/* device-bound sync: refresh double-buffer from caller memory */
if (bofi_sync_check && flags == DDI_DMA_SYNC_FORDEV)
if (hp->allocaddr)
xbcopy(hp->origaddr+off, hp->addr+off,
len ? len : (hp->len - off));
/* apply corruption for errdefs matching this sync direction */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
for (lp = hp->link; lp != NULL; lp = lp->link) {
ep = lp->errentp;
if ((((ep->errdef.access_type & BOFI_DMA_R) &&
(flags == DDI_DMA_SYNC_FORCPU ||
flags == DDI_DMA_SYNC_FORKERNEL)) ||
((ep->errdef.access_type & BOFI_DMA_W) &&
(flags == DDI_DMA_SYNC_FORDEV))) &&
(ep->state & BOFI_DEV_ACTIVE)) {
do_dma_corrupt(hp, ep, flags, off,
len ? len : (hp->len - off));
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
/* CPU-bound sync: propagate double-buffer to caller memory */
if (bofi_sync_check && (flags == DDI_DMA_SYNC_FORCPU ||
flags == DDI_DMA_SYNC_FORKERNEL))
if (hp->allocaddr)
xbcopy(hp->addr+off, hp->origaddr+off,
len ? len : (hp->len - off));
}
if (flags == DDI_DMA_SYNC_FORDEV)
/* sync for the device after corruption has been applied */
retval = save_bus_ops.bus_dma_flush(dip, rdip, handle, off,
len, flags);
return (retval);
}
/*
 * Our bus_dma_win vector.
 * Pass through to the real routine; if the handle is one we shadow,
 * re-clear DMP_NOSYNC on it so that ddi_dma_sync() calls continue to
 * come through bofi_dma_flush for the new window.
 */
static int
bofi_dma_win(dev_info_t *dip, dev_info_t *rdip,
ddi_dma_handle_t handle, uint_t win, off_t *offp,
size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval;
ddi_dma_impl_t *mp;
retval = save_bus_ops.bus_dma_win(dip, rdip, handle, win, offp, lenp,
cookiep, ccountp);
if (retval != DDI_SUCCESS)
return (retval);
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle)
break;
if (hp != hhashp) {
/* shadowed handle - force syncs to stay visible to us */
mp = (ddi_dma_impl_t *)handle;
mp->dmai_rflags &= ~DMP_NOSYNC;
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
/*
 * Our bus_dma_ctl vector.
 * Pass the request through first.  For drivers under test:
 *  - DDI_DMA_RESERVE (sparc only): set up fast-DVMA shadowing.
 *  - DDI_DMA_RELEASE: tear down the dummy shadow created by
 *    bofi_dvma_reserve() plus its per-page shadow array (dummyhp->len
 *    is the page count); panics if any per-page shadow is still loaded.
 * Everything else just returns the real routine's result.
 */
static int
bofi_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval;
int i;
struct bofi_shadow *dummyhp;
retval = save_bus_ops.bus_dma_ctl(dip, rdip, handle, request, offp,
lenp, objp, flags);
if (retval != DDI_SUCCESS)
return (retval);
if (!driver_under_test(rdip))
return (DDI_SUCCESS);
#if defined(__sparc)
if (request == DDI_DMA_RESERVE) {
bofi_dvma_reserve(rdip, *(ddi_dma_handle_t *)objp);
return (DDI_SUCCESS);
}
#endif
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == handle)
break;
if (hp == hhashp) {
/* no shadow for this handle - nothing more to do */
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
switch (request) {
case DDI_DMA_RELEASE:
/* unhook the dummy shadow from its hash chain */
dummyhp = hp;
dummyhp->hnext->hprev = dummyhp->hprev;
dummyhp->hprev->hnext = dummyhp->hnext;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
/* free each per-page shadow built by bofi_dvma_reserve() */
for (i = 0; i < dummyhp->len; i++) {
hp = dummyhp->hparrayp[i];
if (hp->type != BOFI_NULL)
panic("driver releasing loaded dvma");
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->dnext->dprev = hp->dprev;
hp->dprev->dnext = hp->dnext;
hp->next->prev = hp->prev;
hp->prev->next = hp->next;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (bofi_sync_check && hp->allocaddr)
ddi_umem_free(hp->umem_cookie);
kmem_free(hp, sizeof (struct bofi_shadow));
}
kmem_free(dummyhp->hparrayp, dummyhp->len *
sizeof (struct bofi_shadow *));
kmem_free(dummyhp, sizeof (struct bofi_shadow));
return (retval);
default:
break;
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
#if defined(__sparc)
/*
 * Set up fast-DVMA shadowing after a successful DDI_DMA_RESERVE
 * (sparc only; called from bofi_dma_ctl).
 * Builds a "dummy" shadow representing the reservation plus one
 * per-page shadow for each reserved dvma page (count comes from
 * dmai_ndvmapages).  If the handle bypasses the nexus, the nexus's
 * fast-dvma ops vector is saved and replaced with bofi_dvma_ops so
 * kaddr_load/unload/sync can be intercepted.
 */
static void
bofi_dvma_reserve(dev_info_t *rdip, ddi_dma_handle_t handle)
{
struct bofi_shadow *hp;
struct bofi_shadow *dummyhp;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
ddi_dma_impl_t *mp;
struct fast_dvma *nexus_private;
int i, count;
mp = (ddi_dma_impl_t *)handle;
count = mp->dmai_ndvmapages;
dummyhp = kmem_zalloc(sizeof (*dummyhp), KM_SLEEP);
if (mp->dmai_rflags & DMP_BYPASSNEXUS) {
/* save real fast-dvma ops and interpose ours */
nexus_private = (struct fast_dvma *)mp->dmai_nexus_private;
dummyhp->save.dvma_ops = *(nexus_private->ops);
nexus_private->ops = &bofi_dvma_ops;
}
(void) strncpy(dummyhp->name, ddi_get_name(rdip), NAMESIZE);
dummyhp->instance = ddi_get_instance(rdip);
dummyhp->rnumber = -1;
dummyhp->dip = rdip;
dummyhp->len = count;
dummyhp->hdl.dma_handle = handle;
dummyhp->link = NULL;
dummyhp->type = BOFI_NULL;
dummyhp->hparrayp = kmem_alloc(count *
sizeof (struct bofi_shadow *), KM_SLEEP);
/* one shadow per reserved dvma page */
for (i = 0; i < count; i++) {
hp = kmem_zalloc(sizeof (*hp), KM_SLEEP);
(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
hp->instance = ddi_get_instance(rdip);
hp->rnumber = -1;
hp->dip = rdip;
hp->hdl.dma_handle = 0;
hp->link = NULL;
hp->type = BOFI_NULL;
if (bofi_sync_check) {
unsigned long pagemask = ddi_ptob(rdip, 1) - 1;
/* allocate a page-sized double-buffer up front */
hp->allocaddr = ddi_umem_alloc(
((int)(uintptr_t)hp->addr & pagemask)
+ pagemask + 1,
KM_SLEEP, &hp->umem_cookie);
hp->addr = hp->allocaddr +
((int)(uintptr_t)hp->addr & pagemask);
}
/* per-page shadows go on the global list and dip hash only */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->next = shadow_list.next;
shadow_list.next->prev = hp;
hp->prev = &shadow_list;
shadow_list.next = hp;
dhashp = HDL_DHASH(hp->dip);
hp->dnext = dhashp->dnext;
dhashp->dnext->dprev = hp;
hp->dprev = dhashp;
dhashp->dnext = hp;
dummyhp->hparrayp[i] = hp;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
}
/* only the dummy shadow goes on the handle hash chain */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(handle);
dummyhp->hnext = hhashp->hnext;
hhashp->hnext->hprev = dummyhp;
dummyhp->hprev = hhashp;
hhashp->hnext = dummyhp;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
}
/*
 * Intercept of the fast-dvma dvma_kaddr_load op.
 * Finds the dummy shadow for the reservation, picks the per-page shadow
 * for 'index', substitutes the double-buffer address when bofi_sync_check
 * is on, calls the real load, marks the page shadow loaded, and chains
 * it onto matching errdefs (same LLSZMASK aligned-word overlap test as
 * chain_on_errdefs()).  Panics if there was no preceding reserve or the
 * slot is already loaded.
 */
static void
bofi_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
ddi_dma_cookie_t *cp)
{
struct bofi_shadow *dummyhp;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
struct bofi_errent *ep;
struct bofi_link *lp;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(h);
for (dummyhp = hhashp->hnext; dummyhp != hhashp;
dummyhp = dummyhp->hnext)
if (dummyhp->hdl.dma_handle == h)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (dummyhp == hhashp) {
panic("driver dvma_kaddr_load with no reserve");
}
hp = dummyhp->hparrayp[index];
if (hp->type != BOFI_NULL)
panic("driver loading loaded dvma");
if (bofi_sync_check && hp->allocaddr) {
/* load the double-buffer instead of the caller's address */
hp->origaddr = a;
a = hp->addr;
} else
hp->addr = a;
hp->len = len;
/* call the real nexus load op */
dummyhp->save.dvma_ops.dvma_kaddr_load(h, a, len, index, cp);
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->type = BOFI_DMA_HDL;
/* link onto matching DMA errdefs (see chain_on_errdefs) */
for (ep = errent_listp; ep != NULL; ep = ep->next) {
if (ddi_name_to_major(hp->name) ==
ddi_name_to_major(ep->name) &&
hp->instance == ep->errdef.instance &&
(ep->errdef.rnumber == -1 ||
hp->rnumber == ep->errdef.rnumber) &&
((ep->errdef.access_type & BOFI_DMA_RW) &&
(((uintptr_t)(hp->addr + ep->errdef.offset +
ep->errdef.len) & ~LLSZMASK) >
((uintptr_t)((hp->addr + ep->errdef.offset) +
LLSZMASK) & ~LLSZMASK)))) {
lp = bofi_link_freelist;
if (lp != NULL) {
bofi_link_freelist = lp->link;
lp->errentp = ep;
lp->link = hp->link;
hp->link = lp;
}
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
}
/*
 * Intercept of the fast-dvma dvma_unload op.
 * Calls the real unload, then - for CPU-bound views - applies any
 * pending DMA-read corruption, returns errdef links to the freelist,
 * marks the page shadow unloaded, and copies the double-buffer back to
 * the caller's memory when bofi_sync_check is set.
 */
static void
bofi_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
struct bofi_link *lp, *next_lp;
struct bofi_errent *ep;
struct bofi_shadow *dummyhp;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(h);
for (dummyhp = hhashp->hnext; dummyhp != hhashp;
dummyhp = dummyhp->hnext)
if (dummyhp->hdl.dma_handle == h)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (dummyhp == hhashp) {
panic("driver dvma_unload with no reserve");
}
/* real unload first, then corrupt what the CPU will see */
dummyhp->save.dvma_ops.dvma_unload(h, index, view);
hp = dummyhp->hparrayp[index];
if (hp->type == BOFI_NULL)
panic("driver unloading unloaded dvma");
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
for (lp = hp->link; lp != NULL; ) {
next_lp = lp->link;
ep = lp->errentp;
if ((ep->errdef.access_type & BOFI_DMA_R) &&
(view == DDI_DMA_SYNC_FORCPU ||
view == DDI_DMA_SYNC_FORKERNEL) &&
(ep->state & BOFI_DEV_ACTIVE)) {
do_dma_corrupt(hp, ep, view, 0, hp->len);
}
lp->link = bofi_link_freelist;
bofi_link_freelist = lp;
lp = next_lp;
}
hp->link = NULL;
hp->type = BOFI_NULL;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
/* propagate (possibly corrupted) double-buffer back to the caller */
if (bofi_sync_check &&
(view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL))
if (hp->allocaddr)
xbcopy(hp->addr, hp->origaddr, hp->len);
}
/*
 * Intercept of the fast-dvma dvma_sync op.
 * Ordering mirrors bofi_dma_flush: for CPU-bound views the real sync
 * runs first (so corruption is applied to what the device delivered,
 * then copied out to the caller); for FORDEV the caller's data is
 * copied into the double-buffer, corrupted, and the real sync runs last.
 */
static void
bofi_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
struct bofi_link *lp;
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *dummyhp;
struct bofi_shadow *hhashp;
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hhashp = HDL_HHASH(h);
for (dummyhp = hhashp->hnext; dummyhp != hhashp;
dummyhp = dummyhp->hnext)
if (dummyhp->hdl.dma_handle == h)
break;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (dummyhp == hhashp) {
panic("driver dvma_sync with no reserve");
}
hp = dummyhp->hparrayp[index];
if (hp->type == BOFI_NULL)
panic("driver syncing unloaded dvma");
if (view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)
/* CPU-bound: real sync before we touch the data */
dummyhp->save.dvma_ops.dvma_sync(h, index, view);
if (bofi_sync_check && view == DDI_DMA_SYNC_FORDEV) {
/* device-bound: refresh double-buffer from caller memory */
if (hp->allocaddr)
xbcopy(hp->origaddr, hp->addr, hp->len);
}
/* apply corruption for errdefs matching this sync direction */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
for (lp = hp->link; lp != NULL; lp = lp->link) {
ep = lp->errentp;
if ((((ep->errdef.access_type & BOFI_DMA_R) &&
(view == DDI_DMA_SYNC_FORCPU ||
view == DDI_DMA_SYNC_FORKERNEL)) ||
((ep->errdef.access_type & BOFI_DMA_W) &&
(view == DDI_DMA_SYNC_FORDEV))) &&
(ep->state & BOFI_DEV_ACTIVE)) {
do_dma_corrupt(hp, ep, view, 0, hp->len);
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
if (bofi_sync_check &&
(view == DDI_DMA_SYNC_FORCPU || view == DDI_DMA_SYNC_FORKERNEL)) {
/* CPU-bound: propagate double-buffer to caller memory */
if (hp->allocaddr)
xbcopy(hp->addr, hp->origaddr, hp->len);
}
if (view == DDI_DMA_SYNC_FORDEV)
/* device-bound: real sync after corruption */
dummyhp->save.dvma_ops.dvma_sync(h, index, view);
}
#endif
/*
 * Wrapper installed in place of the driver's interrupt handler
 * (see DDI_INTROP_ADDISR in bofi_intr_ops).  'xp' is our bofi_shadow.
 * With no errdefs linked, just calls the saved handler.  Otherwise,
 * per active errdef: optionally logs the event, decrements access/fail
 * counts, and applies the chosen interrupt fault - delay (busy-wait,
 * skipped for hilevel interrupts), lose (call handler zero times), or
 * extra (call it 1 + operand times).  The handler is then invoked
 * intr_count times; a decaying unclaimed counter (halved on each
 * claimed return, bumped on unclaimed, >= 20 counts as "detected")
 * models the system's jabber protection, and we panic if we injected
 * more than 1000 interrupts without it tripping.  Returns the first
 * invocation's claimed/unclaimed status.
 */
static uint_t
bofi_intercept_intr(caddr_t xp, caddr_t arg2)
{
struct bofi_errent *ep;
struct bofi_link *lp;
struct bofi_shadow *hp;
int intr_count = 1;
int i;
uint_t retval = DDI_INTR_UNCLAIMED;
uint_t result;
int unclaimed_counter = 0;
int jabber_detected = 0;
hp = (struct bofi_shadow *)xp;
/* fast path: no errdefs attached to this interrupt */
if (hp->link == NULL)
return (hp->save.intr.int_handler
(hp->save.intr.int_handler_arg1, arg2));
mutex_enter(&bofi_mutex);
for (lp = hp->link; lp != NULL; lp = lp->link) {
ep = lp->errentp;
if (ep->state & BOFI_DEV_ACTIVE) {
if ((ep->errdef.access_count ||
ep->errdef.fail_count) &&
(ep->errdef.access_type & BOFI_LOG))
log_acc_event(ep, BOFI_INTR, 0, 0, 1, 0);
if (ep->errdef.access_count > 1) {
/* still counting down to the fault */
ep->errdef.access_count--;
} else if (ep->errdef.fail_count > 0) {
/* inject the configured interrupt fault */
ep->errdef.fail_count--;
ep->errdef.access_count = 0;
if (ep->errstate.fail_time == 0)
ep->errstate.fail_time = bofi_gettime();
switch (ep->errdef.optype) {
case BOFI_DELAY_INTR:
/* busy-wait, but never at high-level PIL */
if (!hp->hilevel) {
drv_usecwait
(ep->errdef.operand);
}
break;
case BOFI_LOSE_INTR:
intr_count = 0;
break;
case BOFI_EXTRA_INTR:
intr_count += ep->errdef.operand;
break;
default:
break;
}
}
}
}
mutex_exit(&bofi_mutex);
/* deliver the interrupt(s) and watch for jabber detection */
for (i = 0; i < intr_count; i++) {
result = hp->save.intr.int_handler
(hp->save.intr.int_handler_arg1, arg2);
if (result == DDI_INTR_CLAIMED)
unclaimed_counter >>= 1;
else if (++unclaimed_counter >= 20)
jabber_detected = 1;
if (i == 0)
retval = result;
}
if (intr_count > 1000 && !jabber_detected)
panic("undetected interrupt jabber: %s%d",
hp->name, hp->instance);
return (retval);
}
/*
 * Fault-check routine installed on shadowed access handles
 * (ahi_fault_check).  Reports a fault when any linked, active errdef
 * has exhausted its access count and has bit 0 of acc_chk set.
 * NOTE(review): mutex_tryenter is used rather than mutex_enter -
 * presumably because this can be called from contexts where bofi_mutex
 * may already be held; if the lock is unavailable we report "no fault".
 */
static int
bofi_check_acc_hdl(ddi_acc_impl_t *handle)
{
struct bofi_shadow *hp;
struct bofi_link *lp;
uint_t result = 0;
hp = handle->ahi_common.ah_bus_private;
if (!hp->link || !mutex_tryenter(&bofi_mutex)) {
return (0);
}
for (lp = hp->link; lp != NULL; lp = lp->link) {
if (lp->errentp->errdef.access_count == 0 &&
(lp->errentp->state & BOFI_DEV_ACTIVE)) {
result = (lp->errentp->errdef.acc_chk & 1);
}
}
mutex_exit(&bofi_mutex);
return (result);
}
/*
 * Fault-check routine installed on shadowed DMA handles
 * (dmai_fault_check).  Like bofi_check_acc_hdl() but looks the shadow
 * up via the handle hash and tests bit 1 of acc_chk.  Returns 0 (no
 * fault) when the handle has no shadow, no links, or bofi_mutex is
 * unavailable (tryenter, same rationale as bofi_check_acc_hdl).
 */
static int
bofi_check_dma_hdl(ddi_dma_impl_t *handle)
{
struct bofi_shadow *hp;
struct bofi_link *lp;
struct bofi_shadow *hhashp;
uint_t result = 0;
if (!mutex_tryenter(&bofi_mutex)) {
return (0);
}
hhashp = HDL_HHASH(handle);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext)
if (hp->hdl.dma_handle == (ddi_dma_handle_t)handle)
break;
if (hp == hhashp) {
mutex_exit(&bofi_mutex);
return (0);
}
if (!hp->link) {
mutex_exit(&bofi_mutex);
return (0);
}
for (lp = hp->link; lp != NULL; lp = lp->link) {
if (lp->errentp->errdef.access_count == 0 &&
(lp->errentp->state & BOFI_DEV_ACTIVE)) {
result = ((lp->errentp->errdef.acc_chk & 2) ? 1 : 0);
}
}
mutex_exit(&bofi_mutex);
return (result);
}
/*
 * Our bus_post_event vector.
 * Non-fault events pass straight through.  For a DDI_DEVI_FAULT_EVENT,
 * find all shadows for the faulting dip and, for each linked errdef,
 * bump its message count; if this message is the first or more severe
 * than what is recorded, record its time/severity/text and trigger the
 * errdef's soft interrupt (presumably to notify a waiting consumer -
 * confirm against the softintr handler).  The event is then forwarded
 * to the real bus_post_event().
 */
static int
bofi_post_event(dev_info_t *dip, dev_info_t *rdip,
ddi_eventcookie_t eventhdl, void *impl_data)
{
ddi_eventcookie_t ec;
struct ddi_fault_event_data *arg;
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *dhashp;
struct bofi_link *lp;
ASSERT(eventhdl);
if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) != DDI_SUCCESS)
return (DDI_FAILURE);
/* only intercept fault events */
if (ec != eventhdl)
return (save_bus_ops.bus_post_event(dip, rdip, eventhdl,
impl_data));
arg = (struct ddi_fault_event_data *)impl_data;
mutex_enter(&bofi_mutex);
dhashp = HDL_DHASH(arg->f_dip);
for (hp = dhashp->dnext; hp != dhashp; hp = hp->dnext) {
if (hp->dip == arg->f_dip) {
for (lp = hp->link; lp != NULL; lp = lp->link) {
ep = lp->errentp;
ep->errstate.errmsg_count++;
/* lower f_impact value == more severe */
if ((ep->errstate.msg_time == 0 ||
ep->errstate.severity > arg->f_impact) &&
(ep->state & BOFI_DEV_ACTIVE)) {
ep->errstate.msg_time = bofi_gettime();
ep->errstate.severity = arg->f_impact;
(void) strncpy(ep->errstate.buffer,
arg->f_message, ERRMSGSIZE);
ddi_trigger_softintr(ep->softintr_id);
}
}
}
}
mutex_exit(&bofi_mutex);
return (save_bus_ops.bus_post_event(dip, rdip, eventhdl, impl_data));
}
/*
 * Sysevent subscriber callback for FM ereports.
 * Extracts the ereport class and detector device path from the event's
 * attribute list, determines whether the class is a service-impact
 * ereport (ereport.io.service.*), then walks every shadow whose device
 * path matches the detector.  For each linked active errdef without a
 * recorded message: service ereports update the recorded severity from
 * the class suffix (lost/degraded/restored/unaffected); other classes
 * are captured as the message text.  When both a message and a
 * below-restored severity are present, the message time is stamped and
 * the errdef's soft interrupt triggered.
 */
static int
bofi_fm_ereport_callback(sysevent_t *ev, void *cookie)
{
char *class = "";
char *path = "";
char *ptr;
nvlist_t *nvlist;
nvlist_t *detector;
ddi_fault_impact_t impact;
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_link *lp;
char service_class[FM_MAX_CLASS];
char hppath[MAXPATHLEN];
int service_ereport = 0;
(void) sysevent_get_attr_list(ev, &nvlist);
(void) nvlist_lookup_string(nvlist, FM_CLASS, &class);
if (nvlist_lookup_nvlist(nvlist, FM_EREPORT_DETECTOR, &detector) == 0)
(void) nvlist_lookup_string(detector, FM_FMRI_DEV_PATH, &path);
/* build "ereport.io.service." and compare without the trailing dot */
(void) snprintf(service_class, FM_MAX_CLASS, "%s.%s.%s.",
FM_EREPORT_CLASS, DDI_IO_CLASS, DDI_FM_SERVICE_IMPACT);
if (strncmp(class, service_class, strlen(service_class) - 1) == 0)
service_ereport = 1;
mutex_enter(&bofi_mutex);
for (hp = shadow_list.next; hp != &shadow_list; hp = hp->next) {
(void) ddi_pathname(hp->dip, hppath);
if (strcmp(path, hppath) != 0)
continue;
for (lp = hp->link; lp != NULL; lp = lp->link) {
ep = lp->errentp;
ep->errstate.errmsg_count++;
if (!(ep->state & BOFI_DEV_ACTIVE))
continue;
/* only the first message per errdef is recorded */
if (ep->errstate.msg_time != 0)
continue;
if (service_ereport) {
/* map class suffix to a service impact level */
ptr = class + strlen(service_class);
if (strcmp(ptr, DDI_FM_SERVICE_LOST) == 0)
impact = DDI_SERVICE_LOST;
else if (strcmp(ptr,
DDI_FM_SERVICE_DEGRADED) == 0)
impact = DDI_SERVICE_DEGRADED;
else if (strcmp(ptr,
DDI_FM_SERVICE_RESTORED) == 0)
impact = DDI_SERVICE_RESTORED;
else
impact = DDI_SERVICE_UNAFFECTED;
/* lower value == more severe; keep the worst */
if (ep->errstate.severity > impact)
ep->errstate.severity = impact;
} else if (ep->errstate.buffer[0] == '\0') {
(void) strncpy(ep->errstate.buffer, class,
ERRMSGSIZE);
}
if (ep->errstate.buffer[0] != '\0' &&
ep->errstate.severity < DDI_SERVICE_RESTORED) {
ep->errstate.msg_time = bofi_gettime();
ddi_trigger_softintr(ep->softintr_id);
}
}
}
nvlist_free(nvlist);
mutex_exit(&bofi_mutex);
return (0);
}
/*
 * Our bus_intr_op vector.
 * DDI_INTROP_ADDISR (driver under test): build a shadow that saves the
 * driver's handler/arg, substitute bofi_intercept_intr as the handler
 * with the shadow as its first argument, register with the real
 * bus_intr_op, insert the shadow on the list/hashes (hashed by ih_inum),
 * and link it onto matching BOFI_INTR errdefs.
 * DDI_INTROP_REMISR: real removal first, then find and tear down the
 * matching shadow.  All other ops pass straight through.
 */
static int
bofi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
ddi_intr_handle_impl_t *hdlp, void *result)
{
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
struct bofi_errent *ep;
struct bofi_link *lp, *next_lp;
switch (intr_op) {
case DDI_INTROP_ADDISR:
if (!driver_under_test(rdip))
return (save_bus_ops.bus_intr_op(dip, rdip,
intr_op, hdlp, result));
hp = kmem_zalloc(sizeof (struct bofi_shadow), KM_SLEEP);
(void) strncpy(hp->name, ddi_get_name(rdip), NAMESIZE);
hp->instance = ddi_get_instance(rdip);
/* save the real handler, interpose ours with hp as its arg */
hp->save.intr.int_handler = hdlp->ih_cb_func;
hp->save.intr.int_handler_arg1 = hdlp->ih_cb_arg1;
hdlp->ih_cb_func = (ddi_intr_handler_t *)bofi_intercept_intr;
hdlp->ih_cb_arg1 = (caddr_t)hp;
hp->bofi_inum = hdlp->ih_inum;
hp->dip = rdip;
hp->link = NULL;
hp->type = BOFI_INT_HDL;
/* BOFI_DELAY_INTR must not busy-wait at high-level PIL */
if (hdlp->ih_pri >= ddi_intr_get_hilevel_pri())
hp->hilevel = 1;
else
hp->hilevel = 0;
retval = save_bus_ops.bus_intr_op(dip, rdip,
intr_op, hdlp, result);
if (retval != DDI_SUCCESS) {
kmem_free(hp, sizeof (struct bofi_shadow));
return (retval);
}
/* insert on the global list and both hash chains */
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
hp->next = shadow_list.next;
shadow_list.next->prev = hp;
hp->prev = &shadow_list;
shadow_list.next = hp;
hhashp = HDL_HHASH(hdlp->ih_inum);
hp->hnext = hhashp->hnext;
hhashp->hnext->hprev = hp;
hp->hprev = hhashp;
hhashp->hnext = hp;
dhashp = HDL_DHASH(hp->dip);
hp->dnext = dhashp->dnext;
dhashp->dnext->dprev = hp;
hp->dprev = dhashp;
dhashp->dnext = hp;
/* link onto every matching active interrupt errdef */
for (ep = errent_listp; ep != NULL; ep = ep->next) {
if (ddi_name_to_major(hp->name) ==
ddi_name_to_major(ep->name) &&
hp->instance == ep->errdef.instance &&
(ep->errdef.access_type & BOFI_INTR)) {
lp = bofi_link_freelist;
if (lp != NULL) {
bofi_link_freelist = lp->link;
lp->errentp = ep;
lp->link = hp->link;
hp->link = lp;
}
}
}
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
case DDI_INTROP_REMISR:
retval = save_bus_ops.bus_intr_op(dip, rdip,
intr_op, hdlp, result);
mutex_enter(&bofi_low_mutex);
mutex_enter(&bofi_mutex);
/* find the shadow for this dip/inum */
hhashp = HDL_HHASH(hdlp->ih_inum);
for (hp = hhashp->hnext; hp != hhashp; hp = hp->hnext) {
if (hp->dip == rdip &&
hp->type == BOFI_INT_HDL &&
hp->bofi_inum == hdlp->ih_inum) {
break;
}
}
if (hp == hhashp) {
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
return (retval);
}
/* unlink from hashes/list, recycle errdef links, free shadow */
hp->hnext->hprev = hp->hprev;
hp->hprev->hnext = hp->hnext;
hp->dnext->dprev = hp->dprev;
hp->dprev->dnext = hp->dnext;
hp->next->prev = hp->prev;
hp->prev->next = hp->next;
for (lp = hp->link; lp != NULL; ) {
next_lp = lp->link;
lp->link = bofi_link_freelist;
bofi_link_freelist = lp;
lp = next_lp;
}
hp->link = NULL;
mutex_exit(&bofi_mutex);
mutex_exit(&bofi_low_mutex);
kmem_free(hp, sizeof (struct bofi_shadow));
return (retval);
default:
return (save_bus_ops.bus_intr_op(dip, rdip,
intr_op, hdlp, result));
}
}