#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>
#include <sys/ddi_impldefs.h>
#include <sys/pci/pci_obj.h>
#include <sys/sdt.h>
#include <sys/clock.h>
#ifdef NOT_DEFINED
/*
 * Dead code: NOT_DEFINED is never defined, so this table is always
 * compiled out.  It appears to map an interrupt number (ino, the
 * index) to a priority level (pil, the value); entries 0x00-0x1b are
 * unassigned.  Kept for reference only.
 */
uint_t ino_to_pil[] = {
	0, 0, 0, 0,		/* 0x00 - 0x03 */
	0, 0, 0, 0,		/* 0x04 - 0x07 */
	0, 0, 0, 0,		/* 0x08 - 0x0b */
	0, 0, 0, 0,		/* 0x0c - 0x0f */
	0, 0, 0, 0,		/* 0x10 - 0x13 */
	0, 0, 0, 0,		/* 0x14 - 0x17 */
	0, 0, 0, 0,		/* 0x18 - 0x1b */
	4, 0, 0, 0,		/* 0x1c - 0x1f */
	/* one entry per ino from 0x20 on */
	4,
	6,
	3,
	9,
	9,
	14,
	4,
	8,
	14,
	12,
	12,
	12,
	0,
	0,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14,
	14
};
#endif
#define PCI_SIMBA_VENID 0x108e
#define PCI_SIMBA_DEVID 0x5000
/*
 * map_pcidev_cfg_reg - set up a config-space access handle used by
 * pci_intr_wrapper() to sync DMA before invoking a device's handler.
 *
 * Walks rdip's ancestors up to (but not including) dip, looking for a
 * PCI-to-non-PCI bridge (base class PCI_CLASS_BRIDGE) and for a Simba
 * bridge (vendor 0x108e, device 0x5000).  Nodes carrying the
 * "no-dma-interrupt-sync" property or lacking a "vendor-id" property
 * are skipped.  The first PCI node examined on the path (pci_dip, the
 * one closest to rdip) is the node whose config space gets mapped.
 *
 * *hdl_p is set up only when a bridge was found and, on chips older
 * than Schizo, only when a Simba was also found; otherwise DDI_SUCCESS
 * is returned with *hdl_p untouched.  Returns DDI_FAILURE when a
 * config-space mapping fails.
 */
static int
map_pcidev_cfg_reg(dev_info_t *dip, dev_info_t *rdip, ddi_acc_handle_t *hdl_p)
{
	dev_info_t *cdip;
	dev_info_t *pci_dip = NULL;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	int simba_found = 0, pci_bridge_found = 0;

	for (cdip = rdip; cdip && cdip != dip; cdip = ddi_get_parent(cdip)) {
		ddi_acc_handle_t config_handle;
		uint32_t vendor_id = ddi_getprop(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "vendor-id", 0xffff);

		DEBUG4(DBG_A_INTX, pci_p->pci_dip,
		    "map dev cfg reg for %s%d: @%s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(cdip), ddi_get_instance(cdip));

		/* Honor an explicit opt-out from interrupt DMA sync. */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "no-dma-interrupt-sync"))
			continue;

		/* No "vendor-id" property: not a PCI device node. */
		if (vendor_id == 0xffff)
			continue;

		/* Remember the PCI node closest to rdip. */
		if (!pci_dip)
			pci_dip = cdip;

		if (vendor_id == PCI_SIMBA_VENID) {
			uint32_t device_id = ddi_getprop(DDI_DEV_T_ANY,
			    cdip, DDI_PROP_DONTPASS, "device-id", -1);
			if (device_id == PCI_SIMBA_DEVID) {
				simba_found = 1;
				DEBUG0(DBG_A_INTX, pci_p->pci_dip,
				    "\tFound simba\n");
				continue;
			}
		}

		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: can't get brdg cfg space for %s%d\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ddi_driver_name(cdip), ddi_get_instance(cdip));
			return (DDI_FAILURE);
		}

		if (pci_config_get8(config_handle, PCI_CONF_BASCLASS)
		    == PCI_CLASS_BRIDGE) {
			DEBUG0(DBG_A_INTX, pci_p->pci_dip,
			    "\tFound PCI to xBus bridge\n");
			pci_bridge_found = 1;
		}
		pci_config_teardown(&config_handle);
	}

	if (!pci_bridge_found)
		return (DDI_SUCCESS);
	if (!simba_found && (CHIP_TYPE(pci_p) < PCI_CHIP_SCHIZO))
		return (DDI_SUCCESS);

	/*
	 * pci_dip is non-NULL here: pci_bridge_found implies at least
	 * one node passed the filters above.  The warning names pci_dip
	 * (the node being mapped), not the stale loop cursor cdip,
	 * which is dip or NULL once the loop has terminated.
	 */
	if (pci_config_setup(pci_dip, hdl_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: can not get config space for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_driver_name(pci_dip), ddi_get_instance(pci_dip));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * pci_spurintr - handle an interrupt on this ino/pil that no
 * registered handler claimed.
 *
 * Unclaimed interrupts are counted per ino.  If more than
 * pci_unclaimed_intr_max arrive within pci_spurintr_duration
 * (measured from the first unclaimed one), the ino is "blocked":
 * its clear register is left untouched so no further interrupts are
 * delivered, and a warning lists every handler sharing this pil.
 * Unclaimed interrupts spread over a longer window reset the count
 * and are cleared, optionally logged when pci_spurintr_msgs is set.
 *
 * Always returns DDI_INTR_CLAIMED.
 */
static int
pci_spurintr(ib_ino_pil_t *ipil_p) {
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ih_t *ih_p = ipil_p->ipil_ih_start;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	char *err_fmt_str;
	boolean_t blocked = B_FALSE;
	int i;

	/* Already blocked: swallow without clearing the ino. */
	if (ino_p->ino_unclaimed_intrs > pci_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	/* First unclaimed interrupt of a possible burst: start timing. */
	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= pci_unclaimed_intr_max)
		goto clear;

	/*
	 * Limit exceeded, but spread over more than the duration
	 * window: not a burst, so restart the count and keep going.
	 */
	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > pci_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}

	/* Burst of unclaimed interrupts: block the ino. */
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	if (!pci_spurintr_msgs) {
		/* Quiet mode: just re-arm the ino. */
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
		return (DDI_INTR_CLAIMED);
	}
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pci_p->pci_dip), ino_p->ino_ino);
	/* Name every handler registered at this pil on the ino. */
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");

	/* Blocked inos are deliberately left uncleared. */
	if (blocked == B_FALSE)
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}
extern uint64_t intr_get_time(void);

/*
 * pci_intr_wrapper - system-level handler installed (by pci_add_intr)
 * for each (ino, pil) node; dispatches every driver handler
 * registered there.
 *
 * Before each handler, when the pci_intr_dma_sync tunable is set and
 * the handler has a config handle and the PBM has a sync register, a
 * discarded config-space read is issued and the PBM is synced —
 * presumably to make the device's in-flight DMA visible before its
 * handler runs (TODO confirm against PBM documentation).  Handlers in
 * the disabled state are skipped.  DTrace interrupt__start /
 * interrupt__complete probes bracket each call, and handler time is
 * accumulated into ih_ticks for pils at or below LOCK_LEVEL.
 *
 * Claim accounting: any nonzero result sets this pil's bit in
 * ino_claimed.  Only the ino's lowest pil completes the interrupt:
 * if some pil claimed it, counters are reset and the ino cleared;
 * otherwise the spurious-interrupt path runs.
 */
uint_t
pci_intr_wrapper(caddr_t arg)
{
	ib_ino_pil_t *ipil_p = (ib_ino_pil_t *)arg;
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	uint_t result = 0, r = DDI_INTR_UNCLAIMED;
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ih_t *ih_p = ipil_p->ipil_ih_start;
	int i;

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;
		ddi_acc_handle_t cfg_hdl = ih_p->ih_config_handle;

		if (pci_intr_dma_sync && cfg_hdl && pbm_p->pbm_sync_reg_pa) {
			/* Discarded config read, then PBM DMA sync. */
			(void) pci_config_get16(cfg_hdl, PCI_CONF_VENID);
			pci_pbm_dma_sync(pbm_p, ino_p->ino_ino);
		}

		if (ih_p->ih_intr_state == PCI_INTR_STATE_DISABLE) {
			DEBUG3(DBG_INTR, pci_p->pci_dip,
			    "pci_intr_wrapper: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);
			continue;
		}

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/* Accumulate handler time for pils at or below LOCK_LEVEL. */
		if (ipil_p->ipil_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		/* Unless tuned to poll all handlers, stop at first claim. */
		if (pci_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (result)
		ino_p->ino_claimed |= (1 << ipil_p->ipil_pil);

	/* Only the lowest pil on the ino completes the interrupt. */
	if (ipil_p->ipil_pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed)
		return (pci_spurintr(ipil_p));

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Re-arm the ino for the next interrupt. */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}
/*
 * get_my_childs_dip - walk up the devinfo tree from rdip and return
 * the ancestor of rdip that is a direct child of dip.  Assumes dip is
 * a proper ancestor of rdip.
 */
dev_info_t *
get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	while (ddi_get_parent(cdip) != dip)
		cdip = ddi_get_parent(cdip);

	return (cdip);
}
/*
 * Shared data template for the "pci_intrs" kstats.  Each handler's
 * kstat is created KSTAT_FLAG_VIRTUAL with ks_data pointing here
 * (see pci_add_intr); pci_ks_update() fills the fields before each
 * snapshot, serialized by pciintr_ks_template_lock (installed as
 * ks_lock on every instance).
 */
static struct {
	kstat_named_t pciintr_ks_name;		/* "drivername+instance" */
	kstat_named_t pciintr_ks_type;		/* "fixed" / "disabled" */
	kstat_named_t pciintr_ks_cpu;		/* target CPU id */
	kstat_named_t pciintr_ks_pil;		/* priority level */
	kstat_named_t pciintr_ks_time;		/* cumulative time, ns */
	kstat_named_t pciintr_ks_ino;		/* interrupt number */
	kstat_named_t pciintr_ks_cookie;	/* mondo for the ino */
	kstat_named_t pciintr_ks_devpath;	/* device node path */
	kstat_named_t pciintr_ks_buspath;	/* nexus node path */
} pciintr_ks_template = {
	{ "name", KSTAT_DATA_CHAR },
	{ "type", KSTAT_DATA_CHAR },
	{ "cpu", KSTAT_DATA_UINT64 },
	{ "pil", KSTAT_DATA_UINT64 },
	{ "time", KSTAT_DATA_UINT64 },
	{ "ino", KSTAT_DATA_UINT64 },
	{ "cookie", KSTAT_DATA_UINT64 },
	{ "devpath", KSTAT_DATA_STRING },
	{ "buspath", KSTAT_DATA_STRING },
};

/* Monotonic instance number generator for "pci_intrs" kstats. */
static uint32_t pciintr_ks_instance;

/* Scratch path buffers filled by pci_ks_update() under the lock. */
static char ih_devpath[MAXPATHLEN];
static char ih_buspath[MAXPATHLEN];
kmutex_t pciintr_ks_template_lock;
/*
 * pci_ks_update - ks_update(9E) callback for the per-handler
 * "pci_intrs" kstat created in pci_add_intr().
 *
 * The kstat is virtual: every instance shares pciintr_ks_template as
 * its data, with pciintr_ks_template_lock as ks_lock, so the kstat
 * framework serializes callers.  Fills the template from this
 * handler's ih/ipil/ino state.  The rw argument is ignored (the data
 * is effectively read-only to consumers).  Always returns 0.
 */
int
pci_ks_update(kstat_t *ksp, int rw)
{
	ih_t *ih_p = ksp->ks_private;
	int maxlen = sizeof (pciintr_ks_template.pciintr_ks_name.value.c);
	ib_ino_pil_t *ipil_p = ih_p->ih_ipil_p;
	ib_ino_info_t *ino_p = ipil_p->ipil_ino_p;
	ib_t *ib_p = ino_p->ino_ib_p;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_t ino;

	ino = ino_p->ino_ino;

	/* "drivername+instance" of the interrupt owner. */
	(void) snprintf(pciintr_ks_template.pciintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(pci_p->pci_dip, ih_buspath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PCI_INTR_STATE_ENABLE) {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "fixed");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 =
		    ino_p->ino_cpuid;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 =
		    ipil_p->ipil_pil;
		/* ih_nsec plus ih_ticks converted to ns via tick2ns(). */
		pciintr_ks_template.pciintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = ino;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 =
		    IB_INO_TO_MONDO(ib_p, ino);
	} else {
		/* Disabled handlers report zeroed statistics. */
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "disabled");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 = 0;
	}
	return (0);
}
/*
 * pci_add_intr - register hdlp's handler on the ino derived from
 * hdlp->ih_vector.
 *
 * Two paths:
 *  - PCI_PULSE_INO vectors bypass the per-ino bookkeeping: the mondo
 *    is installed directly via i_ddi_add_ivintr() and the interrupt
 *    map register programmed for a CPU from the distribution
 *    framework.
 *  - Normal INOs are tracked per (ino, pil): the handler is appended
 *    to an existing ib_ino_pil_t at this priority when one exists,
 *    otherwise a new node is created with pci_intr_wrapper() as the
 *    system-level handler.
 *
 * Also creates a per-handler "pci_intrs" kstat and registers the
 * device's interrupt weight with the distribution framework.
 *
 * Returns DDI_SUCCESS, DDI_INTR_NOTFOUND (ino out of range), or
 * DDI_FAILURE.
 */
int
pci_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ih_t *ih_p;
	ib_ino_t ino;
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p, *ipil_list;
	ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;
	int32_t weight;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_A_INTX, dip, "pci_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > ib_p->ib_max_ino) {
		DEBUG1(DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	/* Pulse interrupts: program the map register directly. */
	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0)
			goto fail1;

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		if (i_ddi_add_ivintr(hdlp) != DDI_SUCCESS)
			goto fail1;

		/*
		 * Select a target CPU and enable the mapping under the
		 * interrupt lock.
		 */
		mutex_enter(&ib_p->ib_intr_lock);
		cpu_id = intr_dist_cpuid();
		*map_reg_addr = ib_get_map_reg(mondo, cpu_id);
		mutex_exit(&ib_p->ib_intr_lock);
		*map_reg_addr;	/* read back to flush the previous store */
		goto done;
	}

	if ((mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino)) == 0)
		goto fail1;

	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ih_p = ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	/* Map config space for DMA-sync reads (see pci_intr_wrapper). */
	if (map_pcidev_cfg_reg(dip, rdip, &ih_p->ih_config_handle))
		goto fail2;

	ino_p = ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p:NULL;

	/* Sharing an existing (ino, pil) node: just append the handler. */
	if (ino_p && (ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum)) {
			/* Same device/inum already registered here. */
			DEBUG1(DBG_A_INTX, dip, "dup intr #%d\n",
			    hdlp->ih_inum);
			goto fail3;
		}

		cpu_id = ino_p->ino_cpuid;
		weight = pci_class_to_intr_weight(rdip);
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

		ib_ino_add_intr(pci_p, ipil_p, ih_p);
		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	/* First handler at this priority: create a new (ino, pil) node. */
	ipil_p = ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);
	ino_p->ino_mondo = hdlp->ih_vector;

	DEBUG2(DBG_A_INTX, dip, "pci_add_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	/*
	 * Temporarily install pci_intr_wrapper/ipil_p as the handler
	 * for i_ddi_add_ivintr(), then restore the caller's handler
	 * and arguments in hdlp.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pci_intr_wrapper, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail4;

	ipil_p->ipil_pil = hdlp->ih_pri;

	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	/* Choose a target CPU only when this is the ino's first pil. */
	if (ipil_list == NULL)
		ino_p->ino_cpuid = pci_intr_dist_cpuid(ib_p, ino_p);

	cpu_id = ino_p->ino_cpuid;
	ino_p->ino_established = 1;

	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

	if (!ipil_list) {
		/* Enable the mapping; read back to flush the store. */
		*ino_p->ino_map_reg = ib_get_map_reg(mondo, cpu_id);
		*ino_p->ino_map_reg;
	}
ino_done:
	hdlp->ih_target = ino_p->ino_cpuid;
	ih_p->ih_ipil_p = ipil_p;

	/* Per-handler statistics kstat, virtual over the shared template. */
	ih_p->ih_ksp = kstat_create("pci_intrs",
	    atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
	    KSTAT_TYPE_NAMED,
	    sizeof (pciintr_ks_template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ih_p->ih_ksp != NULL) {
		/* Extra room for the devpath/buspath KSTAT_DATA_STRINGs. */
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pciintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pciintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = pci_ks_update;
		kstat_install(ih_p->ih_ksp);
	}

	ib_ino_map_reg_share(ib_p, ino, ino_p);
	mutex_exit(&ib_p->ib_ino_lst_mutex);
done:
	DEBUG2(DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	ib_delete_ino_pil(ib_p, ipil_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
fail2:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	DEBUG2(DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}
/*
 * pci_remove_intr - unregister hdlp's handler from the ino derived
 * from hdlp->ih_vector.
 *
 * Pulse interrupts reset the map register and remove the vector
 * directly.  Normal INOs remove the handler from its (ino, pil) node;
 * when that node's handler list empties, the node and the system-level
 * vector are torn down, and when the ino has no pils left the ino
 * structure itself is freed.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
pci_remove_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ib_ino_t ino;
	ib_mondo_t mondo;
	ib_ino_info_t *ino_p;
	ib_ino_pil_t *ipil_p;
	ih_t *ih_p;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_R_INTX, dip, "pci_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	/* Pulse interrupts: disable the mapping and drop the vector. */
	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);
		IB_INO_INTR_RESET(map_reg_addr);
		*map_reg_addr;	/* read back to flush the previous store */

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0) {
			DEBUG1(DBG_R_INTX, dip,
			    "can't get mondo for ino %x\n", ino);
			return (DDI_FAILURE);
		}

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		DEBUG2(DBG_R_INTX, dip, "pci_rem_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		i_ddi_rem_ivintr(hdlp);

		DEBUG2(DBG_R_INTX, dip, "pulse success mondo=%x reg=%p\n",
		    mondo, map_reg_addr);
		return (DDI_SUCCESS);
	}

	mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino);
	if (mondo == 0) {
		DEBUG1(DBG_R_INTX, dip, "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (!ino_p) {
		/* Not tracked here: may be a cb-owned interrupt. */
		int r = cb_remove_xintr(pci_p, dip, rdip, ino, mondo);
		if (r != DDI_SUCCESS)
			cmn_err(CE_WARN, "%s%d-xintr: ino %x is invalid",
			    ddi_driver_name(dip), ddi_get_instance(dip), ino);
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (r);
	}

	/*
	 * NOTE(review): assumes the (ino, pil) node and the handler
	 * exist — no NULL checks on either lookup.  TODO confirm the
	 * framework guarantees remove is only called after a
	 * successful add.
	 */
	ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum);
	ib_ino_rem_intr(pci_p, ipil_p, ih_p);
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		/* Last handler at this pil: tear down the vector. */
		IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		i_ddi_rem_ivintr(hdlp);
		ib_delete_ino_pil(ib_p, ipil_p);
	}

	/* Re-enable the mapping if the map register is still in use. */
	if (ib_ino_map_reg_unshare(ib_p, ino, ino_p) || ino_p->ino_ipil_size) {
		IB_INO_INTR_ON(ino_p->ino_map_reg);
		*ino_p->ino_map_reg;	/* read back to flush the store */
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	if (ino_p->ino_ipil_size == 0)
		kmem_free(ino_p, sizeof (ib_ino_info_t));

	DEBUG1(DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}
/*
 * pci_intr_teardown - release the instance's ino table and reset the
 * bookkeeping fields so stale pointers cannot be reused.
 */
void
pci_intr_teardown(pci_t *pci_p)
{
	kmem_free(pci_p->pci_inos, pci_p->pci_inos_len);
	pci_p->pci_inos_len = 0;
	pci_p->pci_inos = NULL;
}