#include <sys/usb/hcd/xhci/xhci.h>
/*
 * Tear down the controller's event ring state. The ring itself is always
 * released; the event ring segment table DMA memory is released only when it
 * was successfully set up (xev_segs is the CPU mapping of that table and
 * doubles as the "allocated" flag — see xhci_event_alloc()). Clearing
 * xev_segs afterwards makes this routine safe to call more than once.
 */
void
xhci_event_fini(xhci_t *xhcip)
{
	xhci_event_ring_t *evtp = &xhcip->xhci_event;

	xhci_ring_free(&evtp->xev_ring);
	if (evtp->xev_segs != NULL) {
		xhci_dma_free(&evtp->xev_dma);
	}
	evtp->xev_segs = NULL;
}
/*
 * Allocate the DMA memory backing the event ring segment table along with the
 * event ring itself. On success, xev_segs holds the kernel mapping of the
 * segment table. Returns 0 on success or an errno value on failure; on
 * failure no resources remain allocated.
 */
static int
xhci_event_alloc(xhci_t *xhcip, xhci_event_ring_t *xev)
{
	ddi_device_acc_attr_t acc;
	ddi_dma_attr_t attr;
	size_t tblsz = sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS;
	int err;

	xhci_dma_acc_attr(xhcip, &acc);
	xhci_dma_dma_attr(xhcip, &attr);

	if (!xhci_dma_alloc(xhcip, &xev->xev_dma, &attr, &acc, B_FALSE,
	    tblsz, B_FALSE)) {
		return (ENOMEM);
	}

	err = xhci_ring_alloc(xhcip, &xev->xev_ring);
	if (err != 0) {
		/* Don't leak the segment table if the ring can't be had. */
		xhci_dma_free(&xev->xev_dma);
		return (err);
	}

	xev->xev_segs = (void *)xev->xev_dma.xdb_va;
	return (0);
}
/*
 * Initialize (or re-initialize) the controller's event ring and program the
 * run-time registers that describe it to the hardware. Allocation is lazy:
 * if the segment table already exists (xev_segs != NULL) the existing memory
 * is reused, which allows this to be called again after a controller reset.
 * Returns 0 on success or an errno value; on failure all event ring state is
 * torn down via xhci_event_fini().
 */
int
xhci_event_init(xhci_t *xhcip)
{
int ret;
uint32_t reg;
xhci_event_ring_t *xev = &xhcip->xhci_event;
if (xev->xev_segs == NULL) {
if ((ret = xhci_event_alloc(xhcip, xev)) != 0)
return (ret);
}
if ((ret = xhci_ring_reset(xhcip, &xev->xev_ring)) != 0) {
xhci_event_fini(xhcip);
return (ret);
}
/*
 * Zero the whole segment table, then describe the ring in the first entry
 * (physical address and TRB count, little-endian as the controller reads
 * them). Only entry [0] is populated even though the table holds
 * XHCI_EVENT_NSEGS entries — presumably XHCI_EVENT_NSEGS is 1; confirm
 * against the header if that constant ever changes.
 */
bzero(xev->xev_segs, sizeof (xhci_event_segment_t) * XHCI_EVENT_NSEGS);
xev->xev_segs[0].xes_addr = LE_64(xhci_dma_pa(&xev->xev_ring.xr_dma));
xev->xev_segs[0].xes_size = LE_16(xev->xev_ring.xr_ntrb);
/*
 * Program interrupter 0's run-time registers: the segment table size
 * (ERSTSZ), the initial dequeue pointer (ERDP), and finally the segment
 * table base address (ERSTBA). NOTE(review): this ordering looks
 * deliberate — the xHCI spec has ERSTBA written last so the controller
 * sees a fully described table; confirm before reordering.
 */
reg = xhci_get32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0));
reg &= ~XHCI_ERSTS_MASK;
reg |= XHCI_ERSTS_SET(XHCI_EVENT_NSEGS);
xhci_put32(xhcip, XHCI_R_RUN, XHCI_ERSTSZ(0), reg);
xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0),
xhci_dma_pa(&xev->xev_ring.xr_dma));
xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERSTBA(0),
xhci_dma_pa(&xev->xev_dma));
/*
 * Verify that the register accesses above actually reached the device; on
 * an FM-detected access fault, tear everything down and report the loss
 * of service.
 */
if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
xhci_event_fini(xhcip);
ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
return (EIO);
}
return (0);
}
/*
 * Handle a port status change event TRB. TRBs whose completion code is not
 * success are acknowledged and otherwise ignored. A port number outside
 * [1, xcap_max_ports] cannot correspond to real hardware, so we return
 * B_FALSE to signal the problem; otherwise the root hub is notified.
 */
static boolean_t
xhci_event_process_psc(xhci_t *xhcip, xhci_trb_t *trb)
{
	uint32_t portno;

	/* Anything other than a successful completion is simply dropped. */
	if (XHCI_TRB_GET_CODE(LE_32(trb->trb_status)) != XHCI_CODE_SUCCESS)
		return (B_TRUE);

	portno = XHCI_TRB_PORTID(LE_64(trb->trb_addr));

	/* Only act on port IDs the controller can actually have. */
	if (portno >= 1 && portno <= xhcip->xhci_caps.xcap_max_ports) {
		xhci_root_hub_psc_callback(xhcip);
		return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Dispatch a single event ring TRB based on its type. Returns B_FALSE when a
 * handler indicates a fatal problem (in which case further processing of the
 * ring should stop); unrecognized event types are silently accepted.
 */
boolean_t
xhci_event_process_trb(xhci_t *xhcip, xhci_trb_t *trb)
{
	switch (LE_32(trb->trb_flags) & XHCI_TRB_TYPE_MASK) {
	case XHCI_EVT_PORT_CHANGE:
		return (xhci_event_process_psc(xhcip, trb));
	case XHCI_EVT_CMD_COMPLETE:
		return (xhci_command_event_callback(xhcip, trb));
	case XHCI_EVT_DOORBELL:
		/*
		 * VF interrupts should be impossible given that we never
		 * enable virtual functions; treat one as fatal.
		 */
		xhci_error(xhcip, "received xHCI VF interrupt even "
		    "though virtual functions are not supported, "
		    "resetting device");
		xhci_fm_runtime_reset(xhcip);
		return (B_FALSE);
	case XHCI_EVT_XFER:
		return (xhci_endpoint_transfer_callback(xhcip, trb));
	default:
		return (B_TRUE);
	}
}
/*
 * Drain pending event TRBs from the controller's event ring and advance the
 * hardware's dequeue pointer (ERDP). Returns B_TRUE on normal completion and
 * B_FALSE when a fatal error was encountered (in which case a runtime reset
 * has typically already been requested).
 */
boolean_t
xhci_event_process(xhci_t *xhcip)
{
int nevents;
uint64_t addr;
xhci_ring_t *xrp = &xhcip->xhci_event.xev_ring;
/*
 * The lock is taken only to get a consistent view of xhci_state; if the
 * device is already in an error state there is nothing useful to do.
 */
mutex_enter(&xhcip->xhci_lock);
if (xhcip->xhci_state & XHCI_S_ERROR) {
mutex_exit(&xhcip->xhci_lock);
return (B_FALSE);
}
mutex_exit(&xhcip->xhci_lock);
/* The event ring may not be set up yet (xev_segs is the setup flag). */
if (xhcip->xhci_event.xev_segs == NULL)
return (B_TRUE);
/*
 * Sync the ring memory for CPU access before reading TRBs the device has
 * written, and make sure the DMA handle itself is still healthy.
 */
XHCI_DMA_SYNC(xrp->xr_dma, DDI_DMA_SYNC_FORKERNEL);
if (xhci_check_dma_handle(xhcip, &xrp->xr_dma) != DDI_FM_OK) {
xhci_error(xhcip, "encountered fatal FM error trying to "
"synchronize event ring: resetting device");
xhci_fm_runtime_reset(xhcip);
return (B_FALSE);
}
/*
 * Process at most one ring's worth of TRBs per call so a misbehaving
 * device cannot keep us here forever. Note that if a handler fails we
 * return without updating ERDP below.
 */
for (nevents = 0; nevents < xrp->xr_ntrb; nevents++) {
xhci_trb_t *trb;
if ((trb = xhci_ring_event_advance(xrp)) == NULL)
break;
if (!xhci_event_process_trb(xhcip, trb))
return (B_FALSE);
}
/*
 * Tell the hardware how far we have consumed: write the physical address
 * of the current tail TRB into ERDP. NOTE(review): ORing in
 * XHCI_ERDP_BUSY appears intended to clear the Event Handler Busy bit,
 * which the xHCI spec defines as write-1-to-clear — confirm against the
 * spec before changing.
 */
addr = xhci_dma_pa(&xrp->xr_dma) + sizeof (xhci_trb_t) * xrp->xr_tail;
addr |= XHCI_ERDP_BUSY;
xhci_put64(xhcip, XHCI_R_RUN, XHCI_ERDP(0), addr);
if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
xhci_error(xhcip, "failed to write to event ring dequeue "
"pointer: encountered fatal FM error, resetting device");
xhci_fm_runtime_reset(xhcip);
return (B_FALSE);
}
return (B_TRUE);
}