#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/inttypes.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/avintr.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>
#include <sys/pci_cap.h>
#include <sys/stdbit.h>
#include "virtio.h"
#include "virtio_impl.h"
#include "virtio_endian.h"
/*
 * This is a misc module: it carries no dev_ops of its own and exists only
 * to provide common routines to the per-device virtio drivers.
 */
static struct modlmisc virtio_modlmisc = {
	.misc_modops = &mod_miscops,
	.misc_linkinfo = "VIRTIO common routines",
};
/*
 * Module linkage; a single misc linkage entry, NULL-terminated.
 */
static struct modlinkage virtio_modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &virtio_modlmisc, NULL }
};
/*
 * Loadable module entry point: install this misc module.
 */
int
_init(void)
{
	int r = mod_install(&virtio_modlinkage);

	return (r);
}
/*
 * Loadable module exit point: remove this misc module.
 */
int
_fini(void)
{
	int r = mod_remove(&virtio_modlinkage);

	return (r);
}
/*
 * Loadable module information entry point.
 */
int
_info(struct modinfo *modinfop)
{
	int r = mod_info(&virtio_modlinkage, modinfop);

	return (r);
}
/*
 * Forward declarations for file-local routines referenced before their
 * definitions.
 */
static void virtio_unmap_cap(virtio_t *, virtio_pci_cap_t *);
static boolean_t virtio_map_cap(virtio_t *, virtio_pci_cap_t *);
static void virtio_discover_pci_caps(virtio_t *, ddi_acc_handle_t);
static void virtio_set_status(virtio_t *, uint8_t);
static int virtio_chain_append_impl(virtio_chain_t *, uint64_t, size_t,
    uint16_t);
static int virtio_interrupts_setup(virtio_t *, int);
static void virtio_interrupts_teardown(virtio_t *);
static void virtio_interrupts_disable_locked(virtio_t *);
static void virtio_queue_free(virtio_queue_t *);
static int virtio_bar_to_rnumber(virtio_t *, uint8_t);
/*
 * Tunable: set non-zero (e.g. via /etc/system) to force use of the legacy
 * register interface even when the device exposes modern PCI capabilities.
 * Checked in virtio_init() when selecting the operations vector.
 */
int virtio_force_legacy = 0;
/*
 * Access attributes used for all virtio register mappings.  Virtio
 * registers are little-endian by specification; the per-mode register
 * accessors handle any byte swapping, so the mapping itself never swaps.
 */
ddi_device_acc_attr_t virtio_acc_attr = {
	.devacc_attr_version = DDI_DEVICE_ATTR_V1,
	.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC,
	.devacc_attr_dataorder = DDI_STORECACHING_OK_ACC,
	.devacc_attr_access = DDI_DEFAULT_ACC
};
/*
 * DMA attributes for virtqueue ring memory.  The ring must be allocated as
 * a single page-aligned cookie (sgllen 1), and the address ceiling keeps
 * the ring where the legacy interface's 32-bit page-frame-number register
 * can still describe it.
 */
ddi_dma_attr_t virtio_dma_attr_queue = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000,
	.dma_attr_addr_hi = 0x00000FFFFFFFF000,
	.dma_attr_count_max = 0x00000000FFFFFFFF,
	.dma_attr_align = VIRTIO_PAGE_SIZE,
	.dma_attr_burstsizes = 1,
	.dma_attr_minxfer = 1,
	.dma_attr_maxxfer = 0x00000000FFFFFFFF,
	.dma_attr_seg = 0x00000000FFFFFFFF,
	.dma_attr_sgllen = 1,
	.dma_attr_granular = 1,
	.dma_attr_flags = 0
};
/*
 * DMA attributes for indirect descriptor tables.  Descriptors carry full
 * 64-bit addresses, so no address ceiling is needed; the table itself must
 * be contiguous (sgllen 1) and aligned to the descriptor size.
 */
ddi_dma_attr_t virtio_dma_attr_indirect = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFF,
	.dma_attr_count_max = 0x00000000FFFFFFFF,
	.dma_attr_align = sizeof (struct virtio_vq_desc),
	.dma_attr_burstsizes = 1,
	.dma_attr_minxfer = 1,
	.dma_attr_maxxfer = 0x00000000FFFFFFFF,
	.dma_attr_seg = 0x00000000FFFFFFFF,
	.dma_attr_sgllen = 1,
	.dma_attr_granular = 1,
	.dma_attr_flags = 0
};
/*
 * Tear down the framework object created by virtio_init().  If "failed" is
 * set the device is marked FAILED rather than being reset, so the
 * hypervisor can see that driver attach did not succeed.  On return the
 * virtio_t has been freed and must not be referenced.
 */
void
virtio_fini(virtio_t *vio, boolean_t failed)
{
	mutex_enter(&vio->vio_mutex);

	/* Interrupt handlers must be removed before queues are freed. */
	virtio_interrupts_teardown(vio);

	virtio_queue_t *viq;
	while ((viq = list_remove_head(&vio->vio_queues)) != NULL) {
		virtio_queue_free(viq);
	}
	list_destroy(&vio->vio_queues);
	mutex_destroy(&vio->vio_qlock);

	if (failed) {
		vio->vio_ops->vop_set_status_locked(vio, VIRTIO_STATUS_FAILED);
	} else {
		vio->vio_ops->vop_device_reset_locked(vio);
	}

	vio->vio_initlevel &= ~VIRTIO_INITLEVEL_PROVIDER;

	if (vio->vio_initlevel & VIRTIO_INITLEVEL_REGS) {
		/* vio_bar is only mapped for legacy-mode devices. */
		if (vio->vio_bar != NULL)
			ddi_regs_map_free(&vio->vio_barh);
		/* virtio_unmap_cap() ignores caps that were never mapped. */
		virtio_unmap_cap(vio, &vio->vio_cap_common);
		virtio_unmap_cap(vio, &vio->vio_cap_notify);
		virtio_unmap_cap(vio, &vio->vio_cap_isr);
		virtio_unmap_cap(vio, &vio->vio_cap_device);
		vio->vio_initlevel &= ~VIRTIO_INITLEVEL_REGS;
	}

	vio->vio_initlevel &= ~VIRTIO_INITLEVEL_SHUTDOWN;

	/* Every init level must now be unwound. */
	VERIFY0(vio->vio_initlevel);

	mutex_exit(&vio->vio_mutex);
	mutex_destroy(&vio->vio_mutex);

	kmem_free(vio, sizeof (*vio));
}
/*
 * Early device initialisation: identify the device, select a register
 * interface (modern, transitional, or legacy), map the registers, and
 * perform the first steps of the status handshake.  Returns NULL on
 * failure, with all intermediate resources released.
 */
virtio_t *
virtio_init(dev_info_t *dip)
{
	ddi_acc_handle_t pci;

	/*
	 * Map PCI configuration space so we can read the device ID,
	 * revision, and the virtio vendor-specific capabilities.
	 */
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "pci_config_setup failed");
		return (NULL);
	}

	uint16_t devid;
	if ((devid = pci_config_get16(pci, PCI_CONF_DEVID)) == PCI_EINVAL16) {
		dev_err(dip, CE_WARN, "could not read config space devid");
		pci_config_teardown(&pci);
		return (NULL);
	}

	uint8_t revid;
	if ((revid = pci_config_get8(pci, PCI_CONF_REVID)) == PCI_EINVAL8) {
		dev_err(dip, CE_WARN, "could not read config space revid");
		pci_config_teardown(&pci);
		return (NULL);
	}

	virtio_t *vio = kmem_zalloc(sizeof (*vio), KM_SLEEP);
	vio->vio_dip = dip;

	/*
	 * Walk the vendor-specific capability list; configuration space is
	 * not needed after that.
	 */
	virtio_discover_pci_caps(vio, pci);
	pci_config_teardown(&pci);

	/*
	 * A modern device must expose all four capability regions.
	 */
	boolean_t found_modern_caps =
	    (vio->vio_cap_common.vpc_type != 0 &&
	    vio->vio_cap_notify.vpc_type != 0 &&
	    vio->vio_cap_isr.vpc_type != 0 &&
	    vio->vio_cap_device.vpc_type != 0);

	if (devid >= VIRTIO_MIN_MODERN_DEVID) {
		if (!found_modern_caps) {
			dev_err(dip, CE_WARN,
			    "Did not find required PCI capabilities for a "
			    "modern VirtIO device");
			kmem_free(vio, sizeof (*vio));
			return (NULL);
		}
		vio->vio_mode = VIRTIO_MODE_MODERN;
		vio->vio_ops = &virtio_modern_ops;
	} else {
		/*
		 * Transitional and legacy devices must have revision 0.
		 */
		if (revid != 0) {
			dev_err(dip, CE_WARN, "PCI Revision %u incorrect for "
			    "transitional or legacy virtio device",
			    (uint_t)revid);
			kmem_free(vio, sizeof (*vio));
			return (NULL);
		}

		if (found_modern_caps && virtio_force_legacy == 0) {
			vio->vio_mode = VIRTIO_MODE_TRANSITIONAL;
			vio->vio_ops = &virtio_modern_ops;
		} else {
			vio->vio_mode = VIRTIO_MODE_LEGACY;
			vio->vio_ops = &virtio_legacy_ops;
		}
	}

	if (vio->vio_mode == VIRTIO_MODE_LEGACY) {
		/*
		 * Legacy devices use a single register window in BAR 0.
		 */
		int rnumber = virtio_bar_to_rnumber(vio, VIRTIO_LEGACY_BAR);

		if (rnumber == -1 || ddi_regs_map_setup(dip, rnumber,
		    (caddr_t *)&vio->vio_bar, 0, 0, &virtio_acc_attr,
		    &vio->vio_barh) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "Failed to map BAR0");
			kmem_free(vio, sizeof (*vio));
			return (NULL);
		}
	} else {
		if (!virtio_map_cap(vio, &vio->vio_cap_common) ||
		    !virtio_map_cap(vio, &vio->vio_cap_notify) ||
		    !virtio_map_cap(vio, &vio->vio_cap_isr) ||
		    !virtio_map_cap(vio, &vio->vio_cap_device)) {
			/*
			 * Unwind any capability mappings that succeeded
			 * before the failure; virtio_unmap_cap() is a no-op
			 * for a capability that was never mapped.  (The
			 * previous version leaked those register mappings.)
			 */
			virtio_unmap_cap(vio, &vio->vio_cap_common);
			virtio_unmap_cap(vio, &vio->vio_cap_notify);
			virtio_unmap_cap(vio, &vio->vio_cap_isr);
			virtio_unmap_cap(vio, &vio->vio_cap_device);
			kmem_free(vio, sizeof (*vio));
			return (NULL);
		}
	}
	vio->vio_initlevel |= VIRTIO_INITLEVEL_REGS;

	mutex_init(&vio->vio_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&vio->vio_queues, sizeof (virtio_queue_t),
	    offsetof(virtio_queue_t, viq_link));
	mutex_init(&vio->vio_qlock, NULL, MUTEX_DRIVER, NULL);
	/* No queue selected yet. */
	vio->vio_qcur = UINT16_MAX;

	/*
	 * Begin the status handshake: reset the device, then announce that
	 * we have noticed it (ACKNOWLEDGE) and have a driver (DRIVER).
	 */
	virtio_device_reset(vio);
	virtio_set_status(vio, VIRTIO_STATUS_ACKNOWLEDGE);
	virtio_set_status(vio, VIRTIO_STATUS_DRIVER);

	/* Cache the device's offered feature bits for later negotiation. */
	vio->vio_features_device = vio->vio_ops->vop_device_get_features(vio);
	vio->vio_features = vio->vio_features_device;

	return (vio);
}
/*
 * Negotiate the feature set with the device.  The caller passes the
 * features it is prepared to use; we intersect those with what the device
 * offered and push the result back to the device.  Returns B_TRUE on
 * successful negotiation.
 */
boolean_t
virtio_init_features(virtio_t *vio, uint64_t driver_features,
    boolean_t allow_indirect)
{
	uint64_t want = driver_features;

	/*
	 * Legacy and transitional register interfaces carry only 32 bits
	 * of feature flags.
	 */
	if (!virtio_modern(vio) && (want >> 32) != 0) {
		dev_err(vio->vio_dip, CE_WARN,
		    "driver programming error; high bits set in features");
		return (B_FALSE);
	}

	if (allow_indirect) {
		want |= VIRTIO_F_RING_INDIRECT_DESC;
	}
	if (virtio_modern(vio)) {
		want |= VIRTIO_F_VERSION_1;
	}

	vio->vio_features &= want;

	if (!vio->vio_ops->vop_device_set_features(vio, vio->vio_features)) {
		dev_err(vio->vio_dip, CE_WARN, "feature negotiation failed");
		return (B_FALSE);
	}

	if (!virtio_modern(vio)) {
		vio->vio_legacy_cfg_offset = VIRTIO_LEGACY_CFG_OFFSET;
	}

	return (B_TRUE);
}
/*
 * Register a handler to be invoked on device configuration-change
 * interrupts.  Must be called before interrupt handlers are added (i.e.
 * before virtio_init_complete()), and at most once.
 */
void
virtio_register_cfgchange_handler(virtio_t *vio, ddi_intr_handler_t *func,
    void *funcarg)
{
	VERIFY(!(vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ADDED));
	VERIFY(!vio->vio_cfgchange_handler_added);

	mutex_enter(&vio->vio_mutex);
	vio->vio_cfgchange_handler = func;
	vio->vio_cfgchange_handlerarg = funcarg;
	mutex_exit(&vio->vio_mutex);
}
/*
 * Finish framework initialisation on behalf of the client driver: set up
 * interrupts (if any queues or a cfgchange handler need them), re-create
 * the mutexes at interrupt priority, enable all registered queues, and
 * announce DRIVER_OK to the device.
 */
int
virtio_init_complete(virtio_t *vio, int allowed_interrupt_types)
{
	VERIFY(!(vio->vio_initlevel & VIRTIO_INITLEVEL_PROVIDER));
	vio->vio_initlevel |= VIRTIO_INITLEVEL_PROVIDER;

	if (!list_is_empty(&vio->vio_queues) ||
	    vio->vio_cfgchange_handler != NULL) {
		if (virtio_interrupts_setup(vio, allowed_interrupt_types) !=
		    DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	/*
	 * The mutexes were originally created before the interrupt
	 * priority was known; now that it is, re-initialise them at the
	 * correct priority so they can be taken from interrupt context.
	 */
	mutex_destroy(&vio->vio_mutex);
	mutex_init(&vio->vio_mutex, NULL, MUTEX_DRIVER, virtio_intr_pri(vio));
	for (virtio_queue_t *viq = list_head(&vio->vio_queues); viq != NULL;
	    viq = list_next(&vio->vio_queues, viq)) {
		mutex_destroy(&viq->viq_mutex);
		mutex_init(&viq->viq_mutex, NULL, MUTEX_DRIVER,
		    virtio_intr_pri(vio));
	}

	/* Tell the device each registered queue is ready for use. */
	for (virtio_queue_t *viq = list_head(&vio->vio_queues); viq != NULL;
	    viq = list_next(&vio->vio_queues, viq)) {
		vio->vio_ops->vop_queue_enable_set(vio, viq->viq_index, true);
	}

	virtio_set_status(vio, VIRTIO_STATUS_DRIVER_OK);

	return (DDI_SUCCESS);
}
/*
 * Report whether every feature bit in "feature_mask" was successfully
 * negotiated with the device.
 */
boolean_t
virtio_features_present(virtio_t *vio, uint64_t feature_mask)
{
	const uint64_t have = vio->vio_features & feature_mask;

	return (have == feature_mask ? B_TRUE : B_FALSE);
}
/*
 * Return the negotiated feature bits.
 *
 * NOTE(review): vio_features is negotiated as a 64-bit quantity (see
 * virtio_init_features()), but this accessor returns uint32_t and would
 * silently truncate feature bits above bit 31 on modern devices.  Callers
 * needing high bits should use virtio_features_present(); consider
 * widening this return type in a coordinated header change.
 */
uint32_t
virtio_features(virtio_t *vio)
{
	return (vio->vio_features);
}
/*
 * True for devices operated via the modern register interface (modern or
 * transitional mode); false only for pure legacy-mode devices.
 */
boolean_t
virtio_modern(virtio_t *vio)
{
	if (vio->vio_mode == VIRTIO_MODE_LEGACY)
		return (B_FALSE);
	return (B_TRUE);
}
/*
 * Select a virtqueue for subsequent register access, taking the
 * queue-selection lock.  The lock is intentionally held on return; the
 * caller must release it with virtio_releaseq().
 */
void
virtio_acquireq(virtio_t *vio, uint16_t qidx)
{
	mutex_enter(&vio->vio_qlock);

	if (vio->vio_qcur == qidx) {
		/* Already selected; nothing to write to the device. */
		return;
	}

	vio->vio_ops->vop_queue_select(vio, qidx);
	vio->vio_qcur = qidx;
}
/*
 * Release the queue-selection lock taken by virtio_acquireq().
 */
void
virtio_releaseq(virtio_t *vio)
{
	mutex_exit(&vio->vio_qlock);
}
/*
 * Return the interrupt priority cookie for use with mutex_init().  Only
 * valid once interrupt handlers have been added, which is when the
 * priority is determined.
 */
void *
virtio_intr_pri(virtio_t *vio)
{
	VERIFY(vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ADDED);

	return (DDI_INTR_PRI(vio->vio_interrupt_priority));
}
/*
 * Release the register mapping for one PCI capability region, if it was
 * ever established.  Safe to call on a capability that was never
 * discovered (vpc_type == 0) or never mapped (vpc_bar == NULL).
 */
static void
virtio_unmap_cap(virtio_t *vio, virtio_pci_cap_t *cap)
{
	if (cap->vpc_type == 0)
		return;
	if (cap->vpc_bar == NULL)
		return;

	ddi_regs_map_free(&cap->vpc_barh);
}
/*
 * Map the register region described by one discovered PCI capability.
 * Returns B_FALSE (with a warning) if the BAR cannot be resolved or the
 * mapping fails.
 */
static boolean_t
virtio_map_cap(virtio_t *vio, virtio_pci_cap_t *cap)
{
	VERIFY(cap->vpc_type);

	/*
	 * Resolve the register set number for this capability's BAR on
	 * every call.  (A previous revision cached the lookup in function-
	 * scope statics, which was both non-reentrant and shared across
	 * ALL virtio instances: a second device could inherit a stale
	 * rnumber computed for a different device's BAR layout.)
	 */
	int rnumber = virtio_bar_to_rnumber(vio, cap->vpc_baridx);

	if (rnumber == -1 || ddi_regs_map_setup(vio->vio_dip, rnumber,
	    (caddr_t *)&cap->vpc_bar, cap->vpc_offset, cap->vpc_size,
	    &virtio_acc_attr, &cap->vpc_barh) != DDI_SUCCESS) {
		dev_err(vio->vio_dip, CE_WARN,
		    "Failed to map CAP %u @ "
		    "BAR%u 0x%" PRIx64 "+%" PRIx64,
		    cap->vpc_type, cap->vpc_baridx,
		    cap->vpc_offset, cap->vpc_size);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Walk the PCI capability list looking for virtio vendor-specific
 * capabilities and record the location (BAR, offset, size) of each of the
 * four modern register regions.  Only the first valid instance of each
 * type is kept; malformed entries are skipped.  A region was found iff
 * its vpc_type is non-zero afterwards.
 */
static void
virtio_discover_pci_caps(virtio_t *vio, ddi_acc_handle_t pci)
{
	uint16_t idx;

	for (idx = 0; ; idx++) {
		virtio_pci_cap_t *cap;
		uint16_t base;
		uint32_t id;

		if (pci_cap_probe(pci, idx, &id, &base) != DDI_SUCCESS)
			break;

		if (id != PCI_CAP_ID_VS)
			continue;

		uint8_t type = pci_cap_get(pci, PCI_CAP_CFGSZ_8, idx, base,
		    VIRTIO_PCI_CAP_TYPE);

		/*
		 * Minimum structure length: through the 32-bit length
		 * field; the notify capability additionally carries the
		 * 32-bit offset multiplier.
		 */
		uint8_t min_len = VIRTIO_PCI_CAP_BARLEN + sizeof (uint32_t);

		switch (type) {
		case VPC_COMMON_CFG:
			cap = &vio->vio_cap_common;
			break;
		case VPC_NOTIFY_CFG:
			cap = &vio->vio_cap_notify;
			min_len += sizeof (uint32_t);
			break;
		case VPC_ISR_CFG:
			cap = &vio->vio_cap_isr;
			break;
		case VPC_DEVICE_CFG:
			cap = &vio->vio_cap_device;
			break;
		default:
			continue;
		}

		uint8_t caplen = pci_cap_get(pci, PCI_CAP_CFGSZ_8, idx, base,
		    VIRTIO_PCI_CAP_LEN);
		if (caplen == PCI_EINVAL8 || caplen < min_len)
			continue;

		/* Keep only the first instance of each capability type. */
		if (cap->vpc_type != 0)
			continue;

		cap->vpc_baridx = pci_cap_get(pci, PCI_CAP_CFGSZ_8, idx, base,
		    VIRTIO_PCI_CAP_BAR);
		/*
		 * BUG FIX: the read-failure check previously tested
		 * cap->vpc_type (still zero at this point) instead of the
		 * BAR index that was just read, so an invalid read was
		 * never rejected.
		 */
		if (cap->vpc_baridx == PCI_EINVAL8)
			continue;

		cap->vpc_offset = pci_cap_get(pci, PCI_CAP_CFGSZ_32, idx, base,
		    VIRTIO_PCI_CAP_BAROFF);
		if (cap->vpc_offset == PCI_EINVAL32)
			continue;

		cap->vpc_size = pci_cap_get(pci, PCI_CAP_CFGSZ_32, idx, base,
		    VIRTIO_PCI_CAP_BARLEN);
		if (cap->vpc_size == PCI_EINVAL32)
			continue;

		if (type == VPC_NOTIFY_CFG) {
			vio->vio_multiplier = pci_cap_get(pci, PCI_CAP_CFGSZ_32,
			    idx, base, VIRTIO_PCI_CAP_MULTIPLIER);
			if (vio->vio_multiplier == PCI_EINVAL32)
				continue;
		}

		/* Mark the capability as found only once fully validated. */
		cap->vpc_type = type;
	}
}
/*
 * Set device status bits via the mode-specific backend, under the
 * framework mutex.
 */
static void
virtio_set_status(virtio_t *vio, uint8_t status)
{
	mutex_enter(&vio->vio_mutex);
	vio->vio_ops->vop_set_status_locked(vio, status);
	mutex_exit(&vio->vio_mutex);
}
/*
 * Reset the device via the mode-specific backend, under the framework
 * mutex.
 */
void
virtio_device_reset(virtio_t *vio)
{
	mutex_enter(&vio->vio_mutex);
	vio->vio_ops->vop_device_reset_locked(vio);
	mutex_exit(&vio->vio_mutex);
}
/*
 * Quiesce the device in preparation for detach: mark every queue as shut
 * down so new submissions are rejected, then reset the device.
 * Idempotent; a second call is a no-op.
 */
void
virtio_shutdown(virtio_t *vio)
{
	mutex_enter(&vio->vio_mutex);
	if (vio->vio_initlevel & VIRTIO_INITLEVEL_SHUTDOWN) {
		/* Already shut down. */
		mutex_exit(&vio->vio_mutex);
		return;
	}

	/* Flag each queue so queue routines stop touching the device. */
	for (virtio_queue_t *viq = list_head(&vio->vio_queues); viq != NULL;
	    viq = list_next(&vio->vio_queues, viq)) {
		mutex_enter(&viq->viq_mutex);
		viq->viq_shutdown = B_TRUE;
		mutex_exit(&viq->viq_mutex);
	}

	vio->vio_ops->vop_device_reset_locked(vio);
	vio->vio_initlevel |= VIRTIO_INITLEVEL_SHUTDOWN;
	mutex_exit(&vio->vio_mutex);
}
/*
 * quiesce(9E) support: reset the device without taking any locks, as
 * required in the single-threaded quiesce context.  If a shutdown already
 * reset the device there is nothing to do.
 */
int
virtio_quiesce(virtio_t *vio)
{
	if (vio->vio_initlevel & VIRTIO_INITLEVEL_SHUTDOWN) {
		return (DDI_SUCCESS);
	}

	vio->vio_ops->vop_device_reset_locked(vio);

	return (DDI_SUCCESS);
}
/*
 * Read the device configuration generation counter, used to detect a
 * configuration change racing with multi-field reads.
 */
uint8_t
virtio_dev_getgen(virtio_t *vio)
{
	return (vio->vio_ops->vop_device_cfg_gen(vio));
}
/*
 * Read an 8-bit value from the device-specific configuration region at
 * the given byte offset.
 */
uint8_t
virtio_dev_get8(virtio_t *vio, uintptr_t offset)
{
	return (vio->vio_ops->vop_device_cfg_get8(vio, offset));
}
/*
 * Read a 16-bit value from the device-specific configuration region at
 * the given byte offset.
 */
uint16_t
virtio_dev_get16(virtio_t *vio, uintptr_t offset)
{
	return (vio->vio_ops->vop_device_cfg_get16(vio, offset));
}
/*
 * Read a 32-bit value from the device-specific configuration region at
 * the given byte offset.
 */
uint32_t
virtio_dev_get32(virtio_t *vio, uintptr_t offset)
{
	return (vio->vio_ops->vop_device_cfg_get32(vio, offset));
}
/*
 * Read a 64-bit value from the device-specific configuration region at
 * the given byte offset.
 */
uint64_t
virtio_dev_get64(virtio_t *vio, uintptr_t offset)
{
	return (vio->vio_ops->vop_device_cfg_get64(vio, offset));
}
/*
 * Write an 8-bit value into the device-specific configuration region at
 * the given byte offset.
 */
void
virtio_dev_put8(virtio_t *vio, uintptr_t offset, uint8_t value)
{
	vio->vio_ops->vop_device_cfg_put8(vio, offset, value);
}
/*
 * Write a 16-bit value into the device-specific configuration region at
 * the given byte offset.
 */
void
virtio_dev_put16(virtio_t *vio, uintptr_t offset, uint16_t value)
{
	vio->vio_ops->vop_device_cfg_put16(vio, offset, value);
}
/*
 * Write a 32-bit value into the device-specific configuration region at
 * the given byte offset.
 */
void
virtio_dev_put32(virtio_t *vio, uintptr_t offset, uint32_t value)
{
	vio->vio_ops->vop_device_cfg_put32(vio, offset, value);
}
/*
 * AVL comparator for in-flight chains, ordered by head descriptor index.
 */
static int
virtio_inflight_compar(const void *lp, const void *rp)
{
	const virtio_chain_t *l = lp;
	const virtio_chain_t *r = rp;

	if (l->vic_head == r->vic_head)
		return (0);

	return (l->vic_head < r->vic_head ? -1 : 1);
}
/*
 * Allocate and configure a virtqueue: validate the request, size the
 * queue against the device, allocate descriptor-ID space and ring DMA
 * memory, register the queue with the framework, and program its address
 * into the device.  Returns NULL on failure with all partial state
 * released.
 */
virtio_queue_t *
virtio_queue_alloc(virtio_t *vio, uint16_t qidx, const char *name,
    ddi_intr_handler_t *func, void *funcarg, boolean_t force_direct,
    uint_t max_segs)
{
	char space_name[256];
	uint64_t noff = 0;
	uint16_t qsz;

	if (max_segs < 1) {
		/* A descriptor chain needs at least one entry. */
		dev_err(vio->vio_dip, CE_WARN, "queue \"%s\" (%u) "
		    "segment count must be at least 1", name, (uint_t)qidx);
		return (NULL);
	}

	mutex_enter(&vio->vio_mutex);

	if (vio->vio_initlevel & VIRTIO_INITLEVEL_PROVIDER) {
		/* Queues cannot be added after virtio_init_complete(). */
		dev_err(vio->vio_dip, CE_WARN, "queue \"%s\" (%u) "
		    "alloc after init complete", name, (uint_t)qidx);
		mutex_exit(&vio->vio_mutex);
		return (NULL);
	}

	qsz = vio->vio_ops->vop_queue_size_get(vio, qidx);
	if (qsz == 0) {
		dev_err(vio->vio_dip, CE_WARN, "queue \"%s\" (%u) "
		    "does not exist on device", name, (uint_t)qidx);
		mutex_exit(&vio->vio_mutex);
		return (NULL);
	}

	if (vio->vio_ops->vop_queue_size_set != NULL)
		vio->vio_ops->vop_queue_size_set(vio, qidx, qsz);

	if (virtio_modern(vio)) {
		/*
		 * Modern devices supply a per-queue notification offset
		 * within the notify capability region; make sure a 32-bit
		 * write at that offset stays inside the region.
		 */
		noff = vio->vio_ops->vop_queue_noff_get(vio, qidx);
		if (noff > vio->vio_cap_notify.vpc_size - sizeof (uint32_t)) {
			dev_err(vio->vio_dip, CE_WARN, "queue \"%s\" (%u) "
			    "invalid notification offset 0x%" PRIx64 " "
			    "for notify region of size 0x%" PRIx64,
			    name, (uint_t)qidx,
			    noff, vio->vio_cap_notify.vpc_size);
			/*
			 * BUG FIX: this error path previously returned
			 * with vio_mutex still held, deadlocking the next
			 * acquirer.
			 */
			mutex_exit(&vio->vio_mutex);
			return (NULL);
		}
	}

	mutex_exit(&vio->vio_mutex);

	virtio_queue_t *viq = kmem_zalloc(sizeof (*viq), KM_SLEEP);
	viq->viq_virtio = vio;
	viq->viq_name = name;
	viq->viq_index = qidx;
	viq->viq_size = qsz;
	viq->viq_noff = noff;
	viq->viq_func = func;
	viq->viq_funcarg = funcarg;
	viq->viq_max_segs = max_segs;
	avl_create(&viq->viq_inflight, virtio_inflight_compar,
	    sizeof (virtio_chain_t), offsetof(virtio_chain_t, vic_node));
	mutex_init(&viq->viq_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Use indirect descriptors where negotiated, unless the client
	 * explicitly requested direct descriptors.
	 */
	if (virtio_features_present(vio, VIRTIO_F_RING_INDIRECT_DESC) &&
	    !force_direct) {
		viq->viq_indirect = B_TRUE;
	}

	/* ID space for direct descriptor table slots: [0, qsz). */
	(void) snprintf(space_name, sizeof (space_name), "%s%d_vq_%s",
	    ddi_get_name(vio->vio_dip), ddi_get_instance(vio->vio_dip), name);
	if ((viq->viq_descmap = id_space_create(space_name, 0, qsz)) == NULL) {
		dev_err(vio->vio_dip, CE_WARN, "could not allocate descriptor "
		    "ID space");
		virtio_queue_free(viq);
		return (NULL);
	}

	/*
	 * Lay out the descriptor table, driver (available) ring, and
	 * device (used) ring in one contiguous DMA allocation, with the
	 * alignment the selected interface requires.
	 */
	const uint_t align = virtio_modern(vio) ? MODERN_VQ_ALIGN :
	    VIRTIO_PAGE_SIZE;
	const size_t sz_descs = sizeof (virtio_vq_desc_t) * qsz;
	const size_t sz_driver = P2ROUNDUP_TYPED(sz_descs +
	    sizeof (virtio_vq_driver_t) +
	    sizeof (uint16_t) * qsz,
	    align, size_t);
	const size_t sz_device = P2ROUNDUP_TYPED(sizeof (virtio_vq_device_t) +
	    sizeof (virtio_vq_elem_t) * qsz,
	    align, size_t);
	if (virtio_dma_init(vio, &viq->viq_dma, sz_driver + sz_device,
	    &virtio_dma_attr_queue, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    KM_SLEEP) != DDI_SUCCESS) {
		dev_err(vio->vio_dip, CE_WARN, "could not allocate queue "
		    "DMA memory");
		virtio_queue_free(viq);
		return (NULL);
	}

	viq->viq_dma_descs = virtio_dma_va(&viq->viq_dma, 0);
	viq->viq_dma_driver = virtio_dma_va(&viq->viq_dma, sz_descs);
	viq->viq_dma_device = virtio_dma_va(&viq->viq_dma, sz_driver);

	mutex_enter(&vio->vio_mutex);

	/* Reject a second registration for the same queue index. */
	for (virtio_queue_t *chkvq = list_head(&vio->vio_queues); chkvq != NULL;
	    chkvq = list_next(&vio->vio_queues, chkvq)) {
		if (chkvq->viq_index == qidx) {
			dev_err(vio->vio_dip, CE_WARN, "attempt to register "
			    "queue \"%s\" with same index (%d) as queue \"%s\"",
			    name, qidx, chkvq->viq_name);
			mutex_exit(&vio->vio_mutex);
			virtio_queue_free(viq);
			return (NULL);
		}
	}

	list_insert_tail(&vio->vio_queues, viq);

	/* Ensure ring contents are visible before handing it to the device. */
	membar_producer();
	VIRTQ_DMA_SYNC_FORDEV(viq);

	const uint64_t pa = virtio_dma_cookie_pa(&viq->viq_dma, 0);
	vio->vio_ops->vop_queue_addr_set(vio, qidx,
	    pa, pa + sz_descs, pa + sz_driver);

	mutex_exit(&vio->vio_mutex);
	return (viq);
}
/*
 * Release a virtqueue: disable it on the device (unless a shutdown has
 * already reset the device), free its DMA ring, ID space, and the queue
 * structure itself.  May only be called once interrupt handlers have been
 * torn down; there must be no in-flight chains.
 */
static void
virtio_queue_free(virtio_queue_t *viq)
{
	virtio_t *vio = viq->viq_virtio;

	VERIFY(!(vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ADDED));

	mutex_enter(&viq->viq_mutex);
	if (!viq->viq_shutdown) {
		/* Detach the queue from the device before freeing memory. */
		vio->vio_ops->vop_queue_enable_set(vio, viq->viq_index, false);
		vio->vio_ops->vop_queue_addr_set(vio, viq->viq_index, 0, 0, 0);
	}

	virtio_dma_fini(&viq->viq_dma);

	VERIFY(avl_is_empty(&viq->viq_inflight));
	avl_destroy(&viq->viq_inflight);
	/* May be NULL if virtio_queue_alloc() failed before creating it. */
	if (viq->viq_descmap != NULL) {
		id_space_destroy(viq->viq_descmap);
	}

	mutex_exit(&viq->viq_mutex);
	mutex_destroy(&viq->viq_mutex);

	kmem_free(viq, sizeof (*viq));
}
/*
 * Toggle VIRTQ_AVAIL_F_NO_INTERRUPT in the driver ring flags to ask the
 * device to suppress (or resume) used-buffer notifications for this
 * queue.  The flag is a hint; the device is not obliged to honour it.
 */
void
virtio_queue_no_interrupt(virtio_queue_t *viq, boolean_t stop_interrupts)
{
	mutex_enter(&viq->viq_mutex);

	if (stop_interrupts) {
		viq->viq_dma_driver->vqdr_flags |=
		    viq_gtoh16(viq, VIRTQ_AVAIL_F_NO_INTERRUPT);
	} else {
		viq->viq_dma_driver->vqdr_flags &=
		    viq_gtoh16(viq, ~VIRTQ_AVAIL_F_NO_INTERRUPT);
	}
	VIRTQ_DMA_SYNC_FORDEV(viq);

	mutex_exit(&viq->viq_mutex);
}
/*
 * Look up the in-flight chain whose head descriptor matches "index",
 * removing it from the in-flight tree if found.  Returns NULL when no
 * chain with that head is outstanding.
 */
static virtio_chain_t *
virtio_queue_complete(virtio_queue_t *viq, uint_t index)
{
	virtio_chain_t search;
	virtio_chain_t *found;

	VERIFY(MUTEX_HELD(&viq->viq_mutex));

	bzero(&search, sizeof (search));
	search.vic_head = index;

	found = avl_find(&viq->viq_inflight, &search, NULL);
	if (found != NULL) {
		avl_remove(&viq->viq_inflight, found);
	}

	return (found);
}
/*
 * Return the number of descriptors in this queue, as negotiated at
 * allocation time.
 */
uint_t
virtio_queue_size(virtio_queue_t *viq)
{
	return (viq->viq_size);
}
/*
 * Return the number of chains currently submitted to the device and not
 * yet completed.
 */
uint_t
virtio_queue_nactive(virtio_queue_t *viq)
{
	uint_t count;

	mutex_enter(&viq->viq_mutex);
	count = avl_numnodes(&viq->viq_inflight);
	mutex_exit(&viq->viq_mutex);

	return (count);
}
/*
 * Retrieve one completed chain from the device (used) ring, or NULL if
 * none is pending or the queue is shut down.  Panics if the device
 * reports a descriptor we do not have an in-flight chain for, as that
 * indicates ring corruption.
 */
virtio_chain_t *
virtio_queue_poll(virtio_queue_t *viq)
{
	mutex_enter(&viq->viq_mutex);
	if (viq->viq_shutdown) {
		/* The device has been reset; no more completions arrive. */
		mutex_exit(&viq->viq_mutex);
		return (NULL);
	}

	VIRTQ_DMA_SYNC_FORKERNEL(viq);

	/* Compare the device's ring index with the last one we consumed. */
	uint16_t dindex = viq_htog16(viq, viq->viq_dma_device->vqde_index);
	if (viq->viq_device_index == dindex) {
		mutex_exit(&viq->viq_mutex);
		return (NULL);
	}

	/* Make sure the ring entry is read after the index. */
	membar_consumer();

	/* Consume one entry; the ring index wraps modulo the queue size. */
	uint16_t index = (viq->viq_device_index++) % viq->viq_size;

	uint16_t start = viq_htog16(viq,
	    viq->viq_dma_device->vqde_ring[index].vqe_start);
	uint32_t len = viq_htog32(viq,
	    viq->viq_dma_device->vqde_ring[index].vqe_len);

	virtio_chain_t *vic;
	if ((vic = virtio_queue_complete(viq, start)) == NULL) {
		dev_err(viq->viq_virtio->vio_dip, CE_PANIC,
		    "queue \"%s\" ring entry %u (descriptor %u) has no chain",
		    viq->viq_name, (uint16_t)index, (uint16_t)start);
	}

	vic->vic_received_length = len;

	mutex_exit(&viq->viq_mutex);

	return (vic);
}
/*
 * After virtio_shutdown(), reclaim outstanding chains one at a time so
 * the client driver can release their resources.  Returns NULL once the
 * in-flight tree is empty.  Panics if called without a prior shutdown.
 */
virtio_chain_t *
virtio_queue_evacuate(virtio_queue_t *viq)
{
	virtio_t *vio = viq->viq_virtio;

	mutex_enter(&vio->vio_mutex);
	if (!(vio->vio_initlevel & VIRTIO_INITLEVEL_SHUTDOWN)) {
		dev_err(vio->vio_dip, CE_PANIC,
		    "virtio_queue_evacuate() without virtio_shutdown()");
	}
	mutex_exit(&vio->vio_mutex);

	mutex_enter(&viq->viq_mutex);
	VERIFY(viq->viq_shutdown);

	virtio_chain_t *vic = avl_first(&viq->viq_inflight);
	if (vic != NULL) {
		avl_remove(&viq->viq_inflight, vic);
	}

	mutex_exit(&viq->viq_mutex);

	return (vic);
}
/*
 * Return the byte count the device reported writing into this chain, as
 * recorded by virtio_queue_poll().
 */
size_t
virtio_chain_received_length(virtio_chain_t *vic)
{
	return (vic->vic_received_length);
}
/*
 * Allocate a descriptor chain for this queue.  For an indirect queue the
 * chain uses one direct descriptor that points at a separately allocated
 * indirect descriptor table sized for viq_max_segs entries; otherwise the
 * chain tracks up to viq_max_segs direct descriptors in its trailing
 * array.  May return NULL when kmflags does not permit sleeping.
 */
virtio_chain_t *
virtio_chain_alloc(virtio_queue_t *viq, int kmflags)
{
	virtio_t *vio = viq->viq_virtio;
	virtio_chain_t *vic;
	uint_t cap;

	/* Direct descriptor slots tracked by this chain. */
	if (viq->viq_indirect) {
		cap = 1;
	} else {
		cap = viq->viq_max_segs;
	}

	/* Structure plus the flexible array of direct descriptor IDs. */
	size_t vicsz = sizeof (*vic) + sizeof (uint16_t) * cap;
	if ((vic = kmem_zalloc(vicsz, kmflags)) == NULL) {
		return (NULL);
	}
	vic->vic_vq = viq;
	vic->vic_direct_capacity = cap;

	if (viq->viq_indirect) {
		if (virtio_dma_init(vio, &vic->vic_indirect_dma,
		    sizeof (virtio_vq_desc_t) * viq->viq_max_segs,
		    &virtio_dma_attr_indirect,
		    DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
		    kmflags) != DDI_SUCCESS) {
			goto fail;
		}

		/*
		 * Permanently attach the one direct descriptor that
		 * points at the indirect table.
		 */
		mutex_enter(&viq->viq_mutex);
		if (virtio_chain_append_impl(vic,
		    virtio_dma_cookie_pa(&vic->vic_indirect_dma, 0), 0,
		    VIRTQ_DESC_F_INDIRECT) != DDI_SUCCESS) {
			mutex_exit(&viq->viq_mutex);
			goto fail;
		}
		mutex_exit(&viq->viq_mutex);

		VERIFY3U(vic->vic_direct_used, ==, 1);

		vic->vic_indirect_capacity = viq->viq_max_segs;
	}

	return (vic);

fail:
	/* Safe on a never-initialised (zeroed) DMA handle as well. */
	virtio_dma_fini(&vic->vic_indirect_dma);
	kmem_free(vic, vicsz);
	return (NULL);
}
/*
 * Return the client driver's opaque per-chain data pointer.
 */
void *
virtio_chain_data(virtio_chain_t *vic)
{
	return (vic->vic_data);
}
/*
 * Attach an opaque client data pointer to this chain; ownership stays
 * with the caller.
 */
void
virtio_chain_data_set(virtio_chain_t *vic, void *data)
{
	vic->vic_data = data;
}
/*
 * Reset a chain for reuse.  For indirect chains, zero the indirect table
 * but keep the single direct descriptor that points at it; for direct
 * chains, return every descriptor ID to the queue's ID space.
 */
void
virtio_chain_clear(virtio_chain_t *vic)
{
	if (vic->vic_indirect_capacity != 0) {
		/* Indirect chains hold exactly one direct descriptor. */
		VERIFY3U(vic->vic_direct_capacity, ==, 1);

		if (vic->vic_indirect_used > 0) {
			vic->vic_indirect_used = 0;
			bzero(virtio_dma_va(&vic->vic_indirect_dma, 0),
			    virtio_dma_size(&vic->vic_indirect_dma));
		}
	} else if (vic->vic_direct_capacity > 0) {
		for (uint_t i = 0; i < vic->vic_direct_used; i++) {
			id_free(vic->vic_vq->viq_descmap, vic->vic_direct[i]);
			vic->vic_direct[i] = 0;
		}
		vic->vic_direct_used = 0;
	}
}
/*
 * Destroy a chain: release its descriptors, then (for indirect chains)
 * the anchoring direct descriptor and the indirect table DMA memory, and
 * finally the chain structure itself.
 */
void
virtio_chain_free(virtio_chain_t *vic)
{
	virtio_chain_clear(vic);

	if (vic->vic_indirect_capacity > 0) {
		VERIFY3U(vic->vic_direct_capacity, ==, 1);

		/* Return the anchor descriptor and free the table. */
		id_free(vic->vic_vq->viq_descmap, vic->vic_direct[0]);
		virtio_dma_fini(&vic->vic_indirect_dma);
	}

	/* Must match the size computed in virtio_chain_alloc(). */
	size_t vicsz = sizeof (*vic) +
	    vic->vic_direct_capacity * sizeof (uint16_t);

	kmem_free(vic, vicsz);
}
/*
 * Allocate a direct descriptor slot from the queue's ID space.  Returns 0
 * and stores the index, or ENOMEM when the table is exhausted.
 */
static inline int
virtio_queue_descmap_alloc(virtio_queue_t *viq, uint_t *indexp)
{
	id_t index;

	if ((index = id_alloc_nosleep(viq->viq_descmap)) == -1) {
		return (ENOMEM);
	}

	VERIFY3S(index, >=, 0);
	/*
	 * The ID space covers [0, viq_size); an index equal to viq_size
	 * would address one past the end of the descriptor table, so the
	 * invariant must be strictly less-than.
	 */
	VERIFY3S(index, <, viq->viq_size);

	*indexp = (uint_t)index;
	return (0);
}
/*
 * Append one buffer to a chain.  For indirect chains the descriptor goes
 * into the chain's private indirect table; for direct chains a slot is
 * allocated from the queue's descriptor table.  In both cases the
 * previous descriptor (if any) is linked to the new one via the NEXT
 * flag.  Returns DDI_SUCCESS or DDI_FAILURE when the chain is full or no
 * descriptor is available.
 */
static int
virtio_chain_append_impl(virtio_chain_t *vic, uint64_t pa, size_t len,
    uint16_t flags)
{
	virtio_queue_t *viq = vic->vic_vq;
	virtio_vq_desc_t *vqd;
	uint_t index;

	VERIFY(MUTEX_HELD(&viq->viq_mutex));

	if (vic->vic_indirect_capacity != 0) {
		if (vic->vic_indirect_used >= vic->vic_indirect_capacity) {
			return (DDI_FAILURE);
		}
		vqd = virtio_dma_va(&vic->vic_indirect_dma, 0);
		/*
		 * Indirect table slots are used consecutively, so slot
		 * index - 1 is always the previous descriptor.
		 */
		if ((index = vic->vic_indirect_used++) > 0) {
			vqd[index - 1].vqd_flags |=
			    viq_gtoh16(viq, VIRTQ_DESC_F_NEXT);
			vqd[index - 1].vqd_next = viq_gtoh16(viq, index);
		}
	} else {
		if (vic->vic_direct_used >= vic->vic_direct_capacity) {
			return (DDI_FAILURE);
		}
		if (virtio_queue_descmap_alloc(viq, &index) != 0) {
			return (DDI_FAILURE);
		}
		vqd = virtio_dma_va(&viq->viq_dma, 0);
		/*
		 * Direct slots are not consecutive; link from the slot we
		 * recorded last.
		 */
		if (vic->vic_direct_used > 0) {
			uint16_t p = vic->vic_direct[vic->vic_direct_used - 1];
			vqd[p].vqd_flags |=
			    viq_gtoh16(viq, VIRTQ_DESC_F_NEXT);
			vqd[p].vqd_next = viq_gtoh16(viq, index);
		}
		vic->vic_direct[vic->vic_direct_used++] = index;
	}

	/* Fill in the new descriptor (guest-endian conversions applied). */
	vqd[index].vqd_addr = viq_gtoh64(viq, pa);
	vqd[index].vqd_len = viq_gtoh32(viq, len);
	vqd[index].vqd_flags = viq_gtoh16(viq, flags);
	vqd[index].vqd_next = 0;

	return (DDI_SUCCESS);
}
/*
 * Public wrapper for appending a buffer to a chain.  Translates the
 * transfer direction into descriptor flags (VIRTQ_DESC_F_WRITE marks a
 * buffer the device writes into) and performs the append under the queue
 * mutex.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
virtio_chain_append(virtio_chain_t *vic, uint64_t pa, size_t len,
    virtio_direction_t dir)
{
	virtio_queue_t *viq = vic->vic_vq;
	uint16_t flags;

	switch (dir) {
	case VIRTIO_DIR_DEVICE_WRITES:
		flags = VIRTQ_DESC_F_WRITE;
		break;
	case VIRTIO_DIR_DEVICE_READS:
		flags = 0;
		break;
	default:
		panic("unknown direction value %u", dir);
	}

	mutex_enter(&viq->viq_mutex);
	int r = virtio_chain_append_impl(vic, pa, len, flags);
	mutex_exit(&viq->viq_mutex);

	return (r);
}
/*
 * Publish the driver ring index to the device and, unless the device has
 * set VIRTQ_USED_F_NO_NOTIFY, ring its doorbell.  Caller holds the queue
 * mutex.
 */
static void
virtio_queue_flush_locked(virtio_queue_t *viq)
{
	virtio_t *vio = viq->viq_virtio;

	VERIFY(MUTEX_HELD(&viq->viq_mutex));

	/* Ring entries must be visible before the index that covers them. */
	membar_producer();
	viq->viq_dma_driver->vqdr_index =
	    viq_gtoh16(viq, viq->viq_driver_index);
	VIRTQ_DMA_SYNC_FORDEV(viq);

	/* Re-read device flags to honour notification suppression. */
	VIRTQ_DMA_SYNC_FORKERNEL(viq);
	if (!(viq->viq_dma_device->vqde_flags &
	    viq_gtoh16(viq, VIRTQ_USED_F_NO_NOTIFY))) {
		vio->vio_ops->vop_queue_notify(viq);
	}
}
/*
 * Public wrapper: flush pending submissions to the device under the
 * queue mutex.
 */
void
virtio_queue_flush(virtio_queue_t *viq)
{
	mutex_enter(&viq->viq_mutex);
	virtio_queue_flush_locked(viq);
	mutex_exit(&viq->viq_mutex);
}
/*
 * Submit a chain to the device: place its head descriptor into the driver
 * (available) ring and track it in the in-flight tree, keyed by head
 * descriptor.  If "flush" is set, the new ring index is published and the
 * device notified immediately; otherwise the caller batches with a later
 * virtio_queue_flush().
 */
void
virtio_chain_submit(virtio_chain_t *vic, boolean_t flush)
{
	virtio_queue_t *viq = vic->vic_vq;

	mutex_enter(&viq->viq_mutex);

	if (vic->vic_indirect_capacity != 0) {
		/*
		 * Patch the anchoring direct descriptor's length to cover
		 * exactly the indirect entries in use, then sync the table
		 * for the device.
		 */
		virtio_vq_desc_t *vqd = virtio_dma_va(&viq->viq_dma, 0);

		VERIFY3U(vic->vic_direct_used, ==, 1);

		vqd[vic->vic_direct[0]].vqd_len = viq_gtoh32(viq,
		    sizeof (virtio_vq_desc_t) * vic->vic_indirect_used);

		virtio_dma_sync(&vic->vic_indirect_dma, DDI_DMA_SYNC_FORDEV);
	}

	/* The available ring index wraps modulo the queue size. */
	uint16_t index = (viq->viq_driver_index++) % viq->viq_size;

	viq->viq_dma_driver->vqdr_ring[index] =
	    viq_gtoh16(viq, vic->vic_direct[0]);

	/* Key for virtio_queue_complete() when the device finishes. */
	vic->vic_head = vic->vic_direct[0];
	avl_add(&viq->viq_inflight, vic);

	if (flush) {
		virtio_queue_flush_locked(vic->vic_vq);
	}

	mutex_exit(&viq->viq_mutex);
}
/*
 * Map a DDI interrupt type constant to a short human-readable name for
 * diagnostic messages.
 */
static const char *
virtio_interrupt_type_name(int type)
{
	if (type == DDI_INTR_TYPE_MSIX)
		return ("MSI-X");
	if (type == DDI_INTR_TYPE_MSI)
		return ("MSI");
	if (type == DDI_INTR_TYPE_FIXED)
		return ("fixed");
	return ("?");
}
/*
 * Allocate "nrequired" interrupts of the given DDI type.  On success the
 * handle array is stored in vio_interrupts, the count in
 * vio_ninterrupts, and VIRTIO_INITLEVEL_INT_ALLOC is set.  Allocation is
 * strict: fewer than nrequired is a failure.
 */
static int
virtio_interrupts_alloc(virtio_t *vio, int type, int nrequired)
{
	dev_info_t *dip = vio->vio_dip;
	int nintrs = 0;
	int navail = 0;

	VERIFY(MUTEX_HELD(&vio->vio_mutex));
	VERIFY(!(vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ALLOC));

	if (ddi_intr_get_nintrs(dip, type, &nintrs) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "could not count %s interrupts",
		    virtio_interrupt_type_name(type));
		return (DDI_FAILURE);
	}
	if (nintrs < 1) {
		dev_err(dip, CE_WARN, "no %s interrupts supported",
		    virtio_interrupt_type_name(type));
		return (DDI_FAILURE);
	}

	if (ddi_intr_get_navail(dip, type, &navail) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "could not count available %s interrupts",
		    virtio_interrupt_type_name(type));
		return (DDI_FAILURE);
	}
	if (navail < nrequired) {
		dev_err(dip, CE_WARN, "need %d %s interrupts, but only %d "
		    "available", nrequired, virtio_interrupt_type_name(type),
		    navail);
		return (DDI_FAILURE);
	}

	VERIFY3P(vio->vio_interrupts, ==, NULL);
	vio->vio_interrupts = kmem_zalloc(
	    sizeof (ddi_intr_handle_t) * nrequired, KM_SLEEP);

	int r;
	if ((r = ddi_intr_alloc(dip, vio->vio_interrupts, type, 0, nrequired,
	    &vio->vio_ninterrupts, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "%s interrupt allocation failure (%d)",
		    virtio_interrupt_type_name(type), r);
		kmem_free(vio->vio_interrupts,
		    sizeof (ddi_intr_handle_t) * nrequired);
		vio->vio_interrupts = NULL;
		return (DDI_FAILURE);
	}

	vio->vio_initlevel |= VIRTIO_INITLEVEL_INT_ALLOC;
	vio->vio_interrupt_type = type;
	return (DDI_SUCCESS);
}
/*
 * Shared handler used with fixed interrupts, where one vector covers both
 * queue activity and configuration changes.  Reads the ISR status (which
 * also acknowledges the interrupt on the device) and dispatches to each
 * queue handler and/or the configuration-change handler as indicated.
 */
static uint_t
virtio_shared_isr(caddr_t arg0, caddr_t arg1)
{
	virtio_t *vio = (virtio_t *)arg0;
	uint_t r = DDI_INTR_UNCLAIMED;
	uint8_t isr;

	mutex_enter(&vio->vio_mutex);

	/* Reading the status acknowledges the interrupt. */
	isr = vio->vio_ops->vop_isr_status(vio);

	if ((isr & VIRTIO_ISR_CHECK_QUEUES) != 0) {
		r = DDI_INTR_CLAIMED;

		for (virtio_queue_t *viq = list_head(&vio->vio_queues);
		    viq != NULL; viq = list_next(&vio->vio_queues, viq)) {
			if (viq->viq_func != NULL) {
				/*
				 * Drop the framework mutex while running
				 * the client handler, which may call back
				 * into the framework.
				 */
				mutex_exit(&vio->vio_mutex);
				(void) viq->viq_func(viq->viq_funcarg, arg0);
				mutex_enter(&vio->vio_mutex);

				/* Stop dispatching if a shutdown raced in. */
				if (vio->vio_initlevel &
				    VIRTIO_INITLEVEL_SHUTDOWN) {
					break;
				}
			}
		}
	}
	mutex_exit(&vio->vio_mutex);

	/*
	 * The cfgchange handler pointer is read without the mutex here;
	 * it is only set before interrupts are added (see
	 * virtio_register_cfgchange_handler()) and never changed after.
	 */
	if ((isr & VIRTIO_ISR_CHECK_CONFIG) != 0) {
		r = DDI_INTR_CLAIMED;
		if (vio->vio_cfgchange_handler != NULL) {
			(void) vio->vio_cfgchange_handler(
			    (caddr_t)vio->vio_cfgchange_handlerarg,
			    (caddr_t)vio);
		}
	}

	return (r);
}
/*
 * Allocate interrupts and attach handlers.  MSI-X is preferred, with one
 * vector per queue handler plus one for the configuration-change handler
 * if registered; otherwise a single fixed interrupt with the shared ISR
 * is used.  On any failure everything is unwound via
 * virtio_interrupts_teardown().
 */
static int
virtio_interrupts_setup(virtio_t *vio, int allow_types)
{
	dev_info_t *dip = vio->vio_dip;
	int types;
	int count = 0;

	mutex_enter(&vio->vio_mutex);

	/* One MSI-X vector per queue that registered a handler. */
	for (virtio_queue_t *viq = list_head(&vio->vio_queues); viq != NULL;
	    viq = list_next(&vio->vio_queues, viq)) {
		if (viq->viq_func != NULL) {
			count++;
		}
	}
	/* ...plus one for configuration-change notifications. */
	if (vio->vio_cfgchange_handler != NULL)
		count++;

	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "could not get supported interrupts");
		mutex_exit(&vio->vio_mutex);
		return (DDI_FAILURE);
	}

	/* Restrict to the types the client driver permits. */
	if (allow_types != VIRTIO_ANY_INTR_TYPE) {
		types &= allow_types;
	}

	if (types & DDI_INTR_TYPE_MSIX) {
		if (virtio_interrupts_alloc(vio, DDI_INTR_TYPE_MSIX,
		    count) == DDI_SUCCESS) {
			goto add_handlers;
		}
	}

	/* Fall back to a single shared fixed interrupt. */
	if (types & DDI_INTR_TYPE_FIXED) {
		if (virtio_interrupts_alloc(vio, DDI_INTR_TYPE_FIXED, 1) ==
		    DDI_SUCCESS) {
			goto add_handlers;
		}
	}

	dev_err(dip, CE_WARN, "interrupt allocation failed");
	mutex_exit(&vio->vio_mutex);
	return (DDI_FAILURE);

add_handlers:
	/*
	 * Determine the highest interrupt priority across all vectors;
	 * this becomes the priority used for the framework mutexes.
	 * High-level interrupts are not supported.
	 */
	for (int i = 0; i < vio->vio_ninterrupts; i++) {
		uint_t ipri;

		if (ddi_intr_get_pri(vio->vio_interrupts[i], &ipri) !=
		    DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "could not determine interrupt "
			    "priority");
			goto fail;
		}
		if (ipri >= ddi_intr_get_hilevel_pri()) {
			dev_err(dip, CE_WARN, "high level interrupts not "
			    "supported");
			goto fail;
		}
		if (i == 0 || ipri > vio->vio_interrupt_priority) {
			vio->vio_interrupt_priority = ipri;
		}
	}

	if (ddi_intr_get_cap(vio->vio_interrupts[0],
	    &vio->vio_interrupt_cap) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "failed to get interrupt capabilities");
		goto fail;
	}

	if (vio->vio_interrupt_type == DDI_INTR_TYPE_FIXED) {
		/* One shared handler covers queues and config changes. */
		VERIFY3S(vio->vio_ninterrupts, ==, 1);
		if (ddi_intr_add_handler(vio->vio_interrupts[0],
		    virtio_shared_isr, (caddr_t)vio, NULL) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "adding shared %s interrupt "
			    "handler failed", virtio_interrupt_type_name(
			    vio->vio_interrupt_type));
			goto fail;
		}
		goto done;
	}

	VERIFY3S(vio->vio_ninterrupts, ==, count);

	/*
	 * MSI-X: the configuration-change handler (if any) always takes
	 * vector 0, followed by one vector per queue handler.
	 */
	uint_t n = 0;

	if (vio->vio_cfgchange_handler != NULL) {
		if (ddi_intr_add_handler(vio->vio_interrupts[n],
		    vio->vio_cfgchange_handler,
		    (caddr_t)vio->vio_cfgchange_handlerarg,
		    (caddr_t)vio) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN,
			    "adding configuration change interrupt failed");
			goto fail;
		}
		vio->vio_cfgchange_handler_added = B_TRUE;
		vio->vio_cfgchange_handler_index = n;
		n++;
	}

	for (virtio_queue_t *viq = list_head(&vio->vio_queues); viq != NULL;
	    viq = list_next(&vio->vio_queues, viq)) {
		if (viq->viq_func == NULL) {
			continue;
		}

		if (ddi_intr_add_handler(vio->vio_interrupts[n],
		    viq->viq_func, (caddr_t)viq->viq_funcarg,
		    (caddr_t)vio) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "adding interrupt %u (%s) failed",
			    n, viq->viq_name);
			goto fail;
		}

		viq->viq_handler_index = n;
		viq->viq_handler_added = B_TRUE;
		n++;
	}

done:
	vio->vio_initlevel |= VIRTIO_INITLEVEL_INT_ADDED;
	mutex_exit(&vio->vio_mutex);
	return (DDI_SUCCESS);

fail:
	virtio_interrupts_teardown(vio);
	mutex_exit(&vio->vio_mutex);
	return (DDI_FAILURE);
}
/*
 * Undo virtio_interrupts_setup(): disable interrupts, remove every
 * handler that was added, free the allocated vectors, and clear the
 * associated init levels and cached interrupt state.  Safe to call on a
 * partially set up interrupt configuration.
 */
static void
virtio_interrupts_teardown(virtio_t *vio)
{
	VERIFY(MUTEX_HELD(&vio->vio_mutex));

	virtio_interrupts_disable_locked(vio);

	if (vio->vio_interrupt_type == DDI_INTR_TYPE_FIXED) {
		/* Fixed mode has at most the single shared handler. */
		if (vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ADDED) {
			int r;

			VERIFY3S(vio->vio_ninterrupts, ==, 1);
			if ((r = ddi_intr_remove_handler(
			    vio->vio_interrupts[0])) != DDI_SUCCESS) {
				dev_err(vio->vio_dip, CE_WARN, "removing "
				    "shared interrupt handler failed (%d)", r);
			}
		}
	} else {
		/*
		 * MSI-X: the cfgchange handler, when present, was added
		 * first and therefore occupies vector 0.
		 */
		if (vio->vio_cfgchange_handler_added) {
			int r;

			if ((r = ddi_intr_remove_handler(
			    vio->vio_interrupts[0])) != DDI_SUCCESS) {
				dev_err(vio->vio_dip, CE_WARN,
				    "removing configuration change interrupt "
				    "handler failed (%d)", r);
			}
			vio->vio_cfgchange_handler_added = B_FALSE;
		}

		for (virtio_queue_t *viq = list_head(&vio->vio_queues);
		    viq != NULL; viq = list_next(&vio->vio_queues, viq)) {
			int r;

			if (!viq->viq_handler_added) {
				continue;
			}

			if ((r = ddi_intr_remove_handler(
			    vio->vio_interrupts[viq->viq_handler_index])) !=
			    DDI_SUCCESS) {
				dev_err(vio->vio_dip, CE_WARN, "removing "
				    "interrupt handler (%s) failed (%d)",
				    viq->viq_name, r);
			}
			viq->viq_handler_added = B_FALSE;
		}
	}
	vio->vio_initlevel &= ~VIRTIO_INITLEVEL_INT_ADDED;

	if (vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ALLOC) {
		for (int i = 0; i < vio->vio_ninterrupts; i++) {
			int r;

			if ((r = ddi_intr_free(vio->vio_interrupts[i])) !=
			    DDI_SUCCESS) {
				dev_err(vio->vio_dip, CE_WARN, "freeing "
				    "interrupt %u failed (%d)", i, r);
			}
		}
		kmem_free(vio->vio_interrupts,
		    sizeof (ddi_intr_handle_t) * vio->vio_ninterrupts);
		vio->vio_interrupts = NULL;
		vio->vio_ninterrupts = 0;
		vio->vio_interrupt_type = 0;
		vio->vio_interrupt_cap = 0;
		vio->vio_interrupt_priority = 0;

		vio->vio_initlevel &= ~VIRTIO_INITLEVEL_INT_ALLOC;
	}
}
/*
 * Undo the device-visible effects of enabling interrupts.  The caller must
 * hold "vio_mutex".  For MSI-X, the per-queue and configuration vector
 * assignments are cleared in the device (set to NO_VECTOR) first, while the
 * interrupts are still enabled, and only then are the interrupts disabled.
 */
static void
virtio_interrupts_unwind(virtio_t *vio)
{
VERIFY(MUTEX_HELD(&vio->vio_mutex));
if (vio->vio_interrupt_type == DDI_INTR_TYPE_MSIX) {
/*
 * Detach each queue (and the configuration change source, if
 * present) from its MSI-X vector.
 */
for (virtio_queue_t *viq = list_head(&vio->vio_queues);
viq != NULL; viq = list_next(&vio->vio_queues, viq)) {
if (!viq->viq_handler_added) {
continue;
}
vio->vio_ops->vop_msix_queue_set(vio, viq->viq_index,
VIRTIO_LEGACY_MSI_NO_VECTOR);
}
if (vio->vio_cfgchange_handler_added) {
vio->vio_ops->vop_msix_config_set(vio,
VIRTIO_LEGACY_MSI_NO_VECTOR);
}
}
/* Now actually disable the interrupts, in bulk if the cap allows it. */
if (vio->vio_interrupt_cap & DDI_INTR_FLAG_BLOCK) {
(void) ddi_intr_block_disable(vio->vio_interrupts,
vio->vio_ninterrupts);
} else {
for (int i = 0; i < vio->vio_ninterrupts; i++) {
(void) ddi_intr_disable(vio->vio_interrupts[i]);
}
}
/*
 * For legacy (non-modern) devices, restore the base configuration
 * offset -- mirrors the enable path, which switches to the MSI-X
 * offset when MSI-X is in use.
 */
if (!virtio_modern(vio))
vio->vio_legacy_cfg_offset = VIRTIO_LEGACY_CFG_OFFSET;
}
/*
 * Enable all previously allocated interrupts and, for MSI-X, program the
 * device's vector assignments for each queue and for configuration change
 * notifications.  Idempotent: returns DDI_SUCCESS immediately if interrupts
 * are already enabled.  On any failure, everything enabled so far is unwound
 * and DDI_FAILURE (or the DDI error from enabling) is returned.
 */
int
virtio_interrupts_enable(virtio_t *vio)
{
mutex_enter(&vio->vio_mutex);
if (vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ENABLED) {
mutex_exit(&vio->vio_mutex);
return (DDI_SUCCESS);
}
int r = DDI_SUCCESS;
if (vio->vio_interrupt_cap & DDI_INTR_FLAG_BLOCK) {
r = ddi_intr_block_enable(vio->vio_interrupts,
vio->vio_ninterrupts);
} else {
for (int i = 0; i < vio->vio_ninterrupts; i++) {
if ((r = ddi_intr_enable(vio->vio_interrupts[i])) !=
DDI_SUCCESS) {
/* Roll back the handles enabled so far. */
for (i--; i >= 0; i--) {
(void) ddi_intr_disable(
vio->vio_interrupts[i]);
}
break;
}
}
}
if (r != DDI_SUCCESS) {
mutex_exit(&vio->vio_mutex);
return (r);
}
if (vio->vio_interrupt_type == DDI_INTR_TYPE_MSIX) {
/*
 * For legacy devices, switch to the MSI-X configuration offset
 * before touching the vector registers -- mirrored by the
 * unwind path, which restores the base offset.
 */
if (!virtio_modern(vio)) {
vio->vio_legacy_cfg_offset =
VIRTIO_LEGACY_CFG_OFFSET_MSIX;
}
/*
 * Route each queue with a handler to its MSI-X vector, reading
 * the assignment back to confirm the device accepted it.
 */
for (virtio_queue_t *viq = list_head(&vio->vio_queues);
viq != NULL; viq = list_next(&vio->vio_queues, viq)) {
if (!viq->viq_handler_added) {
continue;
}
uint16_t qi = viq->viq_index;
uint16_t msi = viq->viq_handler_index;
vio->vio_ops->vop_msix_queue_set(vio, qi, msi);
if (vio->vio_ops->vop_msix_queue_get(vio, qi) != msi) {
dev_err(vio->vio_dip, CE_WARN,
"failed to configure MSI-X vector %u for "
"queue \"%s\" (#%u)", (uint_t)msi,
viq->viq_name, (uint_t)qi);
virtio_interrupts_unwind(vio);
mutex_exit(&vio->vio_mutex);
return (DDI_FAILURE);
}
}
/* Likewise route (and verify) the configuration change vector. */
if (vio->vio_cfgchange_handler_added) {
vio->vio_ops->vop_msix_config_set(vio,
vio->vio_cfgchange_handler_index);
if (vio->vio_ops->vop_msix_config_get(vio) !=
vio->vio_cfgchange_handler_index) {
dev_err(vio->vio_dip, CE_WARN,
"failed to configure MSI-X vector for "
"configuration");
virtio_interrupts_unwind(vio);
mutex_exit(&vio->vio_mutex);
return (DDI_FAILURE);
}
}
}
vio->vio_initlevel |= VIRTIO_INITLEVEL_INT_ENABLED;
mutex_exit(&vio->vio_mutex);
return (DDI_SUCCESS);
}
/*
 * Disable interrupts if they are currently enabled, clearing the
 * INT_ENABLED init-level flag.  The caller must hold "vio_mutex"; a no-op
 * when interrupts are not enabled.
 */
static void
virtio_interrupts_disable_locked(virtio_t *vio)
{
	VERIFY(MUTEX_HELD(&vio->vio_mutex));

	if (vio->vio_initlevel & VIRTIO_INITLEVEL_INT_ENABLED) {
		virtio_interrupts_unwind(vio);
		vio->vio_initlevel &= ~VIRTIO_INITLEVEL_INT_ENABLED;
	}
}
/*
 * Public entry point: take "vio_mutex" and disable this device's
 * interrupts via the locked helper.
 */
void
virtio_interrupts_disable(virtio_t *vio)
{
mutex_enter(&vio->vio_mutex);
virtio_interrupts_disable_locked(vio);
mutex_exit(&vio->vio_mutex);
}
/*
 * Map a PCI BAR number (0-5) to the index ("rnumber") of the matching entry
 * in this device's "reg" property, suitable for ddi_regs_map_setup(9F).
 * Returns -1 if the BAR number is out of range, the "reg" property cannot
 * be read, or no entry matches.
 *
 * Fixes a mis-encoded "&regs"/"&regs_length" (appeared as a corrupted
 * character sequence) in the ddi_prop_lookup_int_array() call, and removes
 * a mixed signed/unsigned comparison that only returned -1 correctly by
 * virtue of unsigned integer promotion.
 */
static int
virtio_bar_to_rnumber(virtio_t *vio, uint8_t bar)
{
	pci_regspec_t *regs;
	uint_t bar_offset, regs_length, rcount;
	int rnumber = -1;

	/* PCI devices have at most six BARs (BAR0 through BAR5). */
	if (bar > 5)
		return (-1);

	/* Config-space offset of the requested BAR register. */
	bar_offset = PCI_CONF_BASE0 + sizeof (uint32_t) * bar;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vio->vio_dip,
	    DDI_PROP_DONTPASS, "reg", (int **)&regs, &regs_length) !=
	    DDI_PROP_SUCCESS) {
		return (-1);
	}

	/*
	 * The "reg" property is an array of pci_regspec_t entries; look for
	 * the one whose register offset matches this BAR.
	 */
	rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
	for (uint_t i = 0; i < rcount; i++) {
		if (PCI_REG_REG_G(regs[i].pci_phys_hi) == bar_offset) {
			rnumber = (int)i;
			break;
		}
	}

	ddi_prop_free(regs);

	/* Either the matched index, or -1 if no entry matched. */
	return (rnumber);
}