#include <sys/types.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#include <dev/vmm/vmm.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include "vmd.h"
#include "pci.h"
#include "atomicio.h"
/* Global emulated PCI bus state for this process's VM. */
struct pci pci;
/* The VM being serviced; defined elsewhere in vmd. */
extern struct vmd_vm current_vm;
/* Legacy PIC IRQ lines handed out to PCI devices, in allocation order. */
const uint8_t pci_pic_irqs[PCI_MAX_PIC_IRQS] = {3, 5, 6, 7, 9, 10, 11, 12,
14, 15};
int
pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie)
{
uint8_t bar_reg_idx, bar_ct;
if (id >= pci.pci_dev_ct)
return (-1);
bar_ct = pci.pci_devices[id].pd_bar_ct;
if (bar_ct >= PCI_MAX_BARS)
return (-1);
bar_reg_idx = (PCI_MAPREG_START + (bar_ct * 4)) / 4;
if (type == PCI_MAPREG_TYPE_MEM) {
if (pci.pci_next_mmio_bar >= PCI_MMIO_BAR_END)
return (-1);
pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
PCI_MAPREG_MEM_ADDR(pci.pci_next_mmio_bar);
pci.pci_next_mmio_bar += VM_PCI_MMIO_BAR_SIZE;
pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_MMIO;
pci.pci_devices[id].pd_barsize[bar_ct] = VM_PCI_MMIO_BAR_SIZE;
pci.pci_devices[id].pd_bar_ct++;
}
#ifdef __amd64__
else if (type == PCI_MAPREG_TYPE_IO) {
if (pci.pci_next_io_bar >= VM_PCI_IO_BAR_END)
return (-1);
pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
PCI_MAPREG_IO_ADDR(pci.pci_next_io_bar) |
PCI_MAPREG_TYPE_IO;
pci.pci_next_io_bar += VM_PCI_IO_BAR_SIZE;
pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
DPRINTF("adding pci bar cookie for dev %d bar %d = %p", id,
bar_ct, cookie);
pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_IO;
pci.pci_devices[id].pd_barsize[bar_ct] = VM_PCI_IO_BAR_SIZE;
pci.pci_devices[id].pd_bar_ct++;
}
#endif
return ((int)bar_ct);
}
int
pci_set_bar_fn(uint8_t id, uint8_t bar_ct, void *barfn, void *cookie)
{
if (id >= pci.pci_dev_ct)
return (1);
if (bar_ct >= PCI_MAX_BARS)
return (1);
pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
return (0);
}
/*
 * pci_get_dev_irq
 *
 * Returns the PIC IRQ assigned to device 'id', or 0xFF if the device
 * has no interrupt line (or the id is out of range).
 */
uint8_t
pci_get_dev_irq(uint8_t id)
{
	/*
	 * BUGFIX: reject out-of-range ids instead of indexing past
	 * pci_devices[] (out-of-bounds read).  Valid devices with no
	 * interrupt still return 0xFF, as before.
	 */
	if (id >= pci.pci_dev_ct)
		return 0xFF;

	if (pci.pci_devices[id].pd_int)
		return pci.pci_devices[id].pd_irq;
	else
		return 0xFF;
}
/*
 * pci_add_device
 *
 * Register a new device on the emulated PCI bus and, if requested,
 * assign it the next legacy PIC IRQ from pci_pic_irqs[] and unmask
 * that line on the VM's interrupt controller.
 *
 * Parameters:
 *  id: output - the device id assigned on the bus
 *  vid/pid: PCI vendor and product ids
 *  class/subclass: PCI class and subclass codes
 *  subsys_vid/subsys_id: subsystem vendor and id
 *  rev_id: revision id
 *  irq_needed: nonzero if the device requires an interrupt line
 *  csfunc: optional config-space access override (NULL for default)
 *
 * Return: 0 on success, 1 if the bus is full or no IRQ lines remain.
 */
int
pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class,
    uint8_t subclass, uint16_t subsys_vid, uint16_t subsys_id,
    uint8_t rev_id, uint8_t irq_needed, pci_cs_fn_t csfunc)
{
	/* Exceeded max devices? */
	if (pci.pci_dev_ct >= PCI_CONFIG_MAX_DEV)
		return (1);

	/* Exceeded max IRQs, and the device needs one? */
	if (pci.pci_next_pic_irq >= PCI_MAX_PIC_IRQS && irq_needed)
		return (1);

	*id = pci.pci_dev_ct;

	pci.pci_devices[*id].pd_vid = vid;
	pci.pci_devices[*id].pd_did = pid;
	pci.pci_devices[*id].pd_rev = rev_id;
	pci.pci_devices[*id].pd_class = class;
	pci.pci_devices[*id].pd_subclass = subclass;
	pci.pci_devices[*id].pd_subsys_vid = subsys_vid;
	pci.pci_devices[*id].pd_subsys_id = subsys_id;
	pci.pci_devices[*id].pd_csfunc = csfunc;

	if (irq_needed) {
		pci.pci_devices[*id].pd_irq =
		    pci_pic_irqs[pci.pci_next_pic_irq];
		pci.pci_devices[*id].pd_int = 1;
		pci.pci_next_pic_irq++;
		DPRINTF("assigned irq %d to pci dev %d",
		    pci.pci_devices[*id].pd_irq, *id);
		/*
		 * BUGFIX: "&current_vm" had been mojibake'd into
		 * "¤t_vm" (U+00A4), which does not compile.
		 */
		intr_toggle_el(&current_vm, pci.pci_devices[*id].pd_irq, 1);
	}

	pci.pci_dev_ct++;

	return (0);
}
/*
 * pci_add_capability
 *
 * Append a capability to device 'id''s capability list, linking the
 * previous list tail to the new entry and enabling the capability-list
 * bit in the device's status register.
 *
 * Return: the new capability's index, or -1 if the device id is invalid
 * or the capability table is full.
 */
int
pci_add_capability(uint8_t id, struct pci_cap *cap)
{
	struct pci_dev *dev;
	uint8_t cid;

	if (id >= pci.pci_dev_ct)
		return (-1);

	dev = &pci.pci_devices[id];
	cid = dev->pd_cap_ct;
	if (cid >= PCI_MAX_CAPS)
		return (-1);

	memcpy(&dev->pd_caps[cid], cap, sizeof(dev->pd_caps[0]));

	/* Chain the previous tail to this entry's config-space offset. */
	if (cid > 0)
		dev->pd_caps[cid - 1].pc_next =
		    offsetof(struct pci_dev, pd_caps) +
		    (sizeof(struct pci_cap) * cid);

	dev->pd_cap_ct = cid + 1;

	/* Capability list head and status bit (status lives in the
	 * upper half of the command/status dword, hence the shift). */
	dev->pd_cap = offsetof(struct pci_dev, pd_caps);
	dev->pd_status |= (PCI_STATUS_CAPLIST_SUPPORT >> 16);

	return (cid);
}
/*
 * pci_init
 *
 * Reset the emulated PCI bus to an empty state, seed the MMIO (and, on
 * amd64, I/O) BAR allocators, and install the mandatory host bridge as
 * device 0.
 */
void
pci_init(void)
{
	uint8_t id;

	memset(&pci, 0, sizeof(pci));

	/* Per-device config space must fit the 256-byte PCI window. */
	CTASSERT(sizeof(pci.pci_devices[0].pd_cfg_space) <= 256);

	pci.pci_next_mmio_bar = PCI_MMIO_BAR_BASE;
#ifdef __amd64__
	pci.pci_next_io_bar = VM_PCI_IO_BAR_BASE;
#endif

	if (pci_add_device(&id, PCI_VENDOR_OPENBSD, PCI_PRODUCT_OPENBSD_PCHB,
	    PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST,
	    PCI_VENDOR_OPENBSD, 0, 0, 0, NULL) != 0)
		log_warnx("can't add PCI host bridge");
}
#ifdef __amd64__
/*
 * pci_handle_address_reg
 *
 * Emulate the PCI mode-1 address register at port 0xCF8: OUT latches a
 * new address, IN reads the latch back.
 */
void
pci_handle_address_reg(struct vm_run_params *vrp)
{
	struct vm_exit *vei = vrp->vrp_exit;

	if (vei->vei.vei_dir != VEI_DIR_OUT)
		set_return_data(vei, pci.pci_addr_reg);
	else
		get_input_data(vei, &pci.pci_addr_reg);
}
/*
 * pci_handle_io
 *
 * Dispatch a guest I/O port access to whichever device BAR covers the
 * port, translating the port into a BAR-relative offset first.
 *
 * Return: the IRQ to assert if the handler requested an interrupt,
 * otherwise 0xFF.
 */
uint8_t
pci_handle_io(struct vm_run_params *vrp)
{
	struct vm_exit *vei = vrp->vrp_exit;
	pci_iobar_fn_t fn = NULL;
	void *cookie = NULL;
	uint16_t reg, lo, hi;
	uint8_t intr = 0xFF, irq = 0xFF, dir, sz;
	int i, j, found = 0;

	reg = vei->vei.vei_port;
	dir = vei->vei.vei_dir;
	sz = vei->vei.vei_size;

	/* Locate the device/BAR pair whose I/O window covers 'reg'. */
	for (i = 0; !found && i < pci.pci_dev_ct; i++) {
		for (j = 0; !found && j < pci.pci_devices[i].pd_bar_ct; j++) {
			lo = PCI_MAPREG_IO_ADDR(pci.pci_devices[i].pd_bar[j]);
			hi = lo + VM_PCI_IO_BAR_SIZE;
			if (reg >= lo && reg < hi) {
				found = 1;
				fn = pci.pci_devices[i].pd_barfunc[j];
				cookie = pci.pci_devices[i].pd_bar_cookie[j];
				irq = pci.pci_devices[i].pd_irq;
				reg -= lo;	/* BAR-relative offset */
			}
		}
	}

	if (fn == NULL) {
		DPRINTF("no pci i/o function for reg 0x%llx (dir=%d guest "
		    "%%rip=0x%llx)", (uint64_t)reg, dir,
		    vei->vrs.vrs_gprs[VCPU_REGS_RIP]);
		/* Reads from unclaimed ports float high. */
		if (dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
		return (0xFF);
	}

	if (fn(dir, reg, &vei->vei.vei_data, &intr, cookie, sz))
		log_warnx("pci i/o access function failed");

	/* Handler asserted an interrupt: deliver the device's IRQ line. */
	if (intr != 0xFF)
		intr = irq;

	return (intr);
}
/*
 * pci_handle_data_reg
 *
 * Emulate an access to the PCI mode-1 config data register
 * (ports 0xCFC..0xCFF), decoding bus/device/function/offset from the
 * previously latched address register.  Handles BAR size probes
 * (write of 0xffffffff) and delegates to a device's config-space
 * override function when one is installed.
 */
void
pci_handle_data_reg(struct vm_run_params *vrp)
{
	struct vm_exit *vei = vrp->vrp_exit;
	struct pci_dev *pd = NULL;
	uint8_t b, d, f, o, baridx, cfgidx, ofs, sz;
	uint32_t data = 0;
	int ret;
	pci_cs_fn_t csfunc;

	/* Address register must have the mode-1 enable bit set. */
	if (!(pci.pci_addr_reg & PCI_MODE1_ENABLE)) {
		/* Reads of an invalid address return all 1s. */
		if (vei->vei.vei_dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
		log_warnx("invalid address register during pci read: "
		    "0x%llx", (uint64_t)pci.pci_addr_reg);
		return;
	}

	/* Sub-dword accesses arrive on ports 0xCFD..0xCFF. */
	ofs = vei->vei.vei_port - 0xCFC;
	sz = vei->vei.vei_size;

	/* Decode bus / device / function / dword-aligned offset. */
	b = (pci.pci_addr_reg >> 16) & 0xff;
	d = (pci.pci_addr_reg >> 11) & 0x1f;
	f = (pci.pci_addr_reg >> 8) & 0x7;
	o = (pci.pci_addr_reg & 0xfc);

	if (d >= pci.pci_dev_ct) {
		DPRINTF("%s: invalid pci device access (%u)", __func__, d);
		if (vei->vei.vei_dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
		return;
	}
	pd = &pci.pci_devices[d];

	/* Index into the device's dword config space array. */
	cfgidx = (o / 4);
	if (cfgidx >= nitems(pd->pd_cfg_space)) {
		DPRINTF("%s: out of range config space access", __func__);
		if (vei->vei.vei_dir == VEI_DIR_IN)
			set_return_data(vei, 0xFFFFFFFF);
		/*
		 * BUGFIX: this return was missing, so a guest-chosen
		 * out-of-range offset fell through to the
		 * pd_cfg_space[cfgidx] reads/writes below - an
		 * out-of-bounds access of host memory.
		 */
		return;
	}

	/* BAR dwords start at config index 4 (offset 0x10). */
	baridx = cfgidx - 4;

	/* Device-specific config-space handler takes priority. */
	csfunc = pd->pd_csfunc;
	if (csfunc != NULL) {
		ret = csfunc(vei->vei.vei_dir, cfgidx, &vei->vei.vei_data);
		if (ret)
			log_warnx("cfg space access function failed for "
			    "pci device %d", d);
		return;
	}

	/* Fold the sub-dword port offset back into the byte offset. */
	o += ofs;

	if (vei->vei.vei_dir == VEI_DIR_OUT) {
		/*
		 * BAR size probe: writing 0xffffffff to a BAR yields the
		 * size mask on the next read.
		 */
		if ((o >= 0x10 && o <= 0x24) &&
		    vei->vei.vei_data == 0xffffffff) {
			if (baridx < pd->pd_bar_ct)
				vei->vei.vei_data = 0xfffff000;
			else
				vei->vei.vei_data = 0;
		}

		/* I/O BARs keep their type bit set in the low bits. */
		if (o >= 0x10 && o <= 0x24) {
			if (baridx < pd->pd_bar_ct &&
			    pd->pd_bartype[baridx] == PCI_BAR_TYPE_IO)
				vei->vei.vei_data |= 1;
		}

		/* Expansion ROM base is read-only in this emulation. */
		if (o != PCI_EXROMADDR_0)
			get_input_data(vei, &pd->pd_cfg_space[cfgidx]);
	} else {
		/* Only bus 0 / function 0 devices exist. */
		if (d > pci.pci_dev_ct || b > 0 || f > 0)
			set_return_data(vei, 0xFFFFFFFF);
		else {
			data = pd->pd_cfg_space[cfgidx];
			switch (sz) {
			case 4:
				set_return_data(vei, data);
				break;
			case 2:
				if (ofs == 0)
					set_return_data(vei, data);
				else
					set_return_data(vei, data >> 16);
				break;
			case 1:
				set_return_data(vei, data >> (ofs * 8));
				break;
			}
		}
	}
}
#endif
/*
 * pci_find_first_device
 *
 * Scan the bus for the first device whose subsystem id matches.
 *
 * Return: the device's bus index, or -1 if no device matches.
 */
int
pci_find_first_device(uint16_t subsys_id)
{
	int idx = 0;

	while (idx < pci.pci_dev_ct) {
		if (pci.pci_devices[idx].pd_subsys_id == subsys_id)
			return (idx);
		idx++;
	}

	return (-1);
}
/*
 * pci_get_subsys_id
 *
 * Return the subsystem id of device 'pci_id', or 0 for an invalid id.
 */
uint16_t
pci_get_subsys_id(uint8_t pci_id)
{
	if (pci_id < pci.pci_dev_ct)
		return (pci.pci_devices[pci_id].pd_subsys_id);

	return (0);
}