#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/sunndi.h>
#include <sys/pci.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/pci_props.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_cfgspace_impl.h>
#include <sys/psw.h>
#include "../../../../common/pci/pci_strings.h"
#include <sys/apic.h>
#include <io/pciex/pcie_nvidia.h>
#include <sys/hotplug/pci/pciehpc_acpi.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/iommulib.h>
#include <sys/devcache.h>
#include <sys/pci_cfgacc_x86.h>
#include <sys/plat/pci_prd.h>
#define pci_getb (*pci_getb_func)
#define pci_getw (*pci_getw_func)
#define pci_getl (*pci_getl_func)
#define pci_putb (*pci_putb_func)
#define pci_putw (*pci_putw_func)
#define pci_putl (*pci_putl_func)
#define dcmn_err if (pci_boot_debug != 0) cmn_err
#define bus_debug(bus) (pci_boot_debug != 0 && pci_debug_bus_start != -1 && \
pci_debug_bus_end != -1 && (bus) >= pci_debug_bus_start && \
(bus) <= pci_debug_bus_end)
#define dump_memlists(tag, bus) \
if (bus_debug((bus))) dump_memlists_impl((tag), (bus))
#define MSGHDR "!%s[%02x/%02x/%x]: "
#define CONFIG_INFO 0
#define CONFIG_UPDATE 1
#define CONFIG_NEW 2
#define CONFIG_FIX 3
#define COMPAT_BUFSIZE 512
#define PPB_IO_ALIGNMENT 0x1000
#define PPB_MEM_ALIGNMENT 0x100000
/*
 * Round `align` down to the nearest power of two ("power of two, less
 * than or equal").  `align` must be a modifiable lvalue with a non-zero
 * value; it is updated in place.
 *
 * Note: the closing brace previously carried a trailing backslash,
 * which continued the macro definition onto the next source line and
 * could swallow the following #define; it has been removed.
 */
#define	P2LE(align)	\
	{ \
		uint_t i = 0; \
		while (align >>= 1) \
			i++; \
		align = 1 << i; \
	}
/* Convert a BAR size-probe mask into the decoded region length. */
#define	BARMASKTOLEN(value) ((((value) ^ ((value) - 1)) + 1) >> 1)
/*
 * Resource types used when sizing, allocating and programming PCI
 * bridge windows.
 */
typedef enum {
	RES_IO,		/* I/O port space */
	RES_MEM,	/* non-prefetchable memory (kept below 4GiB) */
	RES_PMEM	/* prefetchable memory (window may be 64-bit) */
} mem_res_t;
#define PPB_DISABLE_IORANGE_BASE 0x9fff
#define PPB_DISABLE_IORANGE_LIMIT 0x1000
#define PPB_DISABLE_MEMRANGE_BASE 0x9ff00000
#define PPB_DISABLE_MEMRANGE_LIMIT 0x100fffff
#define LPC_IO_CONTROL_REG_1 0x40
#define AMD8111_ENABLENMI (uint8_t)0x80
#define DEVID_AMD8111_LPC 0x7468
/*
 * Record of a device-specific fix applied during enumeration; undofn
 * is presumably invoked later with the device's bus/dev/fn to reverse
 * it (entries are kept on the `undolist` chain).
 */
struct pci_fixundo {
	uint8_t	bus;
	uint8_t	dev;
	uint8_t	fn;
	void	(*undofn)(uint8_t, uint8_t, uint8_t);
	struct pci_fixundo *next;	/* next entry on undolist */
};
/*
 * Per-device/function bookkeeping kept on a bus's private list during
 * enumeration; consumed (and freed) by enumerate_bus_devs(CONFIG_NEW).
 */
struct pci_devfunc {
	struct pci_devfunc *next;
	dev_info_t *dip;
	uchar_t dev;
	uchar_t func;
	boolean_t reprogram;	/* device needs its "reg" props re-emitted */
};
extern int apic_nvidia_io_max;
static uchar_t max_dev_pci = 32;
int pci_boot_maxbus;
int pci_boot_debug = 0;
int pci_debug_bus_start = -1;
int pci_debug_bus_end = -1;
static struct pci_fixundo *undolist = NULL;
static int num_root_bus = 0;
extern void pci_cfgacc_add_workaround(uint16_t, uchar_t, uchar_t);
extern dev_info_t *pcie_get_rc_dip(dev_info_t *);
static void enumerate_bus_devs(uchar_t bus, int config_op);
static void create_root_bus_dip(uchar_t bus);
static void process_devfunc(uchar_t, uchar_t, uchar_t, int);
static boolean_t add_reg_props(dev_info_t *, uchar_t, uchar_t, uchar_t, int,
boolean_t);
static void add_ppb_props(dev_info_t *, uchar_t, uchar_t, uchar_t, boolean_t,
boolean_t);
static void add_bus_range_prop(int);
static void add_ranges_prop(int, boolean_t);
static void add_bus_available_prop(int);
static int get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id);
static void fix_ppb_res(uchar_t, boolean_t);
static void alloc_res_array(void);
static void create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid,
ushort_t deviceid);
static void populate_bus_res(uchar_t bus);
static void pci_memlist_remove_list(struct memlist **list,
struct memlist *remove_list);
static void ck804_fix_aer_ptr(dev_info_t *, pcie_req_id_t);
static int pci_unitaddr_cache_valid(void);
static int pci_bus_unitaddr(int);
static void pci_unitaddr_cache_create(void);
static int pci_cache_unpack_nvlist(nvf_handle_t, nvlist_t *, char *);
static int pci_cache_pack_nvlist(nvf_handle_t, nvlist_t **);
static void pci_cache_free_list(nvf_handle_t);
int pci_bus_always_renumber = 0;
/*
 * I/O and memory ranges claimed by ISA devices, registered via
 * pci_register_isa_resources() and subtracted from the PCI available
 * lists (then freed) in pci_reprogram().
 */
static struct {
	struct memlist *io_used;
	struct memlist *mem_used;
} isa_res;
/*
 * devcache hooks for the persistent root-bus unit-address cache file.
 */
static nvf_ops_t pci_unitaddr_cache_ops = {
	"/etc/devices/pci_unitaddr_persistent",	/* cache file path */
	pci_cache_unpack_nvlist,	/* read: nvlist -> in-core list */
	pci_cache_pack_nvlist,		/* write: in-core list -> nvlist */
	pci_cache_free_list,		/* release in-core list */
	NULL
};
/* One cached root-bus unit-address entry. */
typedef struct {
	list_node_t pua_nodes;	/* linkage on the nvf handle's list */
	int pua_index;		/* ordinal of the root bus */
	int pua_addr;		/* cached unit-address for that root */
} pua_node_t;
nvf_handle_t puafd_handle;
int pua_cache_valid = 0;
/*
 * Return the dev_info node associated with PCI bus `busno` (NULL if the
 * bus was never instantiated).
 */
dev_info_t *
pci_boot_bus_to_dip(uint32_t busno)
{
	ASSERT3U(busno, <=, pci_boot_maxbus);
	return (pci_bus_res[busno].dip);
}
static void
dump_memlists_impl(const char *tag, int bus)
{
printf("Memlist dump at %s - bus %x\n", tag, bus);
if (pci_bus_res[bus].io_used != NULL) {
printf(" io_used ");
pci_memlist_dump(pci_bus_res[bus].io_used);
}
if (pci_bus_res[bus].io_avail != NULL) {
printf(" io_avail ");
pci_memlist_dump(pci_bus_res[bus].io_avail);
}
if (pci_bus_res[bus].mem_used != NULL) {
printf(" mem_used ");
pci_memlist_dump(pci_bus_res[bus].mem_used);
}
if (pci_bus_res[bus].mem_avail != NULL) {
printf(" mem_avail ");
pci_memlist_dump(pci_bus_res[bus].mem_avail);
}
if (pci_bus_res[bus].pmem_used != NULL) {
printf(" pmem_used ");
pci_memlist_dump(pci_bus_res[bus].pmem_used);
}
if (pci_bus_res[bus].pmem_avail != NULL) {
printf(" pmem_avail ");
pci_memlist_dump(pci_bus_res[bus].pmem_avail);
}
}
/*
 * Callback for the platform root-complex iterator: instantiate a root
 * bus dip for any valid root complex we have not yet created one for.
 * Always returns B_TRUE so iteration continues.
 */
static boolean_t
pci_rc_scan_cb(uint32_t busno, void *arg)
{
	if (busno > pci_boot_maxbus) {
		dcmn_err(CE_NOTE, "platform root complex scan returned bus "
		    "with invalid bus id: 0x%x", busno);
		return (B_TRUE);
	}

	/* Only roots (no parent bus) that still lack a dip need creating. */
	if (pci_bus_res[busno].dip == NULL &&
	    pci_bus_res[busno].par_bus == (uchar_t)-1) {
		create_root_bus_dip((uchar_t)busno);
	}

	return (B_TRUE);
}
/*
 * Register the unit-address cache file with the devcache framework,
 * initialize its backing list, and read any previously persisted
 * contents (which sets pua_cache_valid via the unpack callback).
 */
static void
pci_unitaddr_cache_init(void)
{
	puafd_handle = nvf_register_file(&pci_unitaddr_cache_ops);
	ASSERT(puafd_handle);

	list_create(nvf_list(puafd_handle), sizeof (pua_node_t),
	    offsetof(pua_node_t, pua_nodes));

	rw_enter(nvf_lock(puafd_handle), RW_WRITER);
	(void) nvf_read_file(puafd_handle);
	rw_exit(nvf_lock(puafd_handle));
}
/*
 * Devcache unpack callback: convert the on-disk nvlist into the in-core
 * unit-address list.  Pair names are decimal bus indices and values are
 * int32 unit-addresses; malformed pairs are silently skipped.  Marks
 * the cache valid on completion.
 */
static int
pci_cache_unpack_nvlist(nvf_handle_t hdl, nvlist_t *nvl, char *name)
{
	long index;
	int32_t value;
	nvpair_t *np;
	pua_node_t *node;

	np = NULL;
	while ((np = nvlist_next_nvpair(nvl, np)) != NULL) {
		/* name of nvpair is the decimal bus index */
		if (ddi_strtol(nvpair_name(np), NULL, 10, &index) != 0)
			continue;
		if (nvpair_value_int32(np, &value) != 0)
			continue;
		node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP);
		node->pua_index = index;
		node->pua_addr = value;
		list_insert_tail(nvf_list(hdl), node);
	}

	pua_cache_valid = 1;
	return (DDI_SUCCESS);
}
/*
 * Devcache pack callback: marshal the in-core unit-address list into an
 * nvlist for writing to the cache file.  Entries are placed in a
 * "table" sub-nvlist keyed by the decimal bus index.  On success the
 * caller takes ownership of *ret_nvl.
 */
static int
pci_cache_pack_nvlist(nvf_handle_t hdl, nvlist_t **ret_nvl)
{
	int rval;
	nvlist_t *nvl, *sub_nvl;
	list_t *listp;
	pua_node_t *pua;
	char buf[13];	/* holds any decimal int32 plus NUL */

	ASSERT(RW_WRITE_HELD(nvf_lock(hdl)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != DDI_SUCCESS) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(hdl), rval);
		return (DDI_FAILURE);
	}

	sub_nvl = NULL;
	rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != DDI_SUCCESS)
		goto error;

	listp = nvf_list(hdl);
	for (pua = list_head(listp); pua != NULL;
	    pua = list_next(listp, pua)) {
		(void) snprintf(buf, sizeof (buf), "%d", pua->pua_index);
		rval = nvlist_add_int32(sub_nvl, buf, pua->pua_addr);
		if (rval != DDI_SUCCESS)
			goto error;
	}

	rval = nvlist_add_nvlist(nvl, "table", sub_nvl);
	if (rval != DDI_SUCCESS)
		goto error;
	/* nvlist_add_nvlist() made its own copy; release ours. */
	nvlist_free(sub_nvl);

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

error:
	nvlist_free(sub_nvl);
	ASSERT(nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
/*
 * Devcache free callback: release every node on the unit-address list.
 *
 * The previous implementation advanced the loop with
 * list_next(listp, pua) after `pua` had already been kmem_free()d — a
 * use-after-free.  Pop nodes from the head instead so freed memory is
 * never dereferenced.
 */
static void
pci_cache_free_list(nvf_handle_t hdl)
{
	list_t *listp;
	pua_node_t *pua;

	ASSERT(RW_WRITE_HELD(nvf_lock(hdl)));

	listp = nvf_list(hdl);
	while ((pua = list_head(listp)) != NULL) {
		list_remove(listp, pua);
		kmem_free(pua, sizeof (pua_node_t));
	}
}
/*
 * Non-zero once a persisted unit-address cache has been successfully
 * unpacked (see pci_cache_unpack_nvlist()).
 */
static int
pci_unitaddr_cache_valid(void)
{
	return (pua_cache_valid);
}
/*
 * Look up the cached unit-address for the root bus with ordinal
 * `index`; returns -1 if no matching cache entry exists.
 */
static int
pci_bus_unitaddr(int index)
{
	list_t *nodes;
	pua_node_t *node;
	int result = -1;

	rw_enter(nvf_lock(puafd_handle), RW_READER);
	nodes = nvf_list(puafd_handle);
	for (node = list_head(nodes); node != NULL;
	    node = list_next(nodes, node)) {
		if (node->pua_index != index)
			continue;
		result = node->pua_addr;
		break;
	}
	rw_exit(nvf_lock(puafd_handle));

	return (result);
}
/*
 * Build the unit-address cache from the current set of instantiated
 * root buses (those with a dip and no parent), assigning consecutive
 * ordinals, then mark the cache file dirty and wake the flush daemon.
 */
static void
pci_unitaddr_cache_create(void)
{
	int i, index;
	pua_node_t *node;
	list_t *listp;

	rw_enter(nvf_lock(puafd_handle), RW_WRITER);
	index = 0;
	listp = nvf_list(puafd_handle);
	for (i = 0; i <= pci_boot_maxbus; i++) {
		/* only worry about root buses that were instantiated */
		if ((pci_bus_res[i].par_bus != (uchar_t)-1) ||
		    pci_bus_res[i].dip == NULL)
			continue;
		node = kmem_zalloc(sizeof (pua_node_t), KM_SLEEP);
		node->pua_index = index++;
		node->pua_addr = pci_bus_res[i].root_addr;
		list_insert_tail(listp, node);
	}
	(void) nvf_mark_dirty(puafd_handle);
	rw_exit(nvf_lock(puafd_handle));
	nvf_wake_daemon();
}
/*
 * First-pass construction of the PCI tree: initialize per-bus state,
 * create the dip for bus 0 and enumerate it (which discovers bridges
 * and hence further buses), then sweep the remaining bus numbers.
 */
void
pci_setup_tree(void)
{
	uint_t i, root_bus_addr = 0;

	alloc_res_array();
	for (i = 0; i <= pci_boot_maxbus; i++) {
		pci_bus_res[i].par_bus = (uchar_t)-1;
		pci_bus_res[i].root_addr = (uchar_t)-1;
		pci_bus_res[i].sub_bus = i;
	}

	pci_bus_res[0].root_addr = root_bus_addr++;
	create_root_bus_dip(0);
	enumerate_bus_devs(0, CONFIG_INFO);

	/*
	 * NOTE(review): buses without a dip at this point are given a
	 * root address before enumeration -- confirm this is the
	 * intended treatment of peer root buses vs. child buses whose
	 * dips were created during the bus-0 scan above.
	 */
	for (i = 1; i <= pci_boot_maxbus; i++) {
		if (pci_bus_res[i].dip == NULL) {
			pci_bus_res[i].root_addr = root_bus_addr++;
		}
		enumerate_bus_devs(i, CONFIG_INFO);
	}
}
/*
 * Record a resource range in use by an ISA device so that it can later
 * be excluded from PCI allocation; type 1 is I/O space, anything else
 * is treated as memory space.
 */
void
pci_register_isa_resources(int type, uint32_t base, uint32_t size)
{
	struct memlist **target;

	target = (type == 1) ? &isa_res.io_used : &isa_res.mem_used;
	(void) pci_memlist_insert(target, base, size);
}
/*
 * For every subtractive-decode bridge, remove the ranges its secondary
 * bus is actually using from the "available" lists of all buses: a
 * subtractive bridge claims any transaction nobody else decodes, so
 * those ranges must not be handed out anywhere.
 *
 * Memory ranges are removed from both the prefetchable and
 * non-prefetchable available lists since the two spaces overlap in the
 * address map.
 *
 * (Also converts the obsolescent empty-parentheses declarator to a
 * proper (void) prototype.)
 */
static void
remove_subtractive_res(void)
{
	int i, j;
	struct memlist *list;

	for (i = 0; i <= pci_boot_maxbus; i++) {
		if (!pci_bus_res[i].subtractive)
			continue;

		/* Used I/O comes out of every bus's io_avail. */
		list = pci_bus_res[i].io_used;
		while (list) {
			for (j = 0; j <= pci_boot_maxbus; j++)
				(void) pci_memlist_remove(
				    &pci_bus_res[j].io_avail,
				    list->ml_address, list->ml_size);
			list = list->ml_next;
		}

		/* Used memory comes out of mem_avail and pmem_avail. */
		list = pci_bus_res[i].mem_used;
		while (list) {
			for (j = 0; j <= pci_boot_maxbus; j++) {
				(void) pci_memlist_remove(
				    &pci_bus_res[j].mem_avail,
				    list->ml_address, list->ml_size);
				(void) pci_memlist_remove(
				    &pci_bus_res[j].pmem_avail,
				    list->ml_address, list->ml_size);
			}
			list = list->ml_next;
		}

		/* Used prefetch memory likewise comes out of both. */
		list = pci_bus_res[i].pmem_used;
		while (list) {
			for (j = 0; j <= pci_boot_maxbus; j++) {
				(void) pci_memlist_remove(
				    &pci_bus_res[j].pmem_avail,
				    list->ml_address, list->ml_size);
				(void) pci_memlist_remove(
				    &pci_bus_res[j].mem_avail,
				    list->ml_address, list->ml_size);
			}
			list = list->ml_next;
		}
	}
}
/*
 * Establish the bus-number resource list for `bus`: ensure it has a
 * bus_avail list covering [bus, sub_bus], remove that range from its
 * parent's pool, then remove the bus's own number (it is in use) from
 * its available list.
 *
 * (A stray empty statement before the final pci_memlist_remove() call
 * has been removed.)
 */
static void
setup_bus_res(int bus)
{
	uchar_t par_bus;

	if (pci_bus_res[bus].dip == NULL)
		return;

	if (pci_bus_res[bus].bus_avail == NULL) {
		ASSERT(pci_bus_res[bus].sub_bus >= bus);
		pci_memlist_insert(&pci_bus_res[bus].bus_avail, bus,
		    pci_bus_res[bus].sub_bus - bus + 1);
	}
	ASSERT(pci_bus_res[bus].bus_avail != NULL);

	/* A child's bus range is no longer available at the parent. */
	par_bus = pci_bus_res[bus].par_bus;
	if (par_bus != (uchar_t)-1) {
		ASSERT(pci_bus_res[par_bus].bus_avail != NULL);
		pci_memlist_remove_list(&pci_bus_res[par_bus].bus_avail,
		    pci_bus_res[bus].bus_avail);
	}

	/* This bus number itself is taken. */
	(void) pci_memlist_remove(&pci_bus_res[bus].bus_avail, bus, 1);
}
/*
 * Walk up from `bus` through subtractive-decode parents until we reach
 * a bus that decodes positively, already has resources of the requested
 * type available, or is a root; allocations are made from that bus.
 */
static uchar_t
resolve_alloc_bus(uchar_t bus, mem_res_t type)
{
	for (;;) {
		struct memlist *avail;

		if (!pci_bus_res[bus].subtractive)
			break;

		switch (type) {
		case RES_IO:
			avail = pci_bus_res[bus].io_avail;
			break;
		case RES_MEM:
			avail = pci_bus_res[bus].mem_avail;
			break;
		case RES_PMEM:
			avail = pci_bus_res[bus].pmem_avail;
			break;
		default:
			avail = NULL;
			break;
		}

		if (avail != NULL)
			break;
		if (pci_bus_res[bus].par_bus == (uchar_t)-1)
			break;
		bus = pci_bus_res[bus].par_bus;
	}

	return (bus);
}
/*
 * Find the root bus above `bus` and return its spare memory budget
 * divided evenly among the bridges below it, or 0 if the root recorded
 * no budget or no bridges.
 */
static uint64_t
get_per_bridge_avail(uchar_t bus)
{
	uchar_t root = bus;

	/* Climb to the root of this bus's tree. */
	for (;;) {
		uchar_t up = pci_bus_res[root].par_bus;

		if (up == (uchar_t)-1)
			break;
		root = up;
	}

	if (pci_bus_res[root].mem_buffer == 0 ||
	    pci_bus_res[root].num_bridge == 0)
		return (0);

	return (pci_bus_res[root].mem_buffer / pci_bus_res[root].num_bridge);
}
/*
 * Try to find `size` bytes of the given resource type, aligned to
 * `align`, on bus `parbus` (after resolving subtractive bridges up the
 * tree).  Returns the address found, or 0 on failure.
 *
 * NOTE(review): assumes pci_memlist_find() claims the returned range
 * from *list -- confirm against its implementation.
 */
static uint64_t
lookup_parbus_res(uchar_t parbus, uint64_t size, uint64_t align, mem_res_t type)
{
	struct memlist **list;
	uint64_t addr;

	/*
	 * On a multi-root system, don't allocate from a root bus unless
	 * the platform says cross-root allocation is safe.
	 */
	if (pci_bus_res[parbus].par_bus == (uchar_t)-1 &&
	    num_root_bus > 1 && !pci_prd_multi_root_ok()) {
		return (0);
	}

	parbus = resolve_alloc_bus(parbus, type);

	switch (type) {
	case RES_IO:
		list = &pci_bus_res[parbus].io_avail;
		break;
	case RES_MEM:
		list = &pci_bus_res[parbus].mem_avail;
		break;
	case RES_PMEM:
		list = &pci_bus_res[parbus].pmem_avail;
		break;
	default:
		panic("Invalid resource type %d", type);
	}

	if (*list == NULL)
		return (0);

	addr = pci_memlist_find(list, size, align);

	return (addr);
}
/*
 * Allocate `size` bytes (aligned to `align`) of the given resource type
 * for bus `bus` from its parent `parbus`.  Before allocating, any
 * resources the child currently holds are returned to the parent so the
 * new window can be placed fresh.  On success the range is marked used
 * on the parent and available on the child; returns the address, or 0.
 */
static uint64_t
get_parbus_res(uchar_t parbus, uchar_t bus, uint64_t size, uint64_t align,
    mem_res_t type)
{
	struct memlist **par_avail, **par_used, **avail, **used;
	uint64_t addr;

	parbus = resolve_alloc_bus(parbus, type);

	switch (type) {
	case RES_IO:
		par_avail = &pci_bus_res[parbus].io_avail;
		par_used = &pci_bus_res[parbus].io_used;
		avail = &pci_bus_res[bus].io_avail;
		used = &pci_bus_res[bus].io_used;
		break;
	case RES_MEM:
		par_avail = &pci_bus_res[parbus].mem_avail;
		par_used = &pci_bus_res[parbus].mem_used;
		avail = &pci_bus_res[bus].mem_avail;
		used = &pci_bus_res[bus].mem_used;
		break;
	case RES_PMEM:
		par_avail = &pci_bus_res[parbus].pmem_avail;
		par_used = &pci_bus_res[parbus].pmem_used;
		avail = &pci_bus_res[bus].pmem_avail;
		used = &pci_bus_res[bus].pmem_used;
		break;
	default:
		panic("Invalid resource type %d", type);
	}

	/* Give the child's current holdings back to the parent. */
	pci_memlist_subsume(used, avail);
	for (struct memlist *m = *avail; m != NULL; m = m->ml_next) {
		(void) pci_memlist_remove(par_used, m->ml_address, m->ml_size);
		pci_memlist_insert(par_avail, m->ml_address, m->ml_size);
	}
	pci_memlist_free_all(avail);

	addr = lookup_parbus_res(parbus, size, align, type);

	/*
	 * Keep non-prefetchable memory windows below 4GiB.
	 * NOTE(review): the first test is subsumed by the second whenever
	 * size > 0, and a non-zero addr rejected here is not re-inserted
	 * into the parent's available list -- confirm both are intended.
	 */
	if (type == RES_MEM &&
	    (addr >= UINT32_MAX || addr >= UINT32_MAX - size)) {
		return (0);
	}

	if (addr != 0) {
		pci_memlist_insert(par_used, addr, size);
		(void) pci_memlist_remove(par_avail, addr, size);
		pci_memlist_insert(avail, addr, size);
	}

	return (addr);
}
/*
 * Search the device's PCI capability list for `cap_id`; returns the
 * capability's config-space offset, or -1 if the device does not
 * advertise capabilities or the id is not found.
 */
static int
get_pci_cap(uchar_t bus, uchar_t dev, uchar_t func, uint8_t cap_id)
{
	uint8_t ptr;
	uint16_t status;

	status = pci_getw(bus, dev, func, PCI_CONF_STAT);
	if ((status & PCI_STAT_CAP) == 0)
		return (-1);

	/* Walk the list; 0 or 0xff terminates it. */
	for (ptr = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR);
	    ptr != 0 && ptr != (uint8_t)-1;
	    ptr = pci_getb(bus, dev, func, ptr + 1)) {
		if (pci_getb(bus, dev, func, ptr) == cap_id)
			return (ptr);
	}

	return (-1);
}
/*
 * Is this memlist entry one of the legacy VGA ranges (I/O 0x3b0+0xc,
 * I/O 0x3c0+0x20, or the memory window at 0xa0000+0x20000)?
 */
static boolean_t
is_vga(struct memlist *elem, mem_res_t type)
{
	uint64_t addr = elem->ml_address;
	uint64_t len = elem->ml_size;

	if (type == RES_IO) {
		if ((addr == 0x3b0 && len == 0xc) ||
		    (addr == 0x3c0 && len == 0x20))
			return (B_TRUE);
	} else if (type == RES_MEM) {
		if (addr == 0xa0000 && len == 0x20000)
			return (B_TRUE);
	}
	/* RES_PMEM never matches. */
	return (B_FALSE);
}
/*
 * Returns B_TRUE iff the (non-empty) list consists solely of legacy VGA
 * ranges; an empty list yields B_FALSE.
 */
static boolean_t
list_is_vga_only(struct memlist *l, mem_res_t type)
{
	if (l == NULL)
		return (B_FALSE);

	for (; l != NULL; l = l->ml_next) {
		if (!is_vga(l, type))
			return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Compute the lowest base and highest limit (inclusive) spanned by
 * `list`, ignoring legacy VGA ranges.  Both outputs are 0 for an empty
 * or all-VGA list.
 *
 * NOTE(review): an entry whose address is 0 would never latch *basep;
 * presumably such ranges cannot occur here -- confirm.
 */
static void
pci_memlist_range(struct memlist *list, mem_res_t type, uint64_t *basep,
    uint64_t *limitp)
{
	*limitp = *basep = 0;

	for (; list != NULL; list = list->ml_next) {
		if (is_vga(list, type))
			continue;
		if (*basep == 0)
			*basep = list->ml_address;
		if (list->ml_address + list->ml_size >= *limitp)
			*limitp = list->ml_address + list->ml_size - 1;
	}
}
/*
 * Program a PCI-PCI bridge's forwarding window registers for the given
 * resource type with [base, limit].  Passing base > limit disables the
 * window (per the bridge spec, a limit below the base means no
 * forwarding).  Logs what was programmed or disabled.
 */
static void
set_ppb_res(uchar_t bus, uchar_t dev, uchar_t func, mem_res_t type,
    uint64_t base, uint64_t limit)
{
	char *tag;

	switch (type) {
	case RES_IO: {
		VERIFY0(base >> 32);
		VERIFY0(limit >> 32);

		pci_putb(bus, dev, func, PCI_BCNF_IO_BASE_LOW,
		    (uint8_t)((base >> PCI_BCNF_IO_SHIFT) & PCI_BCNF_IO_MASK));
		pci_putb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW,
		    (uint8_t)((limit >> PCI_BCNF_IO_SHIFT) & PCI_BCNF_IO_MASK));

		/*
		 * Only a bridge reporting 32-bit I/O decode has the
		 * upper-16 registers; otherwise the range must fit in
		 * 16 bits.
		 */
		uint8_t val = pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW);
		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) {
			pci_putw(bus, dev, func, PCI_BCNF_IO_BASE_HI,
			    base >> 16);
			pci_putw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI,
			    limit >> 16);
		} else {
			VERIFY0(base >> 16);
			VERIFY0(limit >> 16);
		}

		tag = "I/O";
		break;
	}

	case RES_MEM:
		/* Non-prefetchable memory windows are 32-bit only. */
		VERIFY0(base >> 32);
		VERIFY0(limit >> 32);

		pci_putw(bus, dev, func, PCI_BCNF_MEM_BASE,
		    (uint16_t)((base >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));
		pci_putw(bus, dev, func, PCI_BCNF_MEM_LIMIT,
		    (uint16_t)((limit >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));

		tag = "MEM";
		break;

	case RES_PMEM: {
		pci_putw(bus, dev, func, PCI_BCNF_PF_BASE_LOW,
		    (uint16_t)((base >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));
		pci_putw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW,
		    (uint16_t)((limit >> PCI_BCNF_MEM_SHIFT) &
		    PCI_BCNF_MEM_MASK));

		/* 64-bit-capable bridges take the upper 32 bits too. */
		uint16_t val = pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW);
		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) {
			pci_putl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH,
			    base >> 32);
			pci_putl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH,
			    limit >> 32);
		} else {
			VERIFY0(base >> 32);
			VERIFY0(limit >> 32);
		}

		tag = "PMEM";
		break;
	}

	default:
		panic("Invalid resource type %d", type);
	}

	if (base > limit) {
		cmn_err(CE_NOTE, MSGHDR "DISABLE %4s range",
		    "ppb", bus, dev, func, tag);
	} else {
		cmn_err(CE_NOTE,
		    MSGHDR "PROGRAM %4s range 0x%lx ~ 0x%lx",
		    "ppb", bus, dev, func, tag, base, limit);
	}
}
/*
 * Read back a PCI-PCI bridge's currently programmed forwarding window
 * of the given type.  The limit is returned with its low "don't care"
 * bits filled in (inclusive upper bound); upper registers are consulted
 * only when the bridge reports 32-bit I/O or 64-bit prefetch decode.
 */
static void
fetch_ppb_res(uchar_t bus, uchar_t dev, uchar_t func, mem_res_t type,
    uint64_t *basep, uint64_t *limitp)
{
	uint64_t val, base, limit;

	switch (type) {
	case RES_IO:
		val = pci_getb(bus, dev, func, PCI_BCNF_IO_LIMIT_LOW);
		limit = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT) |
		    PCI_BCNF_IO_LIMIT_BITS;
		val = pci_getb(bus, dev, func, PCI_BCNF_IO_BASE_LOW);
		base = ((val & PCI_BCNF_IO_MASK) << PCI_BCNF_IO_SHIFT);

		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_IO_32BIT) {
			val = pci_getw(bus, dev, func, PCI_BCNF_IO_BASE_HI);
			base |= val << 16;
			val = pci_getw(bus, dev, func, PCI_BCNF_IO_LIMIT_HI);
			limit |= val << 16;
		}
		VERIFY0(base >> 32);
		break;

	case RES_MEM:
		val = pci_getw(bus, dev, func, PCI_BCNF_MEM_LIMIT);
		limit = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) |
		    PCI_BCNF_MEM_LIMIT_BITS;
		val = pci_getw(bus, dev, func, PCI_BCNF_MEM_BASE);
		base = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT);
		VERIFY0(base >> 32);
		break;

	case RES_PMEM:
		val = pci_getw(bus, dev, func, PCI_BCNF_PF_LIMIT_LOW);
		limit = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT) |
		    PCI_BCNF_MEM_LIMIT_BITS;
		val = pci_getw(bus, dev, func, PCI_BCNF_PF_BASE_LOW);
		base = ((val & PCI_BCNF_MEM_MASK) << PCI_BCNF_MEM_SHIFT);

		if ((val & PCI_BCNF_ADDR_MASK) == PCI_BCNF_PF_MEM_64BIT) {
			val = pci_getl(bus, dev, func, PCI_BCNF_PF_BASE_HIGH);
			base |= val << 32;
			val = pci_getl(bus, dev, func, PCI_BCNF_PF_LIMIT_HIGH);
			limit |= val << 32;
		}
		break;

	default:
		panic("Invalid resource type %d", type);
	}

	*basep = base;
	*limitp = limit;
}
/*
 * (Re)program the windows of the bridge above bus `secbus`.
 *
 * Sizes the I/O, memory and prefetch windows from the demand recorded
 * during enumeration (scaled by the number of buses below the bridge
 * and the root's spare budget), then either carves ranges out of the
 * parent (subtractive bridges, prog_sub == B_TRUE) or reprograms the
 * bridge's base/limit registers.  Finally enables I/O and/or memory
 * decode in the command register as appropriate.
 */
static void
fix_ppb_res(uchar_t secbus, boolean_t prog_sub)
{
	uchar_t bus, dev, func;
	uchar_t parbus, subbus;
	struct {
		uint64_t base;
		uint64_t limit;
		uint64_t size;
		uint64_t align;
	} io, mem, pmem;
	uint64_t addr = 0;
	int *regp = NULL;
	uint_t reglen, buscount;
	int rv, cap_ptr, physhi;
	dev_info_t *dip;
	uint16_t cmd_reg;
	struct memlist *scratch_list;
	boolean_t reprogram_io, reprogram_mem;

	/* Roots have no bridge above them to program. */
	if (pci_bus_res[secbus].par_bus == (uchar_t)-1)
		return;

	/* Subtractive bridges are only handled when prog_sub is set. */
	if (pci_bus_res[secbus].subtractive && !prog_sub)
		return;

	/* Locate the bridge's own bus/dev/func from its "reg" property. */
	dip = pci_bus_res[secbus].dip;
	if (dip == NULL)
		return;
	rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &regp, &reglen);
	if (rv != DDI_PROP_SUCCESS || reglen == 0)
		return;
	physhi = regp[0];
	ddi_prop_free(regp);

	func = (uchar_t)PCI_REG_FUNC_G(physhi);
	dev = (uchar_t)PCI_REG_DEV_G(physhi);
	bus = (uchar_t)PCI_REG_BUS_G(physhi);

	dump_memlists("fix_ppb_res start bus", bus);
	dump_memlists("fix_ppb_res start secbus", secbus);

	/* A PCIe bridge whose link is disabled needs no programming. */
	cap_ptr = get_pci_cap(bus, dev, func, PCI_CAP_ID_PCI_E);
	if (cap_ptr != -1) {
		uint16_t reg = pci_getw(bus, dev, func,
		    (uint16_t)cap_ptr + PCIE_LINKCTL);
		if ((reg & PCIE_LINKCTL_LINK_DISABLE) != 0) {
			dcmn_err(CE_NOTE, MSGHDR "link is disabled",
			    "ppb", bus, dev, func);
			return;
		}
	}

	subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS);
	parbus = pci_bus_res[secbus].par_bus;
	ASSERT(parbus == bus);
	cmd_reg = pci_getw(bus, dev, func, PCI_CONF_COMM);

	/*
	 * If cardbus bridges live below this bus, try to extend the
	 * subordinate bus range (two bus numbers per cardbus bridge,
	 * shrinking the request until the parent can satisfy it).
	 */
	if (pci_bus_res[secbus].num_cbb != 0 &&
	    pci_bus_res[secbus].bus_avail == NULL) {
		uchar_t range;

		range = pci_bus_res[secbus].num_cbb * 2;
		for (; range != 0; range--) {
			if (pci_memlist_find_with_startaddr(
			    &pci_bus_res[parbus].bus_avail,
			    subbus + 1, range, 1) != 0) {
				break;
			}
		}
		if (range != 0) {
			pci_memlist_insert(&pci_bus_res[secbus].bus_avail,
			    subbus + 1, range);
			subbus = subbus + range;
			pci_bus_res[secbus].sub_bus = subbus;
			pci_putb(bus, dev, func, PCI_BCNF_SUBBUS, subbus);
			add_bus_range_prop(secbus);
			cmn_err(CE_NOTE,
			    MSGHDR "PROGRAM cardbus buses 0x%x ~ 0x%x",
			    "cbb", bus, dev, func, secbus, subbus);
		}
	}
	buscount = subbus - secbus + 1;

	dcmn_err(CE_NOTE, MSGHDR
	    "secbus 0x%x existing sizes I/O 0x%x, MEM 0x%lx, PMEM 0x%lx",
	    "ppb", bus, dev, func, secbus,
	    pci_bus_res[secbus].io_size, pci_bus_res[secbus].mem_size,
	    pci_bus_res[secbus].pmem_size);

	/* At least the recorded demand, scaled by the bus count. */
	io.size = MAX(pci_bus_res[secbus].io_size, buscount * 0x200);

	uint64_t avail = get_per_bridge_avail(bus);

	mem.size = 0;
	if (avail > 0) {
		/* Spend up to 32 window-units per bus, budget allowing. */
		for (uint_t i = 32; i > 0; i >>= 1) {
			if (avail >= buscount * PPB_MEM_ALIGNMENT * i) {
				mem.size = buscount * PPB_MEM_ALIGNMENT * i;
				dcmn_err(CE_NOTE, MSGHDR
				    "Allocating %uMiB",
				    "ppb", bus, dev, func, i);
				break;
			}
		}
	}
	mem.size = MAX(pci_bus_res[secbus].mem_size, mem.size);

	pmem.size = MAX(pci_bus_res[secbus].pmem_size,
	    buscount * PPB_MEM_ALIGNMENT * 32);
	/*
	 * If 4GiB of prefetch space can be found on the parent, be far
	 * more generous with the prefetch window.
	 * NOTE(review): if pci_memlist_find() claims the range it finds,
	 * this probe is not side-effect free -- confirm.
	 */
	if (lookup_parbus_res(parbus, 1ULL << 32, PPB_MEM_ALIGNMENT,
	    RES_PMEM) > 0) {
		pmem.size = MAX(pci_bus_res[secbus].pmem_size,
		    buscount * PPB_MEM_ALIGNMENT * 512);
	}

	/* Round to window granularity; align to a power of two <= size. */
	io.size = P2ROUNDUP(io.size, PPB_IO_ALIGNMENT);
	mem.size = P2ROUNDUP(mem.size, PPB_MEM_ALIGNMENT);
	pmem.size = P2ROUNDUP(pmem.size, PPB_MEM_ALIGNMENT);

	io.align = io.size;
	P2LE(io.align);
	mem.align = mem.size;
	P2LE(mem.align);
	pmem.align = pmem.size;
	P2LE(pmem.align);

	/*
	 * A subtractive bridge needs no window registers: just carve
	 * ranges out of the parent for its secondary bus.
	 */
	if (pci_bus_res[secbus].subtractive && prog_sub) {
		/* Update the I/O resource on the subtractive bridge. */
		if (pci_bus_res[secbus].io_avail == NULL) {
			addr = get_parbus_res(parbus, secbus, io.size,
			    io.align, RES_IO);
			if (addr != 0) {
				add_ranges_prop(secbus, B_TRUE);
				pci_bus_res[secbus].io_reprogram =
				    pci_bus_res[parbus].io_reprogram;

				cmn_err(CE_NOTE,
				    MSGHDR "PROGRAM I/O range 0x%lx ~ 0x%lx "
				    "(subtractive bridge)",
				    "ppb", bus, dev, func,
				    addr, addr + io.size - 1);
			}
		}
		/* Update the memory resource likewise. */
		if (pci_bus_res[secbus].mem_avail == NULL) {
			addr = get_parbus_res(parbus, secbus, mem.size,
			    mem.align, RES_MEM);
			if (addr != 0) {
				add_ranges_prop(secbus, B_TRUE);
				pci_bus_res[secbus].mem_reprogram =
				    pci_bus_res[parbus].mem_reprogram;

				cmn_err(CE_NOTE,
				    MSGHDR "PROGRAM MEM range 0x%lx ~ 0x%lx "
				    "(subtractive bridge)",
				    "ppb", bus, dev, func,
				    addr, addr + mem.size - 1);
			}
		}
		goto cmd_enable;
	}

	/* Current window contents as programmed by firmware (or us). */
	fetch_ppb_res(bus, dev, func, RES_IO, &io.base, &io.limit);
	fetch_ppb_res(bus, dev, func, RES_MEM, &mem.base, &mem.limit);
	fetch_ppb_res(bus, dev, func, RES_PMEM, &pmem.base, &pmem.limit);

	/*
	 * Reprogram I/O if the parent was reprogrammed, I/O decode is
	 * off, or the window is disabled -- unless the bus's only I/O
	 * resources are the legacy VGA ranges.
	 */
	scratch_list = pci_memlist_dup(pci_bus_res[secbus].io_avail);
	pci_memlist_merge(&pci_bus_res[secbus].io_used, &scratch_list);

	reprogram_io = !list_is_vga_only(scratch_list, RES_IO) &&
	    (pci_bus_res[parbus].io_reprogram ||
	    (cmd_reg & PCI_COMM_IO) == 0 ||
	    io.base > io.limit);

	pci_memlist_free_all(&scratch_list);

	if (reprogram_io) {
		if (pci_bus_res[secbus].io_used != NULL) {
			pci_memlist_subsume(&pci_bus_res[secbus].io_used,
			    &pci_bus_res[secbus].io_avail);
		}

		/*
		 * If the child already has usable I/O resources and the
		 * parent is stable, just widen the window to cover them;
		 * otherwise allocate a fresh range from the parent.
		 */
		if (pci_bus_res[secbus].io_avail != NULL &&
		    !pci_bus_res[parbus].io_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			uint64_t base, limit;

			pci_memlist_range(pci_bus_res[secbus].io_avail,
			    RES_IO, &base, &limit);
			/*
			 * NOTE(review): these two truncating assignments
			 * are immediately overwritten below -- dead
			 * stores, presumably vestigial.
			 */
			io.base = (uint_t)base;
			io.limit = (uint_t)limit;
			io.base = P2ALIGN(base, PPB_IO_ALIGNMENT);
			io.limit = P2ROUNDUP(io.limit, PPB_IO_ALIGNMENT) - 1;
			io.size = io.limit - io.base + 1;
			ASSERT3U(io.base, <=, io.limit);
			pci_memlist_free_all(&pci_bus_res[secbus].io_avail);
			pci_memlist_insert(&pci_bus_res[secbus].io_avail,
			    io.base, io.size);
			pci_memlist_insert(&pci_bus_res[parbus].io_used,
			    io.base, io.size);
			(void) pci_memlist_remove(&pci_bus_res[parbus].io_avail,
			    io.base, io.size);
			pci_bus_res[secbus].io_reprogram = B_TRUE;
		} else {
			addr = get_parbus_res(parbus, secbus, io.size,
			    io.align, RES_IO);
			if (addr != 0) {
				io.base = addr;
				io.limit = addr + io.size - 1;
				pci_bus_res[secbus].io_reprogram = B_TRUE;
			}
		}

		if (pci_bus_res[secbus].io_reprogram) {
			set_ppb_res(bus, dev, func, RES_IO, io.base, io.limit);
			add_ranges_prop(secbus, B_TRUE);
		}
	}

	/*
	 * Memory: reprogram if the parent changed, memory decode is off,
	 * or both the MEM and PMEM windows are disabled (again unless the
	 * bus carries only the VGA frame buffer).
	 */
	scratch_list = pci_memlist_dup(pci_bus_res[secbus].mem_avail);
	pci_memlist_merge(&pci_bus_res[secbus].mem_used, &scratch_list);

	reprogram_mem = !list_is_vga_only(scratch_list, RES_MEM) &&
	    (pci_bus_res[parbus].mem_reprogram ||
	    (cmd_reg & PCI_COMM_MAE) == 0 ||
	    (mem.base > mem.limit && pmem.base > pmem.limit));

	pci_memlist_free_all(&scratch_list);

	if (reprogram_mem) {
		/* Non-prefetchable window. */
		if (pci_bus_res[secbus].mem_used != NULL) {
			pci_memlist_subsume(&pci_bus_res[secbus].mem_used,
			    &pci_bus_res[secbus].mem_avail);
		}

		if (pci_bus_res[secbus].mem_avail != NULL &&
		    !pci_bus_res[parbus].mem_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			/* Grow the window to cover existing resources. */
			pci_memlist_range(pci_bus_res[secbus].mem_avail,
			    RES_MEM, &mem.base, &mem.limit);
			mem.base = P2ALIGN(mem.base, PPB_MEM_ALIGNMENT);
			mem.limit = P2ROUNDUP(mem.limit, PPB_MEM_ALIGNMENT) - 1;
			mem.size = mem.limit + 1 - mem.base;
			ASSERT3U(mem.base, <=, mem.limit);
			pci_memlist_free_all(&pci_bus_res[secbus].mem_avail);
			pci_memlist_insert(&pci_bus_res[secbus].mem_avail,
			    mem.base, mem.size);
			pci_memlist_insert(&pci_bus_res[parbus].mem_used,
			    mem.base, mem.size);
			(void) pci_memlist_remove(
			    &pci_bus_res[parbus].mem_avail, mem.base,
			    mem.size);
			pci_bus_res[secbus].mem_reprogram = B_TRUE;
		} else {
			addr = get_parbus_res(parbus, secbus, mem.size,
			    mem.align, RES_MEM);
			if (addr != 0) {
				mem.base = addr;
				mem.limit = addr + mem.size - 1;
				pci_bus_res[secbus].mem_reprogram = B_TRUE;
			}
		}

		/* Prefetchable window, handled the same way. */
		if (pci_bus_res[secbus].pmem_used != NULL) {
			pci_memlist_subsume(&pci_bus_res[secbus].pmem_used,
			    &pci_bus_res[secbus].pmem_avail);
		}

		if (pci_bus_res[secbus].pmem_avail != NULL &&
		    !pci_bus_res[parbus].mem_reprogram &&
		    !pci_bus_res[parbus].subtractive) {
			pci_memlist_range(pci_bus_res[secbus].pmem_avail,
			    RES_PMEM, &pmem.base, &pmem.limit);
			pmem.base = P2ALIGN(pmem.base, PPB_MEM_ALIGNMENT);
			pmem.limit = P2ROUNDUP(pmem.limit, PPB_MEM_ALIGNMENT)
			    - 1;
			pmem.size = pmem.limit + 1 - pmem.base;
			ASSERT3U(pmem.base, <=, pmem.limit);
			pci_memlist_free_all(&pci_bus_res[secbus].pmem_avail);
			pci_memlist_insert(&pci_bus_res[secbus].pmem_avail,
			    pmem.base, pmem.size);
			pci_memlist_insert(&pci_bus_res[parbus].pmem_used,
			    pmem.base, pmem.size);
			(void) pci_memlist_remove(
			    &pci_bus_res[parbus].pmem_avail, pmem.base,
			    pmem.size);
			pci_bus_res[secbus].mem_reprogram = B_TRUE;
		} else {
			addr = get_parbus_res(parbus, secbus, pmem.size,
			    pmem.align, RES_PMEM);
			if (addr != 0) {
				pmem.base = addr;
				pmem.limit = addr + pmem.size - 1;
				pci_bus_res[secbus].mem_reprogram = B_TRUE;
			}
		}

		if (pci_bus_res[secbus].mem_reprogram) {
			set_ppb_res(bus, dev, func,
			    RES_MEM, mem.base, mem.limit);
			set_ppb_res(bus, dev, func,
			    RES_PMEM, pmem.base, pmem.limit);
			add_ranges_prop(secbus, B_TRUE);
		}
	}

cmd_enable:
	dump_memlists("fix_ppb_res end bus", bus);
	dump_memlists("fix_ppb_res end secbus", secbus);

	/* Enable decode for whatever spaces the bus now has. */
	if (pci_bus_res[secbus].io_avail != NULL)
		cmd_reg |= PCI_COMM_IO | PCI_COMM_ME;
	if (pci_bus_res[secbus].mem_avail != NULL ||
	    pci_bus_res[secbus].pmem_avail != NULL) {
		cmd_reg |= PCI_COMM_MAE | PCI_COMM_ME;
	}
	pci_putw(bus, dev, func, PCI_CONF_COMM, cmd_reg);
}
/*
 * Second-pass PCI configuration: create dips for platform-reported root
 * complexes, apply (or build) the persistent unit-address cache,
 * populate per-root resource lists, reconcile available/used lists,
 * then reprogram bridge windows and device BARs unless disabled via the
 * "pci-reprog" property.
 */
void
pci_reprogram(void)
{
	int i, pci_reconfig = 1;
	char *onoff;
	int bus;

	/* Create dips for any root complexes we haven't seen yet. */
	pci_prd_root_complex_iter(pci_rc_scan_cb, NULL);

	for (bus = 0; bus <= pci_boot_maxbus; bus++) {
		pci_prd_slot_name(bus, pci_bus_res[bus].dip);
	}

	pci_unitaddr_cache_init();

	/*
	 * If a persisted cache exists, force each root bus's "reg"
	 * property back to its cached unit-address so addresses stay
	 * stable across boots; otherwise persist the current layout.
	 */
	if (pci_unitaddr_cache_valid()) {
		int pci_regs[] = {0, 0, 0};
		int new_addr;
		int index = 0;

		for (bus = 0; bus <= pci_boot_maxbus; bus++) {
			/* skip non-root buses and uninstantiated ones */
			if ((pci_bus_res[bus].par_bus != (uchar_t)-1) ||
			    (pci_bus_res[bus].dip == NULL))
				continue;
			new_addr = pci_bus_unitaddr(index);
			if (pci_bus_res[bus].root_addr != new_addr) {
				pci_regs[0] = pci_bus_res[bus].root_addr =
				    new_addr;
				(void) ndi_prop_update_int_array(
				    DDI_DEV_T_NONE, pci_bus_res[bus].dip,
				    "reg", (int *)pci_regs, 3);
			}
			index++;
		}
	} else {
		pci_unitaddr_cache_create();
	}

	/* Set up (and clean) the resource lists of each root bus. */
	for (bus = 0; bus <= pci_boot_maxbus; bus++) {
		if (pci_bus_res[bus].par_bus != (uchar_t)-1)
			continue;

		populate_bus_res(bus);

		/* Keep the first 1MiB (legacy/real-mode space) off-limits. */
		(void) pci_memlist_remove(&pci_bus_res[bus].mem_avail,
		    0, 0x100000);
		(void) pci_memlist_remove(&pci_bus_res[bus].pmem_avail,
		    0, 0x100000);

		/*
		 * Compute this root's spare below-4GiB memory budget for
		 * later distribution among its bridges.
		 */
		if (pci_bus_res[bus].num_bridge > 0) {
			uint64_t mem = 0;

			for (struct memlist *ml = pci_bus_res[bus].mem_avail;
			    ml != NULL; ml = ml->ml_next) {
				if (ml->ml_address < UINT32_MAX)
					mem += ml->ml_size;
			}

			if (mem > pci_bus_res[bus].mem_size)
				mem -= pci_bus_res[bus].mem_size;
			else
				mem = 0;

			pci_bus_res[bus].mem_buffer = mem;

			dcmn_err(CE_NOTE,
			    "Bus 0x%02x, bridges 0x%x, buffer mem 0x%lx",
			    bus, pci_bus_res[bus].num_bridge, mem);
		}

		/* Anything already in use cannot also be available. */
		pci_memlist_remove_list(&pci_bus_res[bus].io_avail,
		    pci_bus_res[bus].io_used);
		pci_memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    pci_bus_res[bus].mem_used);
		pci_memlist_remove_list(&pci_bus_res[bus].pmem_avail,
		    pci_bus_res[bus].pmem_used);
		pci_memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    pci_bus_res[bus].pmem_used);
		pci_memlist_remove_list(&pci_bus_res[bus].pmem_avail,
		    pci_bus_res[bus].mem_used);

		/* ISA claims exclude ranges from PCI use too. */
		pci_memlist_remove_list(&pci_bus_res[bus].io_avail,
		    isa_res.io_used);
		pci_memlist_remove_list(&pci_bus_res[bus].mem_avail,
		    isa_res.mem_used);
	}
	pci_memlist_free_all(&isa_res.io_used);
	pci_memlist_free_all(&isa_res.mem_used);

	for (i = 0; i <= pci_boot_maxbus; i++) {
		if (pci_bus_res[i].par_bus == (uchar_t)-1)
			add_bus_range_prop(i);
		setup_bus_res(i);
	}

	/* Honour the "pci-reprog" override on the root node. */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "pci-reprog", &onoff) == DDI_SUCCESS) {
		if (strcmp(onoff, "off") == 0) {
			pci_reconfig = 0;
			cmn_err(CE_NOTE, "pci device reprogramming disabled");
		}
		ddi_prop_free(onoff);
	}

	remove_subtractive_res();

	/* Positive-decode bridges first, then subtractive, then devices. */
	if (pci_reconfig)
		for (i = 0; i <= pci_boot_maxbus; i++)
			fix_ppb_res(i, B_FALSE);

	for (i = 0; i <= pci_boot_maxbus; i++) {
		if (pci_reconfig) {
			if (pci_bus_res[i].subtractive)
				fix_ppb_res(i, B_TRUE);
			enumerate_bus_devs(i, CONFIG_NEW);
		}
	}

	for (i = 0; i <= pci_boot_maxbus; i++)
		add_bus_available_prop(i);
}
/*
 * Ask the platform (pci_prd) for the resource ranges routed to `bus`,
 * extend sub_bus to cover any bus numbers reported, fall back to sane
 * defaults for bus 0 when the platform reports nothing, and publish the
 * "ranges" property.
 */
static void
populate_bus_res(uchar_t bus)
{
	pci_bus_res[bus].pmem_avail = pci_prd_find_resource(bus,
	    PCI_PRD_R_PREFETCH);
	pci_bus_res[bus].mem_avail = pci_prd_find_resource(bus,
	    PCI_PRD_R_MMIO);
	pci_bus_res[bus].io_avail = pci_prd_find_resource(bus, PCI_PRD_R_IO);
	pci_bus_res[bus].bus_avail = pci_prd_find_resource(bus, PCI_PRD_R_BUS);

	dump_memlists("populate_bus_res", bus);

	/* Grow sub_bus to the highest bus number the platform reported. */
	if (pci_bus_res[bus].bus_avail != NULL) {
		struct memlist *entry;
		int current;

		entry = pci_bus_res[bus].bus_avail;
		while (entry != NULL) {
			current = entry->ml_address + entry->ml_size - 1;
			if (current > pci_bus_res[bus].sub_bus)
				pci_bus_res[bus].sub_bus = current;
			entry = entry->ml_next;
		}
	}

	if (bus == 0) {
		/* Fall back to the boot-time PCI memory map... */
		if (pci_bus_res[0].mem_avail == NULL) {
			pci_bus_res[0].mem_avail =
			    pci_memlist_dup(bootops->boot_mem->pcimem);
		}
		/* ...and to I/O ports above the legacy ISA range. */
		if (pci_bus_res[0].io_avail == NULL) {
			pci_memlist_insert(&pci_bus_res[0].io_avail, 0x100,
			    0xffff);
		}
	}

	add_ranges_prop(bus, B_FALSE);
}
/*
 * Create and bind the dev_info node for root bus `bus`, setting the
 * standard PCI addressing properties.  If the bus is not PCIe (per
 * create_pcie_root_bus()), mark it plain "pci".
 */
static void
create_root_bus_dip(uchar_t bus)
{
	int pci_regs[] = {0, 0, 0};
	dev_info_t *dip;

	ASSERT(pci_bus_res[bus].par_bus == (uchar_t)-1);

	num_root_bus++;
	ndi_devi_alloc_sleep(ddi_root_node(), "pci",
	    (pnode_t)DEVI_SID_NODEID, &dip);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#address-cells", 3);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#size-cells", 2);
	pci_regs[0] = pci_bus_res[bus].root_addr;
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "reg", (int *)pci_regs, 3);

	/* device_type is only set for non-PCIe root buses */
	if (create_pcie_root_bus(bus, dip) == B_FALSE)
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pci");

	(void) ndi_devi_bind_driver(dip, 0);
	pci_bus_res[bus].dip = dip;
}
/*
 * Walk the device/function slots on `bus`.
 *
 * CONFIG_NEW: re-emit register properties for devices on the bus's
 * private list that were marked for reprogramming (or whose bus's
 * windows moved), then free the list -- no config-space scan.
 *
 * CONFIG_INFO / CONFIG_FIX: probe config space and process each present
 * function; for CONFIG_INFO, afterwards roll this bus's resource sizes,
 * used lists and bridge counts up through all its ancestors.
 */
void
enumerate_bus_devs(uchar_t bus, int config_op)
{
	uchar_t dev, func, nfunc, header;
	ushort_t venid;
	struct pci_devfunc *devlist = NULL, *entry;

	if (bus_debug(bus)) {
		if (config_op == CONFIG_NEW) {
			dcmn_err(CE_NOTE, "configuring pci bus 0x%x", bus);
		} else if (config_op == CONFIG_FIX) {
			dcmn_err(CE_NOTE,
			    "fixing devices on pci bus 0x%x", bus);
		} else {
			dcmn_err(CE_NOTE, "enumerating pci bus 0x%x", bus);
		}
	}

	if (config_op == CONFIG_NEW) {
		devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata;
		while (devlist) {
			entry = devlist;
			devlist = entry->next;
			if (entry->reprogram ||
			    pci_bus_res[bus].io_reprogram ||
			    pci_bus_res[bus].mem_reprogram) {
				/* reprogram device(s) */
				(void) add_reg_props(entry->dip, bus,
				    entry->dev, entry->func, CONFIG_NEW, 0);
			}
			kmem_free(entry, sizeof (*entry));
		}
		pci_bus_res[bus].privdata = NULL;
		return;
	}

	for (dev = 0; dev < max_dev_pci; dev++) {
		nfunc = 1;
		for (func = 0; func < nfunc; func++) {
			venid = pci_getw(bus, dev, func, PCI_CONF_VENID);

			/* No device present at this dev/func. */
			if ((venid == 0xffff) || (venid == 0)) {
				continue;
			}

			header = pci_getb(bus, dev, func, PCI_CONF_HEADER);
			if (header == 0xff) {
				continue; /* illegal value */
			}

			/*
			 * Function 0's header type flags whether this is
			 * a multi-function device; widen the scan if so.
			 */
			if ((func == 0) && (header & PCI_HEADER_MULTI)) {
				nfunc = 8;
			}

			if (config_op == CONFIG_FIX ||
			    config_op == CONFIG_INFO) {
				process_devfunc(bus, dev, func, config_op);
			}
		}
	}

	/*
	 * Propagate this bus's demand and usage up through every
	 * ancestor so bridge windows can be sized from the top down.
	 */
	if (config_op == CONFIG_INFO) {
		int par_bus;

		par_bus = pci_bus_res[bus].par_bus;
		while (par_bus != (uchar_t)-1) {
			pci_bus_res[par_bus].io_size +=
			    pci_bus_res[bus].io_size;
			pci_bus_res[par_bus].mem_size +=
			    pci_bus_res[bus].mem_size;
			pci_bus_res[par_bus].pmem_size +=
			    pci_bus_res[bus].pmem_size;

			if (pci_bus_res[bus].io_used != NULL) {
				pci_memlist_merge(&pci_bus_res[bus].io_used,
				    &pci_bus_res[par_bus].io_used);
			}

			if (pci_bus_res[bus].mem_used != NULL) {
				pci_memlist_merge(&pci_bus_res[bus].mem_used,
				    &pci_bus_res[par_bus].mem_used);
			}

			if (pci_bus_res[bus].pmem_used != NULL) {
				pci_memlist_merge(&pci_bus_res[bus].pmem_used,
				    &pci_bus_res[par_bus].pmem_used);
			}

			pci_bus_res[par_bus].num_bridge +=
			    pci_bus_res[bus].num_bridge;
			bus = par_bus;
			par_bus = pci_bus_res[par_bus].par_bus;
		}
	}
}
/*
 * Check whether the administrator's "pci-ide" property on the root node
 * names this controller.  Matching is attempted against progressively
 * less specific compatible forms, from "pciVVVV,DDDD.SVVV.SDDD.RR" down
 * to the plain "pciVVVV,DDDD".  The property lookup result is cached in
 * function-local statics across calls.
 */
static boolean_t
check_pciide_prop(uchar_t revid, ushort_t venid, ushort_t devid,
    ushort_t subvenid, ushort_t subdevid)
{
	static int prop_exist = -1;
	static char *pciide_str;
	char cand[5][32];
	uint_t ncand = 0;

	if (prop_exist == -1) {
		prop_exist = (ddi_prop_lookup_string(DDI_DEV_T_ANY,
		    ddi_root_node(), DDI_PROP_DONTPASS, "pci-ide",
		    &pciide_str) == DDI_SUCCESS);
	}

	if (!prop_exist)
		return (B_FALSE);

	/* Subsystem-qualified forms apply only when a subsystem ID exists. */
	if (subvenid) {
		(void) snprintf(cand[ncand++], sizeof (cand[0]),
		    "pci%x,%x.%x.%x.%x",
		    venid, devid, subvenid, subdevid, revid);
		(void) snprintf(cand[ncand++], sizeof (cand[0]),
		    "pci%x,%x.%x.%x", venid, devid, subvenid, subdevid);
		(void) snprintf(cand[ncand++], sizeof (cand[0]),
		    "pci%x,%x", subvenid, subdevid);
	}
	(void) snprintf(cand[ncand++], sizeof (cand[0]), "pci%x,%x.%x",
	    venid, devid, revid);
	(void) snprintf(cand[ncand++], sizeof (cand[0]), "pci%x,%x",
	    venid, devid);

	for (uint_t i = 0; i < ncand; i++) {
		if (strcmp(pciide_str, cand[i]) == 0)
			return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Decide whether this mass-storage function should be treated as a
 * pci-ide controller: either it is genuinely class IDE, the admin forced
 * it via the "pci-ide" property, or it is one of a small set of known
 * SATA/other controllers that operate in IDE mode.
 */
static boolean_t
is_pciide(const pci_prop_data_t *prop)
{
	struct ide_table {
		ushort_t venid;
		ushort_t devid;
	};
	/* Known non-IDE-class parts that nevertheless behave as pci-ide. */
	static struct ide_table ide_other[] = {
		{0x1095, 0x3112},
		{0x1095, 0x3114},
		{0x1095, 0x3512},
		{0x1095, 0x680},
		{0x1283, 0x8211}
	};

	if (prop->ppd_class != PCI_CLASS_MASS)
		return (B_FALSE);

	if (prop->ppd_subclass == PCI_MASS_IDE)
		return (B_TRUE);

	if (check_pciide_prop(prop->ppd_rev, prop->ppd_vendid,
	    prop->ppd_devid, prop->ppd_subvid, prop->ppd_subsys))
		return (B_TRUE);

	/* The exception table only covers OTHER and SATA subclasses. */
	switch (prop->ppd_subclass) {
	case PCI_MASS_OTHER:
	case PCI_MASS_SATA:
		break;
	default:
		return (B_FALSE);
	}

	for (size_t i = 0; i < ARRAY_SIZE(ide_other); i++) {
		const struct ide_table *te = &ide_other[i];

		if (te->venid == prop->ppd_vendid &&
		    te->devid == prop->ppd_devid)
			return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Record a boot-time PCI fix so undo_pci_fixes() can reverse it later;
 * entries are pushed onto the head of the global 'undolist'.
 */
static void
add_undofix_entry(uint8_t bus, uint8_t dev, uint8_t fn,
    void (*undofn)(uint8_t, uint8_t, uint8_t))
{
	struct pci_fixundo *fix;

	fix = kmem_alloc(sizeof (*fix), KM_SLEEP);
	fix->bus = bus;
	fix->dev = dev;
	fix->fn = fn;
	fix->undofn = undofn;

	/* Push onto the singly-linked undo list. */
	fix->next = undolist;
	undolist = fix;
}
/*
 * Apply boot-time chipset workarounds across every known bus by running
 * a CONFIG_FIX enumeration pass on each.
 */
void
add_pci_fixes(void)
{
	for (int busno = 0; busno <= pci_boot_maxbus; busno++) {
		/*
		 * For each bus, apply needed fixes to the appropriate
		 * devices.
		 */
		enumerate_bus_devs(busno, CONFIG_FIX);
	}
}
void
undo_pci_fixes(void)
{
struct pci_fixundo *nextundo;
uint8_t bus, dev, fn;
while (undolist != NULL) {
bus = undolist->bus;
dev = undolist->dev;
fn = undolist->fn;
(*(undolist->undofn))(bus, dev, fn);
nextundo = undolist->next;
kmem_free(undolist, sizeof (struct pci_fixundo));
undolist = nextundo;
}
}
/*
 * Re-enable NMI generation on the AMD8111 LPC bridge, reversing
 * pci_fix_amd8111().
 */
static void
undo_amd8111_pci_fix(uint8_t bus, uint8_t dev, uint8_t fn)
{
	uint8_t ctl;

	ctl = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1);
	pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1,
	    ctl | AMD8111_ENABLENMI);
}
/*
 * Disable NMI generation on the AMD8111 LPC bridge for the duration of
 * boot-time enumeration and register an undo entry to restore it.
 * No-op if NMI generation is already off.
 */
static void
pci_fix_amd8111(uint8_t bus, uint8_t dev, uint8_t fn)
{
	uint8_t ctl;

	ctl = pci_getb(bus, dev, fn, LPC_IO_CONTROL_REG_1);
	if ((ctl & AMD8111_ENABLENMI) == 0)
		return;

	pci_putb(bus, dev, fn, LPC_IO_CONTROL_REG_1,
	    ctl & ~AMD8111_ENABLENMI);
	add_undofix_entry(bus, dev, fn, undo_amd8111_pci_fix);
}
/*
 * Force the function's power-management state to D0 (fully on).  Walks
 * the capability list (cardbus headers keep their capability pointer at
 * a different offset) looking for the PM capability; if found, clears
 * the PMCSR power-state field and writes D0.  Functions without a
 * capability list or without a PM capability are left untouched.
 */
static void
set_devpm_d0(uchar_t bus, uchar_t dev, uchar_t func)
{
	uint16_t status;
	uint8_t header;
	uint8_t cap_ptr;
	uint8_t cap_id;
	uint16_t pmcsr;
	status = pci_getw(bus, dev, func, PCI_CONF_STAT);
	if (!(status & PCI_STAT_CAP))
		return;	/* No capability list at all. */
	header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M;
	if (header == PCI_HEADER_CARDBUS)
		cap_ptr = pci_getb(bus, dev, func, PCI_CBUS_CAP_PTR);
	else
		cap_ptr = pci_getb(bus, dev, func, PCI_CONF_CAP_PTR);
	/*
	 * Walk the list until the NULL pointer or an out-of-range pointer
	 * terminates it; pointers are masked to their valid bits first.
	 */
	while (cap_ptr != PCI_CAP_NEXT_PTR_NULL && cap_ptr >= PCI_CAP_PTR_OFF) {
		cap_ptr &= PCI_CAP_PTR_MASK;
		cap_id = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_ID);
		if (cap_id == PCI_CAP_ID_PM) {
			pmcsr = pci_getw(bus, dev, func, cap_ptr + PCI_PMCSR);
			pmcsr &= ~(PCI_PMCSR_STATE_MASK);
			pmcsr |= PCI_PMCSR_D0; /* D0 state */
			pci_putw(bus, dev, func, cap_ptr + PCI_PMCSR, pmcsr);
			break;
		}
		cap_ptr = pci_getb(bus, dev, func, cap_ptr + PCI_CAP_NEXT_PTR);
	}
}
/*
 * Handle one present PCI function during enumeration.
 *
 * For CONFIG_FIX this only applies chipset workarounds (currently the
 * AMD8111 NMI fix) and returns.  For CONFIG_INFO it builds the full
 * devinfo node: naming, standard properties, power-management D0,
 * bridge/leaf-specific properties, pci-ide special casing, "reg" and
 * "assigned-addresses" properties, and driver binding.  Leaf (non-bridge)
 * functions are also queued on the bus's privdata list so a later
 * CONFIG_NEW pass can reprogram them.
 */
static void
process_devfunc(uchar_t bus, uchar_t dev, uchar_t func, int config_op)
{
	pci_prop_data_t prop_data;
	pci_prop_failure_t prop_ret;
	dev_info_t *dip;
	boolean_t reprogram = B_FALSE;
	boolean_t pciide = B_FALSE;
	int power[2] = {1, 1};
	struct pci_devfunc *devlist = NULL, *entry = NULL;
	gfx_entry_t *gfxp;
	pcie_req_id_t bdf;
	prop_ret = pci_prop_data_fill(NULL, bus, dev, func, &prop_data);
	if (prop_ret != PCI_PROP_OK) {
		cmn_err(CE_WARN, MSGHDR "failed to get basic PCI data: 0x%x",
		    "pci", bus, dev, func, prop_ret);
		return;
	}
	/* Track cardbus bridges during the discovery pass. */
	if (prop_data.ppd_header == PCI_HEADER_CARDBUS &&
	    config_op == CONFIG_INFO) {
		pci_bus_res[bus].num_cbb++;
	}
	if (config_op == CONFIG_FIX) {
		if (prop_data.ppd_vendid == VENID_AMD &&
		    prop_data.ppd_devid == DEVID_AMD8111_LPC) {
			pci_fix_amd8111(bus, dev, func);
		}
		return;
	}
	/* Make sure the bus has a devinfo node before hanging children. */
	if (pci_bus_res[bus].dip == NULL)
		create_root_bus_dip(bus);
	ndi_devi_alloc_sleep(pci_bus_res[bus].dip, DEVI_PSEUDO_NEXNAME,
	    DEVI_SID_NODEID, &dip);
	prop_ret = pci_prop_name_node(dip, &prop_data);
	if (prop_ret != PCI_PROP_OK) {
		cmn_err(CE_WARN, MSGHDR "failed to set node name: 0x%x; "
		    "devinfo node not created", "pci", bus, dev, func,
		    prop_ret);
		(void) ndi_devi_free(dip);
		return;
	}
	bdf = PCI_GETBDF(bus, dev, func);
	/*
	 * Known-broken AMD bridges need a config-access workaround; for
	 * bridges the secondary/subordinate range is passed along too.
	 */
	if (IS_BAD_AMD_NTBRIDGE(prop_data.ppd_vendid, prop_data.ppd_devid) ||
	    IS_AMD_8132_CHIP(prop_data.ppd_vendid, prop_data.ppd_devid)) {
		uchar_t secbus = 0;
		uchar_t subbus = 0;
		if (pci_prop_class_is_pcibridge(&prop_data)) {
			secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS);
			subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS);
		}
		pci_cfgacc_add_workaround(bdf, secbus, subbus);
	}
	/*
	 * Devices under a PCIe root complex get early PCIe bus setup (and
	 * the CK8-04 AER pointer fix) before further property creation.
	 */
	if (pcie_get_rc_dip(dip) != NULL) {
		ck804_fix_aer_ptr(dip, bdf);
		(void) pcie_init_bus(dip, bdf, PCIE_BUS_INITIAL);
	}
	prop_ret = pci_prop_set_common_props(dip, &prop_data);
	if (prop_ret != PCI_PROP_OK) {
		cmn_err(CE_WARN, MSGHDR "failed to set properties: 0x%x; "
		    "devinfo node not created", "pci", bus, dev, func,
		    prop_ret);
		/* Unwind the PCIe bus setup done above before freeing. */
		if (pcie_get_rc_dip(dip) != NULL) {
			pcie_fini_bus(dip, PCIE_BUS_FINAL);
		}
		(void) ndi_devi_free(dip);
		return;
	}
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "power-consumption", power, 2);
	set_devpm_d0(bus, dev, func);
	if (pci_prop_class_is_pcibridge(&prop_data)) {
		boolean_t pciex = (prop_data.ppd_flags & PCI_PROP_F_PCIE) != 0;
		/* PCIe-to-PCI bridges are treated as conventional PCI. */
		boolean_t is_pci_bridge = pciex &&
		    prop_data.ppd_pcie_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI;
		add_ppb_props(dip, bus, dev, func, pciex, is_pci_bridge);
	} else {
		/*
		 * Leaf device: queue it for the later CONFIG_NEW
		 * reprogramming pass.
		 */
		devlist = (struct pci_devfunc *)pci_bus_res[bus].privdata;
		entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
		entry->dip = dip;
		entry->dev = dev;
		entry->func = func;
		entry->next = devlist;
		pci_bus_res[bus].privdata = entry;
	}
	if (pci_prop_class_is_ioapic(&prop_data)) {
		create_ioapic_node(bus, dev, func, prop_data.ppd_vendid,
		    prop_data.ppd_devid);
	}
	/* NVIDIA LPC bridge at dev 1 func 0 carries ISA bridge properties. */
	if (NVIDIA_IS_LPC_BRIDGE(prop_data.ppd_vendid, prop_data.ppd_devid) &&
	    dev == 1 && func == 0) {
		add_nvidia_isa_bridge_props(dip, bus, dev, func);
		apic_nvidia_io_max++;
	}
	prop_ret = pci_prop_set_compatible(dip, &prop_data);
	if (prop_ret != PCI_PROP_OK) {
		cmn_err(CE_WARN, MSGHDR "failed to set compatible property: "
		    "0x%x; device may not bind to a driver", "pci", bus, dev,
		    func, prop_ret);
	}
	/*
	 * Rename to "pci-ide" only when no driver already claims the node
	 * under its natural compatible names.
	 */
	if (is_pciide(&prop_data)) {
		if (ddi_compatible_driver_major(dip, NULL) == (major_t)-1) {
			(void) ndi_devi_set_nodename(dip, "pci-ide", 0);
			pciide = B_TRUE;
		}
	}
	DEVI_SET_PCI(dip);
	reprogram = add_reg_props(dip, bus, dev, func, config_op, pciide);
	(void) ndi_devi_bind_driver(dip, 0);
	if (pciide) {
		dev_info_t *cdip;
		/* pci-ide nodes get two single-cell-addressed "ide" children. */
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pci-ide");
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "#address-cells", 1);
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "#size-cells", 0);
		ndi_devi_alloc_sleep(dip, "ide",
		    (pnode_t)DEVI_SID_NODEID, &cdip);
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
		    "reg", 0);
		(void) ndi_devi_bind_driver(cdip, 0);
		ndi_devi_alloc_sleep(dip, "ide",
		    (pnode_t)DEVI_SID_NODEID, &cdip);
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
		    "reg", 1);
		(void) ndi_devi_bind_driver(cdip, 0);
		/* pci-ide BARs are never reprogrammed. */
		reprogram = B_FALSE;
	}
	/* Track VGA-class devices on the global graphics list. */
	if (pci_prop_class_is_vga(&prop_data)) {
		gfxp = kmem_zalloc(sizeof (*gfxp), KM_SLEEP);
		gfxp->g_dip = dip;
		gfxp->g_prev = NULL;
		gfxp->g_next = gfx_devinfo_list;
		gfx_devinfo_list = gfxp;
		if (gfxp->g_next)
			gfxp->g_next->g_prev = gfxp;
	}
	if (reprogram && (entry != NULL))
		entry->reprogram = B_TRUE;
}
/*
 * Legacy (compatibility-mode) IDE decode table, indexed by BAR number.
 * native_mask selects the programming-interface bit that indicates the
 * channel runs in native mode; bar_offset adjusts the BAR base in native
 * mode; addr/length give the fixed legacy I/O decode otherwise
 * (0x1f0/0x3f6 primary, 0x170/0x376 secondary).
 */
static struct {
	uchar_t native_mask;	/* progclass bit for native mode */
	uchar_t bar_offset;	/* offset added to BAR base in native mode */
	ushort_t addr;		/* fixed legacy I/O base */
	ushort_t length;	/* decode length in bytes */
} pciide_bar[] = {
	{ 0x01, 0, 0x1f0, 8 },
	{ 0x01, 2, 0x3f6, 1 },
	{ 0x04, 0, 0x170, 8 },
	{ 0x04, 2, 0x376, 1 }
};
/*
 * Adjust an IDE controller BAR using the pciide_bar[] legacy decode
 * table.  In native mode the BAR base is offset-adjusted; in
 * compatibility mode the fixed legacy address is substituted and the
 * caller is told to hard-decode it.  A zero base or length collapses
 * the BAR to empty with no hard decode.
 *
 * Returns B_TRUE when the resulting address must be hard-decoded.
 */
static boolean_t
pciide_adjust_bar(uchar_t progcl, uint_t bar, uint_t *basep, uint_t *lenp)
{
	boolean_t hard_decode = B_FALSE;

	/* Only the four channel BARs are covered by the decode table. */
	if (bar <= 3) {
		*lenp = pciide_bar[bar].length;

		if ((progcl & pciide_bar[bar].native_mask) != 0) {
			/* Native mode: keep the BAR, adjusted by offset. */
			*basep += pciide_bar[bar].bar_offset;
		} else {
			/* Compatibility mode: use the fixed legacy decode. */
			*basep = pciide_bar[bar].addr;
			hard_decode = B_TRUE;
		}
	}

	/* An empty base or length means no decode at all. */
	if (*basep == 0 || *lenp == 0) {
		*basep = 0;
		*lenp = 0;
		hard_decode = B_FALSE;
	}

	return (hard_decode);
}
/*
 * Size one BAR by the standard write-ones probe and build its "reg" and
 * "assigned-addresses" entries.  While probing, non-bridge devices have
 * I/O and memory decode temporarily disabled in the command register.
 *
 * Behavior by 'op':
 *   CONFIG_INFO: record firmware-assigned resources in the used/avail
 *	lists and accumulate per-bus size demand; an unassigned BAR sets
 *	the reprogram flag.
 *   CONFIG_NEW: allocate (or re-allocate) a range from the available
 *	lists, write it to the BAR, and verify by readback; on readback
 *	failure the BAR is cleared, decode is disabled, and the range is
 *	returned to the free list.
 *
 * Returns -1 when the BAR is unimplemented (skip it), 1 when the caller
 * should schedule reprogramming, 0 otherwise.  *bar_sz reports whether
 * the BAR consumed one or two registers.
 */
static int
add_bar_reg_props(int op, uchar_t bus, uchar_t dev, uchar_t func, uint_t bar,
    ushort_t offset, pci_regspec_t *regs, pci_regspec_t *assigned,
    ushort_t *bar_sz, boolean_t pciide)
{
	uint8_t baseclass, subclass, progclass;
	uint32_t base, devloc;
	uint16_t command = 0;
	int reprogram = 0;
	uint64_t value;
	devloc = PCI_REG_MAKE_BDFR(bus, dev, func, 0);
	baseclass = pci_getb(bus, dev, func, PCI_CONF_BASCLASS);
	subclass = pci_getb(bus, dev, func, PCI_CONF_SUBCLASS);
	progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS);
	base = pci_getl(bus, dev, func, offset);
	/* Quiesce decode during the sizing probe (bridges excepted). */
	if (baseclass != PCI_CLASS_BRIDGE) {
		command = (uint_t)pci_getw(bus, dev, func, PCI_CONF_COMM);
		pci_putw(bus, dev, func, PCI_CONF_COMM,
		    command & ~(PCI_COMM_MAE | PCI_COMM_IO));
	}
	/* Write-ones probe: the readback mask encodes the BAR size. */
	pci_putl(bus, dev, func, offset, 0xffffffff);
	value = pci_getl(bus, dev, func, offset);
	pci_putl(bus, dev, func, offset, base);
	if (baseclass != PCI_CLASS_BRIDGE)
		pci_putw(bus, dev, func, PCI_CONF_COMM, command);
	if ((pciide && bar < 4) || (base & PCI_BASE_SPACE_IO) != 0) {
		/* I/O space BAR (pci-ide channel BARs are forced here). */
		struct memlist **io_avail = &pci_bus_res[bus].io_avail;
		struct memlist **io_used = &pci_bus_res[bus].io_used;
		boolean_t hard_decode = B_FALSE;
		uint_t type, len;
		*bar_sz = PCI_BAR_SZ_32;
		value &= PCI_BASE_IO_ADDR_M;
		len = BARMASKTOLEN(value);
		if (pciide) {
			/*
			 * Non-IDE-class pci-ide parts are treated as
			 * native-mode on both channels.
			 */
			if (subclass != PCI_MASS_IDE) {
				progclass = (PCI_IDE_IF_NATIVE_PRI |
				    PCI_IDE_IF_NATIVE_SEC);
			}
			hard_decode = pciide_adjust_bar(progclass, bar,
			    &base, &len);
		} else if (value == 0) {
			/* Unimplemented BAR. */
			return (-1);
		}
		regs->pci_phys_hi = PCI_ADDR_IO | devloc;
		if (hard_decode) {
			regs->pci_phys_hi |= PCI_RELOCAT_B;
			regs->pci_phys_low = base & PCI_BASE_IO_ADDR_M;
		} else {
			regs->pci_phys_hi |= offset;
			regs->pci_phys_low = 0;
		}
		assigned->pci_phys_hi = PCI_RELOCAT_B | regs->pci_phys_hi;
		regs->pci_size_low = assigned->pci_size_low = len;
		type = base & ~PCI_BASE_IO_ADDR_M;
		base &= PCI_BASE_IO_ADDR_M;
		/*
		 * On a subtractive bus with no local I/O pool, allocate
		 * from the nearest ancestor that has one.
		 */
		if (op == CONFIG_NEW && pci_bus_res[bus].subtractive &&
		    *io_avail == NULL) {
			uchar_t res_bus;
			res_bus = resolve_alloc_bus(bus, RES_IO);
			io_avail = &pci_bus_res[res_bus].io_avail;
		}
		if (op == CONFIG_INFO) {
			if (base != 0) {
				/* Firmware already placed this BAR. */
				(void) pci_memlist_remove(io_avail, base, len);
				pci_memlist_insert(io_used, base, len);
			} else {
				reprogram = 1;
			}
			dcmn_err(CE_NOTE,
			    MSGHDR "BAR%u I/O FWINIT 0x%x ~ 0x%x",
			    "pci", bus, dev, func, bar, base, len);
			pci_bus_res[bus].io_size += len;
		} else if ((*io_avail != NULL && base == 0) ||
		    pci_bus_res[bus].io_reprogram) {
			base = pci_memlist_find(io_avail, len, len);
			if (base == 0) {
				cmn_err(CE_WARN, MSGHDR "BAR%u I/O "
				    "failed to find length 0x%x",
				    "pci", bus, dev, func, bar, len);
			} else {
				uint32_t nbase;
				cmn_err(CE_NOTE, "!" MSGHDR "BAR%u "
				    "I/O REPROG 0x%x ~ 0x%x",
				    "pci", bus, dev, func,
				    bar, base, len);
				pci_putl(bus, dev, func, offset, base | type);
				nbase = pci_getl(bus, dev, func, offset);
				nbase &= PCI_BASE_IO_ADDR_M;
				if (base != nbase) {
					/*
					 * Readback mismatch: clear the BAR,
					 * disable I/O decode, and return the
					 * range to the free pool.
					 */
					cmn_err(CE_NOTE, "!" MSGHDR "BAR%u "
					    "I/O REPROG 0x%x ~ 0x%x "
					    "FAILED READBACK 0x%x",
					    "pci", bus, dev, func,
					    bar, base, len, nbase);
					pci_putl(bus, dev, func, offset, 0);
					if (baseclass != PCI_CLASS_BRIDGE) {
						command = pci_getw(bus, dev,
						    func, PCI_CONF_COMM);
						command &= ~PCI_COMM_IO;
						pci_putw(bus, dev, func,
						    PCI_CONF_COMM, command);
					}
					pci_memlist_insert(io_avail, base, len);
					base = 0;
				} else {
					pci_memlist_insert(io_used, base, len);
				}
			}
		}
		assigned->pci_phys_low = base;
	} else {
		/* Memory space BAR: 32-bit or 64-bit, possibly prefetchable. */
		struct memlist **mem_avail = &pci_bus_res[bus].mem_avail;
		struct memlist **mem_used = &pci_bus_res[bus].mem_used;
		struct memlist **pmem_avail = &pci_bus_res[bus].pmem_avail;
		struct memlist **pmem_used = &pci_bus_res[bus].pmem_used;
		uint_t type, base_hi, phys_hi;
		uint64_t len, fbase;
		if ((base & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL) {
			/* 64-bit BAR: probe the upper register too. */
			*bar_sz = PCI_BAR_SZ_64;
			base_hi = pci_getl(bus, dev, func, offset + 4);
			pci_putl(bus, dev, func, offset + 4,
			    0xffffffff);
			value |= (uint64_t)pci_getl(bus, dev, func,
			    offset + 4) << 32;
			pci_putl(bus, dev, func, offset + 4, base_hi);
			phys_hi = PCI_ADDR_MEM64;
			value &= PCI_BASE_M_ADDR64_M;
		} else {
			*bar_sz = PCI_BAR_SZ_32;
			base_hi = 0;
			phys_hi = PCI_ADDR_MEM32;
			value &= PCI_BASE_M_ADDR_M;
		}
		if (value == 0)
			return (-1);	/* Unimplemented BAR. */
		len = BARMASKTOLEN(value);
		regs->pci_size_low = assigned->pci_size_low = len & 0xffffffff;
		regs->pci_size_hi = assigned->pci_size_hi = len >> 32;
		phys_hi |= devloc | offset;
		if (base & PCI_BASE_PREF_M)
			phys_hi |= PCI_PREFETCH_B;
		/*
		 * On a subtractive bus with no local pool of the needed
		 * kind, redirect allocation to an ancestor's pools.
		 */
		if (op == CONFIG_NEW && pci_bus_res[bus].subtractive) {
			uchar_t res_bus = bus;
			if ((phys_hi & PCI_PREFETCH_B) != 0 &&
			    *pmem_avail == NULL) {
				res_bus = resolve_alloc_bus(bus, RES_PMEM);
				pmem_avail = &pci_bus_res[res_bus].pmem_avail;
				mem_avail = &pci_bus_res[res_bus].mem_avail;
			} else if (*mem_avail == NULL) {
				res_bus = resolve_alloc_bus(bus, RES_MEM);
				pmem_avail = &pci_bus_res[res_bus].pmem_avail;
				mem_avail = &pci_bus_res[res_bus].mem_avail;
			}
		}
		regs->pci_phys_hi = assigned->pci_phys_hi = phys_hi;
		assigned->pci_phys_hi |= PCI_RELOCAT_B;
		type = base & ~PCI_BASE_M_ADDR_M;
		base &= PCI_BASE_M_ADDR_M;
		fbase = (((uint64_t)base_hi) << 32) | base;
		if (op == CONFIG_INFO) {
			dcmn_err(CE_NOTE,
			    MSGHDR "BAR%u %sMEM FWINIT 0x%lx ~ 0x%lx%s",
			    "pci", bus, dev, func, bar,
			    (phys_hi & PCI_PREFETCH_B) ? "P" : " ",
			    fbase, len,
			    *bar_sz == PCI_BAR_SZ_64 ? " (64-bit)" : "");
			if (fbase != 0) {
				/* Firmware already placed this BAR. */
				(void) pci_memlist_remove(mem_avail, fbase,
				    len);
				(void) pci_memlist_remove(pmem_avail, fbase,
				    len);
				if ((phys_hi & PCI_PREFETCH_B) != 0) {
					pci_memlist_insert(pmem_used, fbase,
					    len);
				} else {
					pci_memlist_insert(mem_used, fbase,
					    len);
				}
			} else {
				/*
				 * Unassigned: flag for reprogramming and
				 * double the size demand accounted below.
				 */
				reprogram = 1;
				len *= 2;
			}
			if (phys_hi & PCI_PREFETCH_B)
				pci_bus_res[bus].pmem_size += len;
			else
				pci_bus_res[bus].mem_size += len;
		} else if (pci_bus_res[bus].mem_reprogram || (fbase == 0 &&
		    (*mem_avail != NULL || *pmem_avail != NULL))) {
			boolean_t pf = B_FALSE;
			fbase = 0;
			/* Prefer the prefetch pool for prefetchable BARs. */
			if ((phys_hi & PCI_PREFETCH_B) != 0 &&
			    *pmem_avail != NULL) {
				fbase = pci_memlist_find(pmem_avail, len, len);
				if (fbase != 0)
					pf = B_TRUE;
			}
			if (fbase == 0 && *mem_avail != NULL)
				fbase = pci_memlist_find(mem_avail, len, len);
			base_hi = fbase >> 32;
			base = fbase & 0xffffffff;
			if (fbase == 0) {
				cmn_err(CE_WARN, MSGHDR "BAR%u MEM "
				    "failed to find length 0x%lx",
				    "pci", bus, dev, func, bar, len);
			} else {
				uint64_t nbase, nbase_hi = 0;
				cmn_err(CE_NOTE, "!" MSGHDR "BAR%u "
				    "%s%s REPROG 0x%lx ~ 0x%lx",
				    "pci", bus, dev, func, bar,
				    pf ? "PMEM" : "MEM",
				    *bar_sz == PCI_BAR_SZ_64 ? "64" : "",
				    fbase, len);
				pci_putl(bus, dev, func, offset, base | type);
				nbase = pci_getl(bus, dev, func, offset);
				if (*bar_sz == PCI_BAR_SZ_64) {
					pci_putl(bus, dev, func,
					    offset + 4, base_hi);
					nbase_hi = pci_getl(bus, dev, func,
					    offset + 4);
				}
				nbase &= PCI_BASE_M_ADDR_M;
				if (base != nbase || base_hi != nbase_hi) {
					/*
					 * Readback mismatch: clear the BAR,
					 * disable memory decode, and return
					 * the range to the free pool.
					 */
					cmn_err(CE_NOTE, "!" MSGHDR "BAR%u "
					    "%s%s REPROG 0x%lx ~ 0x%lx "
					    "FAILED READBACK 0x%lx",
					    "pci", bus, dev, func, bar,
					    pf ? "PMEM" : "MEM",
					    *bar_sz == PCI_BAR_SZ_64 ?
					    "64" : "",
					    fbase, len,
					    nbase_hi << 32 | nbase);
					pci_putl(bus, dev, func, offset, 0);
					if (*bar_sz == PCI_BAR_SZ_64) {
						pci_putl(bus, dev, func,
						    offset + 4, 0);
					}
					if (baseclass != PCI_CLASS_BRIDGE) {
						command = pci_getw(bus, dev,
						    func, PCI_CONF_COMM);
						command &= ~PCI_COMM_MAE;
						pci_putw(bus, dev, func,
						    PCI_CONF_COMM, command);
					}
					pci_memlist_insert(
					    pf ? pmem_avail : mem_avail,
					    base, len);
					base = base_hi = 0;
				} else {
					if (pf) {
						pci_memlist_insert(pmem_used,
						    fbase, len);
						(void) pci_memlist_remove(
						    pmem_avail, fbase, len);
					} else {
						pci_memlist_insert(mem_used,
						    fbase, len);
						(void) pci_memlist_remove(
						    mem_avail, fbase, len);
					}
				}
			}
		}
		assigned->pci_phys_mid = base_hi;
		assigned->pci_phys_low = base;
	}
	dcmn_err(CE_NOTE, MSGHDR "BAR%u ---- %08x.%x.%x.%x.%x",
	    "pci", bus, dev, func, bar,
	    assigned->pci_phys_hi,
	    assigned->pci_phys_mid,
	    assigned->pci_phys_low,
	    assigned->pci_size_hi,
	    assigned->pci_size_low);
	return (reprogram);
}
/*
 * Build the "reg" and "assigned-addresses" properties for one function:
 * the bus/dev/func entry, every implemented BAR (via add_bar_reg_props),
 * the expansion ROM, and the fixed legacy VGA/8514 ranges for display
 * devices.  Used/available resource lists and per-bus size accounting
 * are updated along the way.
 *
 * Returns B_TRUE when the device needs its resources reprogrammed in a
 * later CONFIG_NEW pass (unassigned BAR, or a cardbus header, which is
 * always reprogrammed).
 *
 * Fix: the BAR loop's regs argument read "®s[nreg]" — a mojibake
 * corruption of "&regs[nreg]" (the '&'+'r' pair collapsed into '®'),
 * which does not compile.  Restored to "&regs[nreg]".
 */
static boolean_t
add_reg_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func,
    int op, boolean_t pciide)
{
	uchar_t baseclass, subclass, progclass, header;
	uint_t bar, value, devloc, base;
	ushort_t bar_sz, offset, end;
	int max_basereg, reprogram = B_FALSE;
	struct memlist **io_avail, **io_used;
	struct memlist **mem_avail, **mem_used;
	struct memlist **pmem_avail;
	/* 16 entries is enough: BARs + ROM + fixed VGA/8514 ranges. */
	pci_regspec_t regs[16] = {{0}};
	pci_regspec_t assigned[15] = {{0}};
	int nreg, nasgn;
	io_avail = &pci_bus_res[bus].io_avail;
	io_used = &pci_bus_res[bus].io_used;
	mem_avail = &pci_bus_res[bus].mem_avail;
	mem_used = &pci_bus_res[bus].mem_used;
	pmem_avail = &pci_bus_res[bus].pmem_avail;
	dump_memlists("add_reg_props start", bus);
	devloc = PCI_REG_MAKE_BDFR(bus, dev, func, 0);
	/* First "reg" entry names the device location itself. */
	regs[0].pci_phys_hi = devloc;
	nreg = 1;
	nasgn = 0;
	baseclass = pci_getb(bus, dev, func, PCI_CONF_BASCLASS);
	subclass = pci_getb(bus, dev, func, PCI_CONF_SUBCLASS);
	progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS);
	header = pci_getb(bus, dev, func, PCI_CONF_HEADER) & PCI_HEADER_TYPE_M;
	/* The header type determines how many BARs the function has. */
	switch (header) {
	case PCI_HEADER_ZERO:
		max_basereg = PCI_BASE_NUM;
		break;
	case PCI_HEADER_PPB:
		max_basereg = PCI_BCNF_BASE_NUM;
		break;
	case PCI_HEADER_CARDBUS:
		max_basereg = PCI_CBUS_BASE_NUM;
		/* Cardbus bridges are always reprogrammed. */
		reprogram = B_TRUE;
		break;
	default:
		max_basereg = 0;
		break;
	}
	end = PCI_CONF_BASE0 + max_basereg * sizeof (uint_t);
	for (bar = 0, offset = PCI_CONF_BASE0; offset < end;
	    bar++, offset += bar_sz) {
		int ret;
		ret = add_bar_reg_props(op, bus, dev, func, bar, offset,
		    &regs[nreg], &assigned[nasgn], &bar_sz, pciide);
		/* A 64-bit BAR consumes two BAR slots. */
		if (bar_sz == PCI_BAR_SZ_64)
			bar++;
		if (ret == -1)		/* Unimplemented BAR: skip it. */
			continue;
		if (ret == 1)
			reprogram = B_TRUE;
		nreg++;
		nasgn++;
	}
	/* Expansion ROM: only type-0 and PPB headers have one. */
	switch (header) {
	case PCI_HEADER_ZERO:
		offset = PCI_CONF_ROM;
		break;
	case PCI_HEADER_PPB:
		offset = PCI_BCNF_ROM;
		break;
	default:
		goto done;
	}
	/* Size the ROM BAR with a write-ones probe of the address bits. */
	base = pci_getl(bus, dev, func, offset);
	pci_putl(bus, dev, func, offset, PCI_BASE_ROM_ADDR_M);
	value = pci_getl(bus, dev, func, offset);
	pci_putl(bus, dev, func, offset, base);
	if (value & PCI_BASE_ROM_ENABLE)
		value &= PCI_BASE_ROM_ADDR_M;
	else
		value = 0;
	if (value != 0) {
		uint_t len;
		regs[nreg].pci_phys_hi = (PCI_ADDR_MEM32 | devloc) + offset;
		assigned[nasgn].pci_phys_hi = (PCI_RELOCAT_B |
		    PCI_ADDR_MEM32 | devloc) + offset;
		base &= PCI_BASE_ROM_ADDR_M;
		assigned[nasgn].pci_phys_low = base;
		len = BARMASKTOLEN(value);
		regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = len;
		nreg++, nasgn++;
		/* Account for a firmware-assigned ROM. */
		if (base != 0) {
			(void) pci_memlist_remove(mem_avail, base, len);
			pci_memlist_insert(mem_used, base, len);
			pci_bus_res[bus].mem_size += len;
		}
	}
	/*
	 * Display devices additionally claim the fixed legacy VGA ranges:
	 * I/O 0x3b0-0x3bb, 0x3c0-0x3df and memory 0xa0000-0xbffff.
	 */
	if ((baseclass == PCI_CLASS_DISPLAY && subclass == PCI_DISPLAY_VGA) ||
	    (baseclass == PCI_CLASS_NONE && subclass == PCI_NONE_VGA)) {
		regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi =
		    (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc);
		regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3b0;
		regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0xc;
		nreg++, nasgn++;
		(void) pci_memlist_remove(io_avail, 0x3b0, 0xc);
		pci_memlist_insert(io_used, 0x3b0, 0xc);
		pci_bus_res[bus].io_size += 0xc;
		regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi =
		    (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc);
		regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x3c0;
		regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x20;
		nreg++, nasgn++;
		(void) pci_memlist_remove(io_avail, 0x3c0, 0x20);
		pci_memlist_insert(io_used, 0x3c0, 0x20);
		pci_bus_res[bus].io_size += 0x20;
		regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi =
		    (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_MEM32 | devloc);
		regs[nreg].pci_phys_low =
		    assigned[nasgn].pci_phys_low = 0xa0000;
		regs[nreg].pci_size_low =
		    assigned[nasgn].pci_size_low = 0x20000;
		nreg++, nasgn++;
		(void) pci_memlist_remove(mem_avail, 0xa0000, 0x20000);
		(void) pci_memlist_remove(pmem_avail, 0xa0000, 0x20000);
		pci_memlist_insert(mem_used, 0xa0000, 0x20000);
		pci_bus_res[bus].mem_size += 0x20000;
	}
	/* 8514-compatible VGA also decodes I/O 0x2e8 and 0x2ea-0x2ef. */
	if ((baseclass == PCI_CLASS_DISPLAY) &&
	    (subclass == PCI_DISPLAY_VGA) &&
	    (progclass & PCI_DISPLAY_IF_8514)) {
		regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi =
		    (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc);
		regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x2e8;
		regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x1;
		nreg++, nasgn++;
		(void) pci_memlist_remove(io_avail, 0x2e8, 0x1);
		pci_memlist_insert(io_used, 0x2e8, 0x1);
		pci_bus_res[bus].io_size += 0x1;
		regs[nreg].pci_phys_hi = assigned[nasgn].pci_phys_hi =
		    (PCI_RELOCAT_B | PCI_ALIAS_B | PCI_ADDR_IO | devloc);
		regs[nreg].pci_phys_low = assigned[nasgn].pci_phys_low = 0x2ea;
		regs[nreg].pci_size_low = assigned[nasgn].pci_size_low = 0x6;
		nreg++, nasgn++;
		(void) pci_memlist_remove(io_avail, 0x2ea, 0x6);
		pci_memlist_insert(io_used, 0x2ea, 0x6);
		pci_bus_res[bus].io_size += 0x6;
	}
done:
	dump_memlists("add_reg_props end", bus);
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "reg",
	    (int *)regs, nreg * sizeof (pci_regspec_t) / sizeof (int));
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "assigned-addresses",
	    (int *)assigned, nasgn * sizeof (pci_regspec_t) / sizeof (int));
	return (reprogram);
}
/*
 * Record a PCI-PCI bridge during the discovery pass: link the secondary
 * bus into pci_bus_res[], create the standard bridge properties, and
 * read the bridge's I/O, memory and prefetchable-memory windows.  Each
 * enabled, non-empty window is inserted into the secondary bus's
 * available list and marked used on the primary; disabled windows are
 * reprogrammed to known safe "disabled" base/limit values.  A bridge
 * with VGA routing enabled also forwards the fixed legacy VGA ranges.
 */
static void
add_ppb_props(dev_info_t *dip, uchar_t bus, uchar_t dev, uchar_t func,
    boolean_t pciex, boolean_t is_pci_bridge)
{
	char *dev_type;
	int i;
	uint_t cmd_reg;
	struct {
		uint64_t base;
		uint64_t limit;
	} io, mem, pmem;
	uchar_t secbus, subbus;
	uchar_t progclass;
	secbus = pci_getb(bus, dev, func, PCI_BCNF_SECBUS);
	subbus = pci_getb(bus, dev, func, PCI_BCNF_SUBBUS);
	ASSERT3U(secbus, <=, subbus);
	dump_memlists("add_ppb_props start bus", bus);
	dump_memlists("add_ppb_props start secbus", secbus);
	/* Subtractive-decode bridges are handled specially elsewhere. */
	progclass = pci_getb(bus, dev, func, PCI_CONF_PROGCLASS);
	if (progclass == PCI_BRIDGE_PCI_IF_SUBDECODE)
		pci_bus_res[secbus].subtractive = B_TRUE;
	/* Grow the bus-resource array if this bridge extends the range. */
	if (subbus > pci_boot_maxbus) {
		pci_boot_maxbus = subbus;
		alloc_res_array();
	}
	ASSERT(pci_bus_res[secbus].dip == NULL);
	pci_bus_res[secbus].dip = dip;
	pci_bus_res[secbus].par_bus = bus;
	dev_type = (pciex && !is_pci_bridge) ? "pciex" : "pci";
	pci_bus_res[secbus].sub_bus = subbus;
	if (subbus > pci_bus_res[bus].sub_bus)
		pci_bus_res[bus].sub_bus = subbus;
	/* Every bus behind this bridge shares the same parent bus. */
	for (i = secbus + 1; i <= subbus; i++)
		pci_bus_res[i].par_bus = bus;
	if (!is_pci_bridge)
		pci_bus_res[bus].num_bridge++;
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "device_type", dev_type);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#address-cells", 3);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "#size-cells", 2);
	cmd_reg = (uint_t)pci_getw(bus, dev, func, PCI_CONF_COMM);
	fetch_ppb_res(bus, dev, func, RES_IO, &io.base, &io.limit);
	fetch_ppb_res(bus, dev, func, RES_MEM, &mem.base, &mem.limit);
	fetch_ppb_res(bus, dev, func, RES_PMEM, &pmem.base, &pmem.limit);
	if (pci_boot_debug != 0) {
		dcmn_err(CE_NOTE, MSGHDR " I/O FWINIT 0x%lx ~ 0x%lx%s",
		    "ppb", bus, dev, func, io.base, io.limit,
		    io.base > io.limit ? " (disabled)" : "");
		dcmn_err(CE_NOTE, MSGHDR " MEM FWINIT 0x%lx ~ 0x%lx%s",
		    "ppb", bus, dev, func, mem.base, mem.limit,
		    mem.base > mem.limit ? " (disabled)" : "");
		dcmn_err(CE_NOTE, MSGHDR "PMEM FWINIT 0x%lx ~ 0x%lx%s",
		    "ppb", bus, dev, func, pmem.base, pmem.limit,
		    pmem.base > pmem.limit ? " (disabled)" : "");
	}
	/*
	 * I/O window: disabled when I/O decode is off; otherwise move the
	 * window into the secondary bus's available pool and mark it used
	 * on the primary.
	 */
	if ((cmd_reg & PCI_COMM_IO) == 0) {
		io.base = PPB_DISABLE_IORANGE_BASE;
		io.limit = PPB_DISABLE_IORANGE_LIMIT;
		set_ppb_res(bus, dev, func, RES_IO, io.base, io.limit);
	} else if (io.base < io.limit) {
		uint64_t size = io.limit - io.base + 1;
		pci_memlist_insert(&pci_bus_res[secbus].io_avail, io.base,
		    size);
		pci_memlist_insert(&pci_bus_res[bus].io_used, io.base, size);
		if (pci_bus_res[bus].io_avail != NULL) {
			(void) pci_memlist_remove(&pci_bus_res[bus].io_avail,
			    io.base, size);
		}
	}
	/* Memory window: same treatment, keyed off memory decode. */
	if ((cmd_reg & PCI_COMM_MAE) == 0 || mem.base == 0) {
		mem.base = PPB_DISABLE_MEMRANGE_BASE;
		mem.limit = PPB_DISABLE_MEMRANGE_LIMIT;
		set_ppb_res(bus, dev, func, RES_MEM, mem.base, mem.limit);
	} else if (mem.base < mem.limit) {
		uint64_t size = mem.limit - mem.base + 1;
		pci_memlist_insert(&pci_bus_res[secbus].mem_avail, mem.base,
		    size);
		pci_memlist_insert(&pci_bus_res[bus].mem_used, mem.base, size);
		/* The window shadows both parent pools. */
		(void) pci_memlist_remove(&pci_bus_res[bus].mem_avail,
		    mem.base, size);
		(void) pci_memlist_remove(&pci_bus_res[bus].pmem_avail,
		    mem.base, size);
	}
	/* Prefetchable memory window: same treatment. */
	if ((cmd_reg & PCI_COMM_MAE) == 0 || pmem.base == 0) {
		pmem.base = PPB_DISABLE_MEMRANGE_BASE;
		pmem.limit = PPB_DISABLE_MEMRANGE_LIMIT;
		set_ppb_res(bus, dev, func, RES_PMEM, pmem.base, pmem.limit);
	} else if (pmem.base < pmem.limit) {
		uint64_t size = pmem.limit - pmem.base + 1;
		pci_memlist_insert(&pci_bus_res[secbus].pmem_avail,
		    pmem.base, size);
		pci_memlist_insert(&pci_bus_res[bus].pmem_used, pmem.base,
		    size);
		(void) pci_memlist_remove(&pci_bus_res[bus].pmem_avail,
		    pmem.base, size);
		(void) pci_memlist_remove(&pci_bus_res[bus].mem_avail,
		    pmem.base, size);
	}
	/*
	 * VGA-enable set: the bridge forwards the fixed legacy VGA I/O
	 * and frame-buffer ranges to its secondary bus.
	 */
	if (pci_getw(bus, dev, func, PCI_BCNF_BCNTRL) &
	    PCI_BCNF_BCNTRL_VGA_ENABLE) {
		pci_memlist_insert(&pci_bus_res[secbus].io_avail, 0x3b0, 0xc);
		pci_memlist_insert(&pci_bus_res[bus].io_used, 0x3b0, 0xc);
		if (pci_bus_res[bus].io_avail != NULL) {
			(void) pci_memlist_remove(&pci_bus_res[bus].io_avail,
			    0x3b0, 0xc);
		}
		pci_memlist_insert(&pci_bus_res[secbus].io_avail, 0x3c0, 0x20);
		pci_memlist_insert(&pci_bus_res[bus].io_used, 0x3c0, 0x20);
		if (pci_bus_res[bus].io_avail != NULL) {
			(void) pci_memlist_remove(&pci_bus_res[bus].io_avail,
			    0x3c0, 0x20);
		}
		pci_memlist_insert(&pci_bus_res[secbus].mem_avail, 0xa0000,
		    0x20000);
		pci_memlist_insert(&pci_bus_res[bus].mem_used, 0xa0000,
		    0x20000);
		if (pci_bus_res[bus].mem_avail != NULL) {
			(void) pci_memlist_remove(&pci_bus_res[bus].mem_avail,
			    0xa0000, 0x20000);
		}
	}
	add_bus_range_prop(secbus);
	add_ranges_prop(secbus, B_TRUE);
	dump_memlists("add_ppb_props end bus", bus);
	dump_memlists("add_ppb_props end secbus", secbus);
}
/*
 * Publish the "bus-range" property (first and last bus number behind
 * this node) on the bus's devinfo node, if one exists.
 */
static void
add_bus_range_prop(int bus)
{
	int range[2];

	if (pci_bus_res[bus].dip == NULL)
		return;

	range[0] = bus;
	range[1] = pci_bus_res[bus].sub_bus;
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip,
	    "bus-range", (int *)range, 2);
}
/*
 * Append one ranges entry per memlist element to the buffer at *rp,
 * advancing *rp past what was written.  'ppb' selects between the
 * bridge (ppb_ranges_t) and root (pci_ranges_t) layouts, which is why
 * *rp is aliased through both typed cursors below.  Entries reaching
 * above 4 GiB are retyped as MEM64; 64-bit I/O entries are invalid and
 * are skipped with a warning.
 */
static void
memlist_to_ranges(void **rp, struct memlist *list, const int bus,
    const uint32_t type, boolean_t ppb)
{
	ppb_ranges_t *ppb_rp = *rp;
	pci_ranges_t *pci_rp = *rp;
	while (list != NULL) {
		uint32_t newtype = type;
		/*
		 * If this is in fact a 64-bit address, adjust the address
		 * type accordingly.
		 */
		if (list->ml_address + (list->ml_size - 1) > UINT32_MAX) {
			if ((type & PCI_ADDR_MASK) == PCI_ADDR_IO) {
				cmn_err(CE_WARN, "Found invalid 64-bit I/O "
				    "space address 0x%lx+0x%lx on bus %x",
				    list->ml_address, list->ml_size, bus);
				list = list->ml_next;
				continue;
			}
			newtype &= ~PCI_ADDR_MASK;
			newtype |= PCI_ADDR_MEM64;
		}
		if (ppb) {
			/* Bridge ranges map child addresses 1:1 to parent. */
			ppb_rp->child_high = ppb_rp->parent_high = newtype;
			ppb_rp->child_mid = ppb_rp->parent_mid =
			    (uint32_t)(list->ml_address >> 32);
			ppb_rp->child_low = ppb_rp->parent_low =
			    (uint32_t)list->ml_address;
			ppb_rp->size_high = (uint32_t)(list->ml_size >> 32);
			ppb_rp->size_low = (uint32_t)list->ml_size;
			*rp = ++ppb_rp;
		} else {
			pci_rp->child_high = newtype;
			pci_rp->child_mid = pci_rp->parent_high =
			    (uint32_t)(list->ml_address >> 32);
			pci_rp->child_low = pci_rp->parent_low =
			    (uint32_t)list->ml_address;
			pci_rp->size_high = (uint32_t)(list->ml_size >> 32);
			pci_rp->size_low = (uint32_t)list->ml_size;
			*rp = ++pci_rp;
		}
		list = list->ml_next;
	}
}
/*
 * Build and publish the "ranges" property for 'bus' from the union of
 * its available and used I/O, memory, and prefetchable-memory lists.
 * Note that pci_memlist_merge() moves entries out of the per-bus lists
 * into the temporary lists, which are converted to ranges entries and
 * then freed.  'ppb' selects the bridge ranges layout.
 */
static void
add_ranges_prop(int bus, boolean_t ppb)
{
	int total, alloc_size;
	void *rp, *next_rp;
	struct memlist *iolist, *memlist, *pmemlist;
	/* no devinfo node - unused bus, return */
	if (pci_bus_res[bus].dip == NULL)
		return;
	dump_memlists("add_ranges_prop", bus);
	iolist = memlist = pmemlist = (struct memlist *)NULL;
	pci_memlist_merge(&pci_bus_res[bus].io_avail, &iolist);
	pci_memlist_merge(&pci_bus_res[bus].io_used, &iolist);
	pci_memlist_merge(&pci_bus_res[bus].mem_avail, &memlist);
	pci_memlist_merge(&pci_bus_res[bus].mem_used, &memlist);
	pci_memlist_merge(&pci_bus_res[bus].pmem_avail, &pmemlist);
	pci_memlist_merge(&pci_bus_res[bus].pmem_used, &pmemlist);
	total = pci_memlist_count(iolist);
	total += pci_memlist_count(memlist);
	total += pci_memlist_count(pmemlist);
	/* no property is created if no ranges are present */
	if (total == 0)
		return;
	alloc_size = total *
	    (ppb ? sizeof (ppb_ranges_t) : sizeof (pci_ranges_t));
	next_rp = rp = kmem_alloc(alloc_size, KM_SLEEP);
	memlist_to_ranges(&next_rp, iolist, bus,
	    PCI_ADDR_IO | PCI_RELOCAT_B, ppb);
	memlist_to_ranges(&next_rp, memlist, bus,
	    PCI_ADDR_MEM32 | PCI_RELOCAT_B, ppb);
	memlist_to_ranges(&next_rp, pmemlist, bus,
	    PCI_ADDR_MEM32 | PCI_RELOCAT_B | PCI_PREFETCH_B, ppb);
	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip,
	    "ranges", (int *)rp, alloc_size / sizeof (int));
	kmem_free(rp, alloc_size);
	pci_memlist_free_all(&iolist);
	pci_memlist_free_all(&memlist);
	pci_memlist_free_all(&pmemlist);
}
static void
pci_memlist_remove_list(struct memlist **list, struct memlist *remove_list)
{
while (list && *list && remove_list) {
(void) pci_memlist_remove(list, remove_list->ml_address,
remove_list->ml_size);
remove_list = remove_list->ml_next;
}
}
/*
 * Convert a memlist into an array of pci_phys_spec entries of the given
 * address type, returning the number written.  Mirrors the retyping
 * rules of memlist_to_ranges(): entries reaching above 4 GiB become
 * MEM64, and 64-bit I/O entries are skipped with a warning.  The caller
 * guarantees 'sp' has room for one entry per list element.
 */
static int
memlist_to_spec(struct pci_phys_spec *sp, const int bus, struct memlist *list,
    const uint32_t type)
{
	uint_t i = 0;
	while (list != NULL) {
		uint32_t newtype = type;
		/*
		 * If this is in fact a 64-bit address, adjust the address
		 * type accordingly.
		 */
		if (list->ml_address + (list->ml_size - 1) > UINT32_MAX) {
			if ((type & PCI_ADDR_MASK) == PCI_ADDR_IO) {
				cmn_err(CE_WARN, "Found invalid 64-bit I/O "
				    "space address 0x%lx+0x%lx on bus %x",
				    list->ml_address, list->ml_size, bus);
				list = list->ml_next;
				continue;
			}
			newtype &= ~PCI_ADDR_MASK;
			newtype |= PCI_ADDR_MEM64;
		}
		sp->pci_phys_hi = newtype;
		sp->pci_phys_mid = (uint32_t)(list->ml_address >> 32);
		sp->pci_phys_low = (uint32_t)list->ml_address;
		sp->pci_size_hi = (uint32_t)(list->ml_size >> 32);
		sp->pci_size_low = (uint32_t)list->ml_size;
		list = list->ml_next;
		sp++, i++;
	}
	return (i);
}
/*
 * Publish the bus's remaining free I/O, memory and prefetchable-memory
 * resources as the "available" property on its dip.  Does nothing when the
 * bus has no dip or no free resources.
 */
static void
add_bus_available_prop(int bus)
{
	struct pci_phys_spec *specs;
	int nspec, used;

	if (pci_bus_res[bus].dip == NULL)
		return;

	nspec = pci_memlist_count(pci_bus_res[bus].io_avail) +
	    pci_memlist_count(pci_bus_res[bus].mem_avail) +
	    pci_memlist_count(pci_bus_res[bus].pmem_avail);
	if (nspec == 0)
		return;

	specs = kmem_alloc(nspec * sizeof (*specs), KM_SLEEP);

	used = memlist_to_spec(&specs[0], bus, pci_bus_res[bus].io_avail,
	    PCI_ADDR_IO | PCI_RELOCAT_B);
	used += memlist_to_spec(&specs[used], bus, pci_bus_res[bus].mem_avail,
	    PCI_ADDR_MEM32 | PCI_RELOCAT_B);
	used += memlist_to_spec(&specs[used], bus, pci_bus_res[bus].pmem_avail,
	    PCI_ADDR_MEM32 | PCI_RELOCAT_B | PCI_PREFETCH_B);
	ASSERT(used == nspec);

	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, pci_bus_res[bus].dip,
	    "available", (int *)specs,
	    used * sizeof (struct pci_phys_spec) / sizeof (int));
	kmem_free(specs, nspec * sizeof (*specs));
}
static void
alloc_res_array(void)
{
static uint_t array_size = 0;
uint_t old_size;
void *old_res;
if (array_size > pci_boot_maxbus + 1)
return;
old_size = array_size;
old_res = pci_bus_res;
if (array_size == 0)
array_size = 16;
while (array_size <= pci_boot_maxbus + 1)
array_size <<= 1;
pci_bus_res = (struct pci_bus_resource *)kmem_zalloc(
array_size * sizeof (struct pci_bus_resource), KM_SLEEP);
if (old_res) {
bcopy(old_res, pci_bus_res,
old_size * sizeof (struct pci_bus_resource));
kmem_free(old_res, old_size * sizeof (struct pci_bus_resource));
}
}
/*
 * Create a child node for a PCI-discovered I/O APIC under a shared
 * "ioapics" nexus node (created on first use).  The APIC's physical
 * register base is taken from BAR0; only memory-mapped BARs are accepted,
 * and the high word is read when BAR0 is a 64-bit BAR.
 */
static void
create_ioapic_node(int bus, int dev, int fn, ushort_t vendorid,
    ushort_t deviceid)
{
	static dev_info_t *ioapicsnode = NULL;
	static int numioapics = 0;
	dev_info_t *child;
	uint64_t regbase;
	uint32_t bar_lo, bar_hi = 0;

	bar_lo = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0);
	if ((bar_lo & PCI_BASE_SPACE_M) != PCI_BASE_SPACE_MEM)
		return;
	if ((bar_lo & PCI_BASE_TYPE_M) == PCI_BASE_TYPE_ALL)
		bar_hi = (*pci_getl_func)(bus, dev, fn, PCI_CONF_BASE0 + 4);
	bar_lo &= PCI_BASE_M_ADDR_M;
	regbase = (((uint64_t)bar_hi) << 32) | bar_lo;

	/* Lazily create the common parent node for all I/O APICs. */
	if (ioapicsnode == NULL) {
		if (ndi_devi_alloc(ddi_root_node(), IOAPICS_NODE_NAME,
		    (pnode_t)DEVI_SID_NODEID, &ioapicsnode) != NDI_SUCCESS) {
			return;
		}
		(void) ndi_devi_online(ioapicsnode, 0);
	}

	child = ddi_add_child(ioapicsnode, IOAPICS_CHILD_NAME,
	    DEVI_SID_NODEID, numioapics++);
	if (child == NULL)
		return;

	(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
	    IOAPICS_PROP_VENID, vendorid);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
	    IOAPICS_PROP_DEVID, deviceid);
	(void) ndi_prop_update_string(DDI_DEV_T_NONE, child,
	    "device_type", IOAPICS_DEV_TYPE);
	(void) ndi_prop_update_int64(DDI_DEV_T_NONE, child,
	    "reg", regbase);
}
/*
 * Workaround for NVIDIA CK804: on sufficiently new revisions, enable error
 * reporting via the vendor CYA1 register, but only if no bits outside the
 * error-reporting mask are already set.
 */
static void
ck804_fix_aer_ptr(dev_info_t *dip, pcie_req_id_t bdf)
{
	dev_info_t *rcdip;
	ushort_t cya1;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	if (pci_cfgacc_get16(rcdip, bdf, PCI_CONF_VENID) != NVIDIA_VENDOR_ID)
		return;
	if (pci_cfgacc_get16(rcdip, bdf, PCI_CONF_DEVID) !=
	    NVIDIA_CK804_DEVICE_ID)
		return;
	if (pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID) <
	    NVIDIA_CK804_AER_VALID_REVID)
		return;

	cya1 = pci_cfgacc_get16(rcdip, bdf, NVIDIA_CK804_VEND_CYA1_OFF);
	if ((cya1 & ~NVIDIA_CK804_VEND_CYA1_ERPT_MASK) == 0) {
		(void) pci_cfgacc_put16(rcdip, bdf,
		    NVIDIA_CK804_VEND_CYA1_OFF,
		    cya1 | NVIDIA_CK804_VEND_CYA1_ERPT_VAL);
	}
}