#include "i40e_sw.h"
/* Driver identification string. */
static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3";
/* Protects the global lists below (see i40e_device_find()/_rele()). */
static kmutex_t i40e_glock;
/* Global instance list -- presumably all attached i40e_t's; not used in this chunk. */
static list_t i40e_glist;
/* One i40e_device_t per physical device, shared by its PFs. */
static list_t i40e_dlist;
/*
 * Register access attributes.  devacc_attr_access is rewritten at FM
 * init time based on the instance's capabilities (see i40e_fm_init()).
 */
static ddi_device_acc_attr_t i40e_regs_acc_attr = {
DDI_DEVICE_ATTR_V1,
DDI_STRUCTURE_LE_ACC,
DDI_STRICTORDER_ACC,
DDI_FLAGERR_ACC
};
/*
 * Common logging routine backing i40e_error(), i40e_log(), and
 * i40e_notice().  The message is formatted into a local buffer and
 * emitted via dev_err() when a device instance is available, or
 * cmn_err() prefixed with the module name otherwise.  Unless the
 * caller asks for console output, a leading "!" keeps the message out
 * of the console and in the system log only.
 */
static void
i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
    va_list ap)
{
	char msg[1024];

	(void) vsnprintf(msg, sizeof (msg), fmt, ap);

	if (i40e != NULL) {
		dev_err(i40e->i40e_dip, level, console ? "%s" : "!%s", msg);
	} else {
		cmn_err(level, console ? "%s: %s" : "!%s: %s",
		    I40E_MODULE_NAME, msg);
	}
}
/*
 * Log a warning (CE_WARN) for this instance; not forced to the console.
 */
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, args);
	va_end(args);
}
/*
 * Log an informational note (CE_NOTE) for this instance; not forced to
 * the console.
 */
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, args);
	va_end(args);
}
/*
 * Log a note (CE_NOTE) for this instance, forcing it to the console as
 * well as the system log.
 */
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, args);
	va_end(args);
}
/*
 * Report whether the underlying MAC is an X722 part.
 */
static boolean_t
i40e_is_x722(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	return (hw->mac.type == I40E_MAC_X722);
}
/*
 * Release this instance's hold on its shared i40e_device_t.  The last
 * PF to release tears down the device node, its switch resource
 * snapshot, and its instance list.  i40e_glock serializes all access
 * to i40e_dlist and the registration counts.
 */
static void
i40e_device_rele(i40e_t *i40e)
{
i40e_device_t *idp = i40e->i40e_device;
/* Nothing to do if this instance never registered with a device. */
if (idp == NULL)
return;
mutex_enter(&i40e_glock);
VERIFY(idp->id_nreg > 0);
list_remove(&idp->id_i40e_list, i40e);
idp->id_nreg--;
if (idp->id_nreg == 0) {
/* Last reference: free the shared device state. */
list_remove(&i40e_dlist, idp);
list_destroy(&idp->id_i40e_list);
kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
idp->id_rsrcs_alloc);
kmem_free(idp, sizeof (i40e_device_t));
}
i40e->i40e_device = NULL;
mutex_exit(&i40e_glock);
}
/*
 * Look up (or create) the i40e_device_t shared by all PFs on the same
 * physical device, keyed by parent dev_info plus PCI bus/device.  The
 * first PF to attach creates the shared state, copying in its switch
 * resource snapshot and computing the function count.  The returned
 * device has its registration count bumped and this instance appended
 * to its list.  Serialized by i40e_glock.
 */
static i40e_device_t *
i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
{
i40e_device_t *idp;
mutex_enter(&i40e_glock);
for (idp = list_head(&i40e_dlist); idp != NULL;
idp = list_next(&i40e_dlist, idp)) {
if (idp->id_parent == parent && idp->id_pci_bus == bus &&
idp->id_pci_device == device) {
break;
}
}
if (idp != NULL) {
/* Existing device: just take another reference. */
VERIFY(idp->id_nreg < idp->id_nfuncs);
idp->id_nreg++;
} else {
i40e_hw_t *hw = &i40e->i40e_hw_space;
ASSERT(hw->num_ports > 0);
ASSERT(hw->num_partitions > 0);
/* First PF on this device: create the shared state. */
idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
idp->id_parent = parent;
idp->id_pci_bus = bus;
idp->id_pci_device = device;
idp->id_nfuncs = hw->num_ports * hw->num_partitions;
idp->id_nreg = 1;
idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
idp->id_rsrcs_alloc, KM_SLEEP);
/* Snapshot this PF's switch resource table for device-wide use. */
bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
list_create(&idp->id_i40e_list, sizeof (i40e_t),
offsetof(i40e_t, i40e_dlink));
list_insert_tail(&i40e_dlist, idp);
}
list_insert_tail(&idp->id_i40e_list, i40e);
mutex_exit(&i40e_glock);
return (idp);
}
/*
 * Record a new link state and notify the MAC layer, but only when the
 * state actually changed.
 */
static void
i40e_link_state_set(i40e_t *i40e, link_state_t state)
{
	if (i40e->i40e_link_state != state) {
		i40e->i40e_link_state = state;
		mac_link_update(i40e->i40e_mac_hdl, state);
	}
}
/*
 * Query the firmware for the current link state and update the cached
 * speed/duplex, notifying MAC on any state transition.  The caller must
 * hold i40e_general_lock.  On query failure the error counters are
 * bumped and the previous state is left in place.
 */
void
i40e_link_check(i40e_t *i40e)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
bool ls;
int ret;
ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
/* Force a fresh link query rather than using cached PHY info. */
hw->phy.get_link_info = true;
if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
i40e->i40e_s_link_status_errs++;
i40e->i40e_s_link_status_lasterr = ret;
return;
}
if (ls) {
enum i40e_aq_link_speed speed;
speed = i40e_get_link_speed(hw);
/* Translate the firmware speed enum into Mbps. */
switch (speed) {
case I40E_LINK_SPEED_100MB:
i40e->i40e_link_speed = 100;
break;
case I40E_LINK_SPEED_1GB:
i40e->i40e_link_speed = 1000;
break;
case I40E_LINK_SPEED_2_5GB:
i40e->i40e_link_speed = 2500;
break;
case I40E_LINK_SPEED_5GB:
i40e->i40e_link_speed = 5000;
break;
case I40E_LINK_SPEED_10GB:
i40e->i40e_link_speed = 10000;
break;
case I40E_LINK_SPEED_20GB:
i40e->i40e_link_speed = 20000;
break;
case I40E_LINK_SPEED_40GB:
i40e->i40e_link_speed = 40000;
break;
case I40E_LINK_SPEED_25GB:
i40e->i40e_link_speed = 25000;
break;
default:
/* Unknown enum value: report zero rather than guessing. */
i40e->i40e_link_speed = 0;
break;
}
/* This driver only ever reports full duplex when up. */
i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
i40e_link_state_set(i40e, LINK_STATE_UP);
} else {
i40e->i40e_link_speed = 0;
i40e->i40e_link_duplex = 0;
i40e_link_state_set(i40e, LINK_STATE_DOWN);
}
}
/*
 * Free every allocated interrupt handle and release the handle array.
 * Failures to free individual handles are logged but not fatal.
 */
static void
i40e_rem_intrs(i40e_t *i40e)
{
	int i;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		int ret = ddi_intr_free(i40e->i40e_intr_handles[i]);

		if (ret != DDI_SUCCESS) {
			i40e_log(i40e, "failed to free interrupt %d: %d",
			    i, ret);
		}
	}

	kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
	i40e->i40e_intr_handles = NULL;
}
/*
 * Remove the registered handler from every interrupt handle.  Failures
 * are logged but not fatal.
 */
static void
i40e_rem_intr_handlers(i40e_t *i40e)
{
	int i;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		int ret = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);

		if (ret != DDI_SUCCESS) {
			i40e_log(i40e, "failed to remove interrupt %d: %d",
			    i, ret);
		}
	}
}
/*
 * Fetch and clear the FM error state on a register access handle,
 * returning the fme_status (e.g. DDI_FM_OK) for the caller to act on.
 */
int
i40e_check_acc_handle(ddi_acc_handle_t handle)
{
ddi_fm_error_t de;
ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
return (de.fme_status);
}
/*
 * Fetch the FM error state on a DMA handle and return its fme_status.
 * Unlike i40e_check_acc_handle(), the error state is not cleared here.
 */
int
i40e_check_dma_handle(ddi_dma_handle_t handle)
{
ddi_fm_error_t de;
ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
return (de.fme_status);
}
/*
 * FM error callback registered in i40e_fm_init(): post any pending PCI
 * ereports and hand the error status back to the framework.  The
 * impl_data argument (the i40e_t registered with the handler) is
 * unused here.
 */
static int
i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
pci_ereport_post(dip, err, NULL);
return (err->fme_status);
}
/*
 * Initialize FM support for this instance.  The "fm_capable" property
 * selects the capability set, defaulting to full ereport/access-check/
 * DMA-check/error-callback support; out-of-range values are clamped.
 * Register access and DMA attributes are configured to match, and the
 * FM framework, PCI ereports, and the error callback are registered
 * as the capabilities dictate.
 */
static void
i40e_fm_init(i40e_t *i40e)
{
ddi_iblock_cookie_t iblk;
i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
/* Clamp nonsense property values: negative -> none, too big -> all. */
if (i40e->i40e_fm_capabilities < 0) {
i40e->i40e_fm_capabilities = 0;
} else if (i40e->i40e_fm_capabilities > 0xf) {
i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
DDI_FM_ERRCB_CAPABLE;
}
/*
 * NOTE(review): this rewrites the file-global i40e_regs_acc_attr based
 * on a per-instance property; with multiple instances the last attach
 * wins -- confirm this is intended.
 */
if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
} else {
i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
}
if (i40e->i40e_fm_capabilities) {
ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);
if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
pci_ereport_setup(i40e->i40e_dip);
}
if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
ddi_fm_handler_register(i40e->i40e_dip,
i40e_fm_error_cb, (void*)i40e);
}
}
/* Choose DMA attributes with or without flag-error support. */
if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
i40e_init_dma_attrs(i40e, B_TRUE);
} else {
i40e_init_dma_attrs(i40e, B_FALSE);
}
}
/*
 * Tear down FM support in the reverse order of i40e_fm_init(),
 * undoing only what the capability set says was registered.
 */
static void
i40e_fm_fini(i40e_t *i40e)
{
	int caps = i40e->i40e_fm_capabilities;

	if (caps == 0)
		return;

	if (DDI_FM_EREPORT_CAP(caps) || DDI_FM_ERRCB_CAP(caps))
		pci_ereport_teardown(i40e->i40e_dip);

	if (DDI_FM_ERRCB_CAP(caps))
		ddi_fm_handler_unregister(i40e->i40e_dip);

	ddi_fm_fini(i40e->i40e_dip);
}
/*
 * Post a device ereport for the given detail class (a DDI_FM_DEVICE
 * subclass), provided this instance is ereport capable.
 */
void
i40e_fm_ereport(i40e_t *i40e, char *detail)
{
	char class[FM_MAX_CLASS];
	uint64_t ena;

	(void) snprintf(class, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);

	if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
		ddi_fm_ereport_post(i40e->i40e_dip, class, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
/*
 * Query the firmware switch configuration and record the SEID of the
 * default VSI.  The driver refuses to proceed if the switch reports
 * more than one element, since that topology is not supported here.
 * Returns B_TRUE on success.
 */
static boolean_t
i40e_set_def_vsi_seid(i40e_t *i40e)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
struct i40e_aqc_get_switch_config_resp *sw_config;
uint8_t aq_buf[I40E_AQ_LARGE_BUF];
uint16_t next = 0;
int rc;
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
NULL);
if (rc != I40E_SUCCESS) {
i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
rc, hw->aq.asq_last_status);
return (B_FALSE);
}
/* Only a single switching unit is supported. */
if (LE_16(sw_config->header.num_reported) != 1) {
i40e_error(i40e, "encountered multiple (%d) switching units "
"during attach, not proceeding",
LE_16(sw_config->header.num_reported));
return (B_FALSE);
}
I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid;
return (B_TRUE);
}
/*
 * Query the firmware switch configuration and return the uplink SEID of
 * the first reported element (the MAC's SEID), or -1 on failure.
 */
static int
i40e_get_mac_seid(i40e_t *i40e)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
struct i40e_aqc_get_switch_config_resp *sw_config;
uint8_t aq_buf[I40E_AQ_LARGE_BUF];
uint16_t next = 0;
int rc;
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
NULL);
if (rc != I40E_SUCCESS) {
i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
rc, hw->aq.asq_last_status);
return (-1);
}
return (LE_16(sw_config->element[0].uplink_seid));
}
/*
 * Ask firmware for this function's capability list.  If the default
 * buffer is too small, firmware reports ENOMEM along with the size it
 * needs, and we retry once with that size.  The capability data itself
 * is parsed by the common code; we only need the call to succeed.
 * Returns B_TRUE on success.
 */
static boolean_t
i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
{
struct i40e_aqc_list_capabilities_element_resp *buf;
int rc;
size_t len;
uint16_t needed;
int nelems = I40E_HW_CAP_DEFAULT;
len = nelems * sizeof (*buf);
for (;;) {
ASSERT(len > 0);
buf = kmem_alloc(len, KM_SLEEP);
rc = i40e_aq_discover_capabilities(hw, buf, len,
&needed, i40e_aqc_opc_list_func_capabilities, NULL);
kmem_free(buf, len);
/* Retry exactly once with the size firmware says it needs. */
if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
nelems == I40E_HW_CAP_DEFAULT) {
/*
 * If firmware claims it needs what we already gave it,
 * retrying cannot make progress -- bail out.
 */
if (nelems == needed) {
i40e_error(i40e, "Capability discovery failed "
"due to byzantine common code");
return (B_FALSE);
}
len = needed;
continue;
} else if (rc != I40E_SUCCESS ||
hw->aq.asq_last_status != I40E_AQ_RC_OK) {
i40e_error(i40e, "Capability discovery failed: %d", rc);
return (B_FALSE);
}
break;
}
return (B_TRUE);
}
/*
 * Retrieve the switch resource summary from firmware.  The number of
 * table entries is not known up front: on EINVAL (buffer too small)
 * grow the buffer and retry.  On success the buffer, its allocated
 * entry count, and the count firmware actually filled in are recorded
 * on the instance; the buffer is freed in i40e_cleanup_resources().
 * Returns B_TRUE on success.
 */
static boolean_t
i40e_get_switch_resources(i40e_t *i40e)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
uint8_t cnt = 2;
uint8_t act;
size_t size;
i40e_switch_rsrc_t *buf;
for (;;) {
enum i40e_status_code ret;
size = cnt * sizeof (i40e_switch_rsrc_t);
ASSERT(size > 0);
/* The AQ command takes a 16-bit length; give up beyond that. */
if (size > UINT16_MAX)
return (B_FALSE);
buf = kmem_alloc(size, KM_SLEEP);
ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
cnt, NULL);
if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
/* Buffer too small: grow and retry. */
kmem_free(buf, size);
cnt += I40E_SWITCH_CAP_DEFAULT;
continue;
} else if (ret != I40E_SUCCESS) {
kmem_free(buf, size);
i40e_error(i40e,
"failed to retrieve switch statistics: %d", ret);
return (B_FALSE);
}
break;
}
i40e->i40e_switch_rsrc_alloc = cnt;
i40e->i40e_switch_rsrc_actual = act;
i40e->i40e_switch_rsrcs = buf;
return (B_TRUE);
}
/*
 * Free the unicast/multicast filter tables and the switch resource
 * snapshot, and drop our reference on the shared device.  Safe against
 * partially-initialized state: each piece is guarded by a NULL check.
 */
static void
i40e_cleanup_resources(i40e_t *i40e)
{
if (i40e->i40e_uaddrs != NULL) {
kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
i40e->i40e_resources.ifr_nmacfilt);
i40e->i40e_uaddrs = NULL;
}
if (i40e->i40e_maddrs != NULL) {
kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
i40e->i40e_resources.ifr_nmcastfilt);
i40e->i40e_maddrs = NULL;
}
if (i40e->i40e_switch_rsrcs != NULL) {
size_t sz = sizeof (i40e_switch_rsrc_t) *
i40e->i40e_switch_rsrc_alloc;
ASSERT(sz > 0);
kmem_free(i40e->i40e_switch_rsrcs, sz);
i40e->i40e_switch_rsrcs = NULL;
}
if (i40e->i40e_device != NULL)
i40e_device_rele(i40e);
}
/*
 * Discover the resources (VSIs, MAC address filters, multicast hash
 * filters) available to this PF and size the driver's filter tables
 * accordingly.  Our share is the per-function guaranteed allocation
 * plus an even split, across all functions, of the device-wide
 * unallocated pool recorded in the shared i40e_device_t snapshot.
 *
 * Returns B_TRUE on success, B_FALSE on failure.
 */
static boolean_t
i40e_get_available_resources(i40e_t *i40e)
{
	dev_info_t *parent;
	uint16_t bus, device, func;
	uint_t nregs;
	int *regs, i;
	i40e_device_t *idp;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	parent = ddi_get_parent(i40e->i40e_dip);

	/* Derive our PCI bus/device/function from the "reg" property. */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
	    &regs, &nregs) != DDI_PROP_SUCCESS) {
		return (B_FALSE);
	}

	if (nregs < 1) {
		ddi_prop_free(regs);
		return (B_FALSE);
	}

	bus = PCI_REG_BUS_G(regs[0]);
	device = PCI_REG_DEV_G(regs[0]);
	func = PCI_REG_FUNC_G(regs[0]);
	ddi_prop_free(regs);

	i40e->i40e_hw_space.bus.func = func;
	i40e->i40e_hw_space.bus.device = device;

	/* Snapshot the switch resource table before joining the device. */
	if (i40e_get_switch_resources(i40e) == B_FALSE) {
		return (B_FALSE);
	}

	/*
	 * Find or create the state shared by all PFs on this physical
	 * device, registering ourselves with it.
	 */
	idp = i40e_device_find(i40e, parent, bus, device);
	i40e->i40e_device = idp;

	i40e->i40e_resources.ifr_nvsis = 0;
	i40e->i40e_resources.ifr_nvsis_used = 0;
	i40e->i40e_resources.ifr_nmacfilt = 0;
	i40e->i40e_resources.ifr_nmacfilt_used = 0;
	i40e->i40e_resources.ifr_nmcastfilt = 0;
	i40e->i40e_resources.ifr_nmcastfilt_used = 0;

	/* Start with this function's guaranteed allocations. */
	for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];

		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmacfilt_used =
			    LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmcastfilt_used =
			    LE_16(srp->used);
			break;
		default:
			break;
		}
	}

	/*
	 * Add an even per-function share of the device-wide unallocated
	 * pool, as sized when the shared device state was created.
	 */
	for (i = 0; i < idp->id_rsrcs_act; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];

		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		default:
			break;
		}
	}

	i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
	i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;

	/*
	 * The filter tables start zeroed; entries are populated as
	 * addresses are programmed.
	 */
	i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
	    i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
	i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
	    i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);

	return (B_TRUE);
}
/*
 * Enable all allocated interrupts, using block enable when the
 * capability flags allow it, otherwise one at a time.  On a mid-loop
 * failure the interrupts already enabled are rolled back.  Returns
 * B_TRUE on success.
 */
static boolean_t
i40e_enable_interrupts(i40e_t *i40e)
{
int i, rc;
if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
i40e->i40e_intr_count);
if (rc != DDI_SUCCESS) {
i40e_error(i40e, "Interrupt block-enable failed: %d",
rc);
return (B_FALSE);
}
} else {
for (i = 0; i < i40e->i40e_intr_count; i++) {
rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
if (rc != DDI_SUCCESS) {
i40e_error(i40e,
"Failed to enable interrupt %d: %d", i, rc);
/* Undo the ones we already enabled. */
while (--i >= 0) {
(void) ddi_intr_disable(
i40e->i40e_intr_handles[i]);
}
return (B_FALSE);
}
}
}
return (B_TRUE);
}
/*
 * Disable all allocated interrupts, using block disable when possible.
 * Returns B_FALSE on the first failure (no rollback is attempted).
 */
static boolean_t
i40e_disable_interrupts(i40e_t *i40e)
{
int i, rc;
if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
i40e->i40e_intr_count);
if (rc != DDI_SUCCESS) {
i40e_error(i40e,
"Interrupt block-disabled failed: %d", rc);
return (B_FALSE);
}
} else {
for (i = 0; i < i40e->i40e_intr_count; i++) {
rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
if (rc != DDI_SUCCESS) {
i40e_error(i40e,
"Failed to disable interrupt %d: %d",
i, rc);
return (B_FALSE);
}
}
}
return (B_TRUE);
}
/*
 * Free the RX groups and transmit/receive queue pairs, destroying each
 * pair's locks, CVs, and kstats, and finally the instance-wide locks
 * created in i40e_alloc_trqpairs().  NULL checks make this safe to call
 * from partially-completed allocation.
 */
static void
i40e_free_trqpairs(i40e_t *i40e)
{
i40e_trqpair_t *itrq;
if (i40e->i40e_rx_groups != NULL) {
kmem_free(i40e->i40e_rx_groups,
sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups);
i40e->i40e_rx_groups = NULL;
}
if (i40e->i40e_trqpairs != NULL) {
for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
itrq = &i40e->i40e_trqpairs[i];
mutex_destroy(&itrq->itrq_intr_lock);
mutex_destroy(&itrq->itrq_rx_lock);
mutex_destroy(&itrq->itrq_tx_lock);
mutex_destroy(&itrq->itrq_tcb_lock);
cv_destroy(&itrq->itrq_intr_cv);
cv_destroy(&itrq->itrq_tx_cv);
i40e_stats_trqpair_fini(itrq);
}
kmem_free(i40e->i40e_trqpairs,
sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
i40e->i40e_trqpairs = NULL;
}
cv_destroy(&i40e->i40e_rx_pending_cv);
mutex_destroy(&i40e->i40e_rx_pending_lock);
mutex_destroy(&i40e->i40e_general_lock);
}
/*
 * Allocate and initialize the transmit/receive queue pairs and RX
 * groups, along with the instance-wide locks.  All mutexes share the
 * interrupt priority so they may be taken from interrupt context.
 * Returns B_TRUE on success; on failure everything allocated so far is
 * torn down via i40e_free_trqpairs().
 */
static boolean_t
i40e_alloc_trqpairs(i40e_t *i40e)
{
void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);
mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);
i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
i40e->i40e_num_trqpairs, KM_SLEEP);
for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
itrq->itrq_i40e = i40e;
mutex_init(&itrq->itrq_intr_lock, NULL, MUTEX_DRIVER, mutexpri);
mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
cv_init(&itrq->itrq_intr_cv, NULL, CV_DRIVER, NULL);
cv_init(&itrq->itrq_tx_cv, NULL, CV_DRIVER, NULL);
itrq->itrq_index = i;
/* Pairs start quiesced until interrupts/tx are brought up. */
itrq->itrq_intr_quiesce = B_TRUE;
itrq->itrq_tx_quiesce = B_TRUE;
}
for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
if (!i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i])) {
/*
 * NOTE(review): this runs i40e_stats_trqpair_fini() on
 * every pair, including ones whose stats were never
 * initialized -- presumably fini tolerates that; confirm.
 */
i40e_free_trqpairs(i40e);
return (B_FALSE);
}
}
i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) *
i40e->i40e_num_rx_groups, KM_SLEEP);
for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i];
rxg->irg_index = i;
rxg->irg_i40e = i40e;
}
return (B_TRUE);
}
/*
 * Apply hardware-derived defaults to instance values that have not
 * been set yet: per-VSI queue pair limit (MAC-family dependent) and
 * number of RX groups.
 */
static void
i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
{
	if (i40e->i40e_num_trqpairs_per_vsi == 0) {
		i40e->i40e_num_trqpairs_per_vsi = i40e_is_x722(i40e) ?
		    I40E_722_MAX_TC_QUEUES : I40E_710_MAX_TC_QUEUES;
	}

	if (i40e->i40e_num_rx_groups == 0)
		i40e->i40e_num_rx_groups = I40E_DEF_NUM_RX_GROUPS;
}
/*
 * Shut down the common-code pieces brought up in i40e_common_code_init():
 * the LAN HMC and then the admin queue.  Failures are logged only.
 */
static void
i40e_common_code_fini(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int ret;

	if ((ret = i40e_shutdown_lan_hmc(hw)) != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown LAN hmc: %d", ret);

	if ((ret = i40e_shutdown_adminq(hw)) != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown admin queue: %d", ret);
}
/*
 * Bring up the Intel shared ("common") code: reset the PF, initialize
 * the admin queue, sanity check firmware/driver API versions, discover
 * capabilities and switch resources, set up the host memory cache, and
 * retrieve/validate the MAC addresses.  Returns B_FALSE (after logging
 * and, where appropriate, posting an ereport) on any failure.
 */
static boolean_t
i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
{
int rc;
i40e_clear_hw(hw);
rc = i40e_pf_reset(hw);
if (rc != 0) {
i40e_error(i40e, "failed to reset hardware: %d", rc);
i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
return (B_FALSE);
}
rc = i40e_init_shared_code(hw);
if (rc != 0) {
i40e_error(i40e, "failed to initialize i40e core: %d", rc);
return (B_FALSE);
}
/* Size the admin queues before initializing them. */
hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;
rc = i40e_init_adminq(hw);
if (rc != 0) {
i40e_error(i40e, "failed to initialize firmware admin queue: "
"%d, potential firmware version mismatch", rc);
i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
return (B_FALSE);
}
/* Warn (but proceed) if the NVM is newer or older than expected. */
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
i40e_log(i40e, "The driver for the device detected a newer "
"version of the NVM image (%d.%d) than expected (%d.%d).\n"
"Please install the most recent version of the network "
"driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
hw->aq.api_min_ver < (I40E_FW_MINOR_VERSION(hw) - 1)) {
i40e_log(i40e, "The driver for the device detected an older"
" version of the NVM image (%d.%d) than expected (%d.%d)."
"\nPlease update the NVM image.\n",
hw->aq.api_maj_ver, hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw) - 1);
}
i40e_clear_pxe_mode(hw);
if (!i40e_get_hw_capabilities(i40e, hw)) {
i40e_error(i40e, "failed to obtain hardware capabilities");
return (B_FALSE);
}
if (i40e_get_available_resources(i40e) == B_FALSE) {
i40e_error(i40e, "failed to obtain hardware resources");
return (B_FALSE);
}
i40e_hw_to_instance(i40e, hw);
rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (rc != 0) {
i40e_error(i40e, "failed to initialize hardware memory cache: "
"%d", rc);
return (B_FALSE);
}
rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (rc != 0) {
i40e_error(i40e, "failed to configure hardware memory cache: "
"%d", rc);
return (B_FALSE);
}
/* Best effort: we don't care if stopping LLDP fails. */
(void) i40e_aq_stop_lldp(hw, TRUE, FALSE, NULL);
rc = i40e_get_mac_addr(hw, hw->mac.addr);
if (rc != I40E_SUCCESS) {
i40e_error(i40e, "failed to retrieve hardware mac address: %d",
rc);
return (B_FALSE);
}
rc = i40e_validate_mac_addr(hw->mac.addr);
if (rc != 0) {
i40e_error(i40e, "failed to validate internal mac address: "
"%d", rc);
return (B_FALSE);
}
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
I40E_SUCCESS) {
i40e_error(i40e, "failed to retrieve port mac address: %d",
rc);
return (B_FALSE);
}
if (!i40e_set_def_vsi_seid(i40e)) {
i40e_error(i40e, "failed to obtain Default VSI SEID");
return (B_FALSE);
}
return (B_TRUE);
}
/*
 * Tear down driver state in roughly the reverse order of attach, using
 * the attach-progress bitmask to undo only the stages that completed.
 * Used by both failed attach and detach; frees the i40e_t itself.
 */
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT)
		ddi_ufm_fini(i40e->i40e_ufmh);

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	/*
	 * The admin queue buffer may never have been allocated if attach
	 * failed early; kmem_free() must not be handed a NULL pointer
	 * with a non-zero size.
	 */
	if (i40e->i40e_aqbuf != NULL)
		kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);

	kmem_free(i40e, sizeof (i40e_t));
	ddi_set_driver_private(devinfo, NULL);
}
/*
 * Final attach-time housekeeping: publish the PBA and firmware/API
 * version device properties, record PCI bus info in the shared code,
 * and verify register access has been healthy so far.  Returns B_TRUE
 * on success.
 */
static boolean_t
i40e_final_init(i40e_t *i40e)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
struct i40e_osdep *osdep = OS_DEP(hw);
uint8_t pbanum[I40E_PBANUM_STRLEN];
enum i40e_status_code irc;
char buf[I40E_DDI_PROP_LEN];
pbanum[0] = '\0';
irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
if (irc != I40E_SUCCESS) {
/* Non-fatal: just skip the PBA property. */
i40e_log(i40e, "failed to read PBA string: %d", irc);
} else {
(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
"printed-board-assembly", (char *)pbanum);
}
#ifdef DEBUG
/* Verify at debug time that the version strings cannot truncate. */
ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
hw->aq.fw_min_ver) < sizeof (buf));
ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
hw->aq.api_min_ver) < sizeof (buf));
#endif
(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
hw->aq.fw_min_ver);
(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
"firmware-version", buf);
(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
"firmware-build", buf);
(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
hw->aq.api_min_ver);
(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
"api-version", buf);
if (!i40e_set_hw_bus_info(hw))
return (B_FALSE);
if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
return (B_FALSE);
}
return (B_TRUE);
}
/*
 * Cache the device's PCI identification registers in the shared-code
 * handle for later use by the common code.
 */
static void
i40e_identify_hardware(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	ddi_acc_handle_t cfg = i40e->i40e_osdep_space.ios_cfg_handle;

	hw->vendor_id = pci_config_get16(cfg, PCI_CONF_VENID);
	hw->device_id = pci_config_get16(cfg, PCI_CONF_DEVID);
	hw->revision_id = pci_config_get8(cfg, PCI_CONF_REVID);
	hw->subsystem_device_id = pci_config_get16(cfg, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id = pci_config_get16(cfg, PCI_CONF_SUBVENID);
}
/*
 * Map the adapter's register set (I40E_ADAPTER_REGSET) into kernel
 * address space and record the mapping and its size in the osdep state.
 * Returns B_TRUE on success.
 */
static boolean_t
i40e_regs_map(i40e_t *i40e)
{
	dev_info_t *devinfo = i40e->i40e_dip;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
	off_t memsize;
	int status;

	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
	    DDI_SUCCESS) {
		i40e_error(i40e, "Used invalid register set to map PCIe regs");
		return (B_FALSE);
	}

	status = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
	    &osdep->ios_reg_handle);
	if (status != DDI_SUCCESS) {
		i40e_error(i40e, "failed to map device registers: %d", status);
		return (B_FALSE);
	}

	osdep->ios_reg_size = memsize;
	return (B_TRUE);
}
/*
 * Recompute the maximum frame size from the current SDU and derive the
 * RX/TX DMA buffer sizes, each rounded up to a 1 KiB multiple.  The RX
 * buffer additionally reserves room for IP header alignment.
 */
void
i40e_update_mtu(i40e_t *i40e)
{
	const uint32_t chunk = 1U << 10;
	uint32_t rx, tx;

	i40e->i40e_frame_max = i40e->i40e_sdu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
	i40e->i40e_rx_buf_size = P2ROUNDUP(rx, chunk);

	tx = i40e->i40e_frame_max;
	i40e->i40e_tx_buf_size = P2ROUNDUP(tx, chunk);
}
/*
 * Fetch an integer driver property, falling back to the supplied
 * default, and clamp the result to [min, max].  As in the original,
 * the minimum is applied last and therefore wins if min > max.
 */
static int
i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
{
	int value;

	value = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip,
	    DDI_PROP_DONTPASS, prop, def);

	if (value > max)
		value = max;
	if (value < min)
		value = min;

	return (value);
}
/*
 * Load all tunable driver properties, clamping each to its supported
 * range.  Ring sizes are rounded up to the descriptor alignment, and
 * disabling multiple rings collapses the instance to a single queue
 * pair and RX group.  Finishes by deriving the MTU-based buffer sizes.
 */
static void
i40e_init_properties(i40e_t *i40e)
{
i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);
i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);
i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
B_FALSE, B_TRUE, B_TRUE);
i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
I40E_DEF_TX_RING_SIZE);
/* Descriptor rings must be a multiple of I40E_DESC_ALIGN. */
if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
I40E_DESC_ALIGN);
}
i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
I40E_MIN_TX_BLOCK_THRESH,
i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
I40E_DEF_TX_BLOCK_THRESH);
i40e->i40e_num_rx_groups = i40e_get_prop(i40e, "rx_num_groups",
I40E_MIN_NUM_RX_GROUPS, I40E_MAX_NUM_RX_GROUPS,
I40E_DEF_NUM_RX_GROUPS);
i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
I40E_DEF_RX_RING_SIZE);
if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
I40E_DESC_ALIGN);
}
i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
I40E_DEF_RX_LIMIT_PER_INTR);
i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
B_FALSE, B_TRUE, B_TRUE);
i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable",
B_FALSE, B_TRUE, B_TRUE);
i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
B_FALSE, B_TRUE, B_TRUE);
i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
I40E_DEF_RX_DMA_THRESH);
i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
I40E_DEF_TX_DMA_THRESH);
i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);
i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);
i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);
/* Without multiple rings, fall back to a single pair and group. */
if (!i40e->i40e_mr_enable) {
i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
}
i40e_update_mtu(i40e);
}
/*
 * Allocate interrupt handles of the requested type.  FIXED and MSI use
 * a single vector; for MSI-X the request is read from the GLPCI_CNF2
 * register (the per-PF MSI-X count) plus one, since vector 0 is used
 * for non-queue work, with a floor of two.  On success the handle
 * array, counts, priority, and capability flags are recorded on the
 * instance.  Returns B_TRUE on success; on failure any allocated
 * handles are freed.
 */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
int request, count, actual, rc, min;
uint32_t reg;
switch (intr_type) {
case DDI_INTR_TYPE_FIXED:
case DDI_INTR_TYPE_MSI:
request = 1;
min = 1;
break;
case DDI_INTR_TYPE_MSIX:
min = 2;
if (!i40e->i40e_mr_enable) {
request = 2;
break;
}
reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
/* A failed register read means we can't trust the value. */
if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
ddi_fm_service_impact(i40e->i40e_dip,
DDI_SERVICE_DEGRADED);
return (B_FALSE);
}
request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
/* One extra vector for the admin queue / other interrupts. */
request++;
break;
default:
panic("bad interrupt type passed to i40e_alloc_intr_handles: "
"%d", intr_type);
}
rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
if (rc != DDI_SUCCESS || count < min) {
i40e_log(i40e, "Get interrupt number failed, "
"returned %d, count %d", rc, count);
return (B_FALSE);
}
rc = ddi_intr_get_navail(devinfo, intr_type, &count);
if (rc != DDI_SUCCESS || count < min) {
i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
"returned %d, count %d", rc, count);
return (B_FALSE);
}
actual = 0;
i40e->i40e_intr_count = 0;
i40e->i40e_intr_count_max = 0;
i40e->i40e_intr_count_min = 0;
i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
ASSERT(i40e->i40e_intr_size != 0);
i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);
rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
if (rc != DDI_SUCCESS) {
i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
goto alloc_handle_fail;
}
i40e->i40e_intr_count = actual;
i40e->i40e_intr_count_max = request;
i40e->i40e_intr_count_min = min;
if (actual < min) {
i40e_log(i40e, "actual (%d) is less than minimum (%d).",
actual, min);
goto alloc_handle_fail;
}
/* All handles share one priority/capability set; query handle 0. */
rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
if (rc != DDI_SUCCESS) {
i40e_log(i40e,
"Getting interrupt priority failed with %d.", rc);
goto alloc_handle_fail;
}
rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
if (rc != DDI_SUCCESS) {
i40e_log(i40e,
"Getting interrupt capabilities failed with %d.", rc);
goto alloc_handle_fail;
}
i40e->i40e_intr_type = intr_type;
return (B_TRUE);
alloc_handle_fail:
/* Frees whatever ddi_intr_alloc() handed back, plus the array. */
i40e_rem_intrs(i40e);
return (B_FALSE);
}
/*
 * Allocate interrupts, preferring MSI-X, then MSI, then fixed, subject
 * to the intr_force tunable.  On the MSI-X path the queue topology is
 * also sized: queue pairs per VSI become the largest power of two not
 * exceeding (vectors - 1), and the RX group count is reduced if the
 * resulting total exceeds the hardware queue-pair capability.  The
 * non-MSI-X paths collapse to a single queue pair and group.  Returns
 * B_TRUE on success.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
i40e_hw_t *hw = &i40e->i40e_hw_space;
int intr_types, rc;
uint_t max_trqpairs;
if (i40e_is_x722(i40e)) {
max_trqpairs = I40E_722_MAX_TC_QUEUES;
} else {
max_trqpairs = I40E_710_MAX_TC_QUEUES;
}
rc = ddi_intr_get_supported_types(devinfo, &intr_types);
if (rc != DDI_SUCCESS) {
i40e_error(i40e, "failed to get supported interrupt types: %d",
rc);
return (B_FALSE);
}
i40e->i40e_intr_type = 0;
if ((intr_types & DDI_INTR_TYPE_MSIX) &&
(i40e->i40e_intr_force <= I40E_INTR_MSIX) &&
(i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) {
uint32_t n, qp_cap, num_trqpairs;
/* Vector 0 is reserved for non-queue interrupts. */
n = MIN(i40e->i40e_intr_count - 1, max_trqpairs);
ASSERT3U(n, >, 0);
/* Round down to a power of two for even distribution. */
n = 0x1 << ddi_fls(n);
i40e->i40e_num_trqpairs_per_vsi = n;
ASSERT3U(i40e->i40e_num_rx_groups, >, 0);
/* Don't exceed what the hardware can actually provide. */
qp_cap = MIN(hw->func_caps.num_rx_qp, hw->func_caps.num_tx_qp);
num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
i40e->i40e_num_rx_groups;
if (num_trqpairs > qp_cap) {
i40e->i40e_num_rx_groups = MAX(1, qp_cap /
i40e->i40e_num_trqpairs_per_vsi);
num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
i40e->i40e_num_rx_groups;
i40e_log(i40e, "Rx groups restricted to %u",
i40e->i40e_num_rx_groups);
}
ASSERT3U(num_trqpairs, >, 0);
i40e->i40e_num_trqpairs = num_trqpairs;
return (B_TRUE);
}
/* No MSI-X: single queue pair and group for MSI or fixed. */
i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs;
i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
if ((intr_types & DDI_INTR_TYPE_MSI) &&
(i40e->i40e_intr_force <= I40E_INTR_MSI)) {
if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
return (B_TRUE);
}
if (intr_types & DDI_INTR_TYPE_FIXED) {
if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
return (B_TRUE);
}
return (B_FALSE);
}
/*
 * Assign each transmit/receive queue pair to an MSI-X interrupt vector.
 * Vector 0 is reserved for non-ring causes, so rings are spread
 * round-robin across vectors 1 .. i40e_intr_count - 1.  Non-MSI-X
 * configurations need no mapping and succeed trivially.
 */
static boolean_t
i40e_map_intrs_to_vectors(i40e_t *i40e)
{
	uint_t nqvecs, qidx;

	if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX)
		return (B_TRUE);

	nqvecs = i40e->i40e_intr_count - 1;
	for (qidx = 0; qidx < i40e->i40e_num_trqpairs; qidx++) {
		uint_t vec = 1 + (qidx % nqvecs);

		i40e->i40e_trqpairs[qidx].itrq_rx_intrvec = vec;
		i40e->i40e_trqpairs[qidx].itrq_tx_intrvec = vec;
	}

	return (B_TRUE);
}
/*
 * Register interrupt handlers with the DDI for the previously chosen
 * interrupt type.  MSI-X gets one i40e_intr_msix handler per allocated
 * vector (passing the vector number as the second handler argument);
 * MSI and fixed interrupts get a single handler on handle 0.  On partial
 * MSI-X failure, every handler added so far is removed before returning.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				/* Unwind the handlers added so far. */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* The type was set by i40e_alloc_intrs; anything else is a bug. */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}
/*
 * Periodic callback installed via ddi_periodic_add() during attach;
 * re-checks the link state under the general lock.
 */
static void
i40e_timer(void *arg)
{
	i40e_t *i40ep = arg;

	mutex_enter(&i40ep->i40e_general_lock);
	i40e_link_check(i40ep);
	mutex_exit(&i40ep->i40e_general_lock);
}
/*
 * Refresh the driver's view of link and PHY state from firmware.  An
 * unknown-PHY response is retried once after a 200ms delay.  Errors are
 * logged but not returned to the caller -- the function is void, so
 * callers proceed regardless; link state is re-evaluated periodically.
 */
static void
i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/* Prime the firmware's link info, then evaluate link state. */
	(void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_link_check(i40e);

	rc = i40e_aq_get_phy_capabilities(hw, false, true, &i40e->i40e_phy,
	    NULL);
	if (rc == I40E_ERR_UNKNOWN_PHY) {
		/* Some PHYs need a moment before reporting; retry once. */
		i40e_msec_delay(200);
		rc = i40e_aq_get_phy_capabilities(hw, false, true,
		    &i40e->i40e_phy, NULL);
	}

	if (rc != I40E_SUCCESS) {
		if (rc == I40E_ERR_UNKNOWN_PHY) {
			i40e_error(i40e, "encountered unknown PHY type, "
			    "not attaching.");
		} else {
			i40e_error(i40e, "error getting physical capabilities: "
			    "%d, %d", rc, hw->aq.asq_last_status);
		}
	}

	rc = i40e_update_link_info(hw);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update link information: %d", rc);
	}

	/*
	 * A mask of 0 enables all PHY link event interrupts -- TODO
	 * confirm the mask polarity against i40e_aq_set_phy_int_mask().
	 */
	rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update phy link mask: %d", rc);
	}
}
/*
 * Placeholder for MAC address initialization.  The body is currently an
 * empty stub: no MAC address state is set up here.  NOTE(review):
 * confirm whether this was intentionally stubbed out or content was
 * lost -- callers (i40e_chip_start) invoke it as if it did work.
 */
static void
i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
{
}
/*
 * Fill in the VSI properties common to the default and additional VSIs:
 * a contiguous queue mapping starting at this VSI's base queue pair, a
 * single traffic class sized for the per-VSI pair count, and port VLAN
 * flags that accept all tagging and strip nothing.
 */
static void
i40e_set_shared_vsi_props(i40e_t *i40e,
    struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx)
{
	uint_t tc_queues;
	uint16_t vsi_qp_base;

	info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
	    I40E_AQ_VSI_PROP_VLAN_VALID);

	/* Each VSI owns a contiguous slice of the global queue-pair space. */
	vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi;
	info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	info->queue_mapping[0] =
	    LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) &
	    I40E_AQ_VSI_QUEUE_MASK);

	/*
	 * The TC queue count field is encoded as a power-of-two exponent;
	 * ddi_fls(n - 1) derives it from the per-VSI pair count (which the
	 * interrupt allocation code made a power of two).
	 */
	tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1);
	info->tc_mapping[0] =
	    LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_NUMBER_MASK));

	/* Accept frames with any VLAN tagging; don't strip tags on rx. */
	info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
}
/*
 * Tear down the VSI at the given index, if one was created.  A zero
 * SEID means no VSI exists for this slot; the SEID is cleared after a
 * delete attempt regardless of the firmware's answer.
 */
static void
i40e_delete_vsi(i40e_t *i40e, uint_t idx)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint16_t seid = i40e->i40e_vsis[idx].iv_seid;
	int ret;

	if (seid == 0)
		return;

	ret = i40e_aq_delete_element(hw, seid, NULL);
	if (ret != I40E_SUCCESS) {
		i40e_error(i40e, "Failed to delete VSI %d: %d",
		    ret, hw->aq.asq_last_status);
	}
	i40e->i40e_vsis[idx].iv_seid = 0;
}
/*
 * Create an additional (non-default) VSI attached to the driver's VEB,
 * record its SEID/number/stats id, point the corresponding rx group at
 * it, and initialize its statistics.  idx 0 is the default VSI, which is
 * configured elsewhere (i40e_config_def_vsi), hence the assertion.
 */
static boolean_t
i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx)
{
	struct i40e_vsi_context ctx;
	i40e_rx_group_t *rxg;
	int rc;

	ASSERT3U(idx, !=, 0);

	bzero(&ctx, sizeof (struct i40e_vsi_context));
	ctx.uplink_seid = i40e->i40e_veb_seid;
	ctx.pf_num = hw->pf_id;
	ctx.flags = I40E_AQ_VSI_TYPE_PF;
	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	i40e_set_shared_vsi_props(i40e, &ctx.info, idx);

	rc = i40e_aq_add_vsi(hw, &ctx, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc,
		    hw->aq.asq_last_status);
		return (B_FALSE);
	}

	/* Firmware filled in the new VSI's identifiers; record them. */
	rxg = &i40e->i40e_rx_groups[idx];
	rxg->irg_vsi_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_number = ctx.vsi_number;
	i40e->i40e_vsis[idx].iv_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx);

	if (i40e_stat_vsi_init(i40e, idx) == B_FALSE)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Configure the default VSI (index 0): fetch its current parameters from
 * firmware, record its identifiers, apply the shared queue/VLAN
 * properties, and remove the L2 MAC filter that firmware pre-installed
 * for the port address so the driver controls all MAC filtering itself.
 * Returns B_TRUE on success.
 */
static boolean_t
i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_vsi_context ctx;
	i40e_rx_group_t *def_rxg;
	int err;
	struct i40e_aqc_remove_macvlan_element_data filt;

	bzero(&ctx, sizeof (struct i40e_vsi_context));
	ctx.seid = I40E_DEF_VSI_SEID(i40e);
	ctx.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}

	ctx.info.valid_sections = 0;
	i40e->i40e_vsis[0].iv_number = ctx.vsi_number;
	i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
	if (i40e_stat_vsi_init(i40e, 0) == B_FALSE)
		return (B_FALSE);

	i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX);
	err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "Update VSI params failed with %d", err);
		return (B_FALSE);
	}

	def_rxg = &i40e->i40e_rx_groups[0];
	def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e);

	/* Build a perfect-match delete request for the port MAC address. */
	bzero(&filt, sizeof (filt));
	bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
	filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	filt.vlan_tag = 0;

	ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
	i40e_log(i40e, "Num L2 filters: %u",
	    i40e->i40e_resources.ifr_nmacfilt_used);

	err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
	    NULL);
	if (err == I40E_SUCCESS) {
		i40e_log(i40e,
		    "Removed L2 filter from Default VSI with SEID %u",
		    I40E_DEF_VSI_SEID(i40e));
	} else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
		/*
		 * Fix: "filter not found" is reported through the admin
		 * queue error code I40E_AQ_RC_ENOENT, not the Unix errno
		 * ENOENT that was previously compared here.  A missing
		 * filter is benign -- there was simply nothing to remove.
		 */
		i40e_log(i40e,
		    "No L2 filter for Default VSI with SEID %u",
		    I40E_DEF_VSI_SEID(i40e));
	} else {
		i40e_error(i40e, "Failed to remove L2 filter from"
		    " Default VSI with SEID %u: %d (%d)",
		    I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
		return (B_FALSE);
	}

	/* Account for the removed (or absent) firmware filter. */
	if (i40e->i40e_resources.ifr_nmacfilt_used == 1) {
		i40e->i40e_resources.ifr_nmacfilt_used--;
	} else {
		if (i40e->i40e_resources.ifr_nmacfilt_used != 0) {
			i40e_error(i40e, "Unexpected L2 filter count: %u"
			    " (expected 0)",
			    i40e->i40e_resources.ifr_nmacfilt_used);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}
/*
 * Program a random RSS hash key into each VSI on the X722, which keys
 * RSS per-VSI rather than per-PF.  A single random seed is split into
 * the standard and extended portions of the admin-queue key structure;
 * the CTASSERT guards that the seed is large enough to fill both.
 */
static boolean_t
i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw)
{
	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
		struct i40e_aqc_get_set_rss_key_data key;
		const char *u8seed;
		enum i40e_status_code status;
		uint16_t vsi_number = i40e->i40e_vsis[i].iv_number;

		(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
		u8seed = (char *)seed;

		/* The seed must cover both halves of the key structure. */
		CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) +
		    sizeof (key.extended_hash_key)));

		bcopy(u8seed, key.standard_rss_key,
		    sizeof (key.standard_rss_key));
		bcopy(&u8seed[sizeof (key.standard_rss_key)],
		    key.extended_hash_key, sizeof (key.extended_hash_key));

		/* Every rx group should have a VSI by the time we're here. */
		ASSERT3U(vsi_number, !=, 0);
		status = i40e_aq_set_rss_key(hw, vsi_number, &key);
		if (status != I40E_SUCCESS) {
			i40e_error(i40e, "failed to set RSS key for VSI %u: %d",
			    vsi_number, status);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}
/*
 * Seed the RSS hash key.  The X722 keys RSS per-VSI through the admin
 * queue; other parts take a random key written directly into the
 * PFQF_HKEY registers.
 */
static boolean_t
i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw)
{
	uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
	uint_t reg;

	if (i40e_is_x722(i40e))
		return (i40e_config_rss_key_x722(i40e, hw));

	(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
	for (reg = 0; reg <= I40E_PFQF_HKEY_MAX_INDEX; reg++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(reg), seed[reg]);

	return (B_TRUE);
}
/*
 * Build and program the RSS hash lookup table (HLUT), striping queue
 * indices round-robin across the table so flows spread over the per-VSI
 * queue pairs.  The X722 takes the table via the admin queue; other
 * parts are written register-by-register, four entries per 32-bit word.
 */
static boolean_t
i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw)
{
	uint32_t *hlut;
	uint8_t lut_mask;
	uint_t i;
	boolean_t ret = B_FALSE;

	hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP);
	if (hlut == NULL) {
		i40e_error(i40e, "i40e_config_rss() buffer allocation failed");
		return (B_FALSE);
	}

	/*
	 * LUT entries are masked to the table entry width: hard-coded to
	 * 7 bits on the X722 (presumably per its datasheet -- TODO
	 * confirm), taken from func_caps otherwise.
	 */
	if (i40e_is_x722(i40e)) {
		lut_mask = (1 << 7) - 1;
	} else {
		lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;
	}

	for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) {
		((uint8_t *)hlut)[i] =
		    (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask;
	}

	if (i40e_is_x722(i40e)) {
		enum i40e_status_code status;

		status = i40e_aq_set_rss_lut(hw, 0, true, (uint8_t *)hlut,
		    I40E_HLUT_TABLE_SIZE);
		if (status != I40E_SUCCESS) {
			i40e_error(i40e, "failed to set RSS LUT %d: %d",
			    status, hw->aq.asq_last_status);
			goto out;
		}
	} else {
		/* Each PFQF_HLUT register holds four 8-bit table entries. */
		for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) {
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);
		}
	}
	ret = B_TRUE;

out:
	kmem_free(hlut, I40E_HLUT_TABLE_SIZE);
	return (ret);
}
/*
 * Configure RSS end to end: program the hash key, enable hashing for
 * the supported packet classification types (PCTYPEs) via the 64-bit
 * HENA register pair, and fill in the hash lookup table.  The X722
 * supports several additional PCTYPEs.
 */
static boolean_t
i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
{
	uint64_t hena;

	if (!i40e_config_rss_key(i40e, hw))
		return (B_FALSE);

	/* Enable hashing for the IPv4/IPv6 TCP/UDP/SCTP and L2 types. */
	hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	if (i40e_is_x722(i40e)) {
		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
	}

	/* HENA is split across two 32-bit registers: low word, then high. */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));

	return (i40e_config_rss_hlut(i40e, hw));
}
/*
 * Bring the chip to an operating state: restart auto-negotiation on old
 * firmware, refresh link/PHY state, disable flow control, set filter
 * control parameters, initialize interrupt state, create the VEB and
 * all VSIs (default plus one per additional rx group), and configure
 * RSS.  Returns B_TRUE on success.
 */
static boolean_t
i40e_chip_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_filter_control_settings filter;
	int rc;
	uint8_t err;

	/*
	 * Firmware older than 4.33 needs an explicit link restart (with a
	 * settle delay) -- newer firmware handles this itself.
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
		    I40E_SUCCESS) {
			i40e_error(i40e, "failed to restart link: admin queue "
			    "error: %d", hw->aq.asq_last_status);
			return (B_FALSE);
		}
	}

	i40e_get_hw_state(i40e, hw);

	/* The driver runs with flow control disabled. */
	hw->fc.requested_mode = I40E_FC_NONE;
	rc = i40e_set_fc(hw, &err, true);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "Setting flow control failed, returned %d"
		    " with error: 0x%x", rc, err);
		return (B_FALSE);
	}

	i40e_init_macaddrs(i40e, hw);

	/* Enable ethertype and MAC/VLAN filtering with a 512-entry LUT. */
	bzero(&filter, sizeof (filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;

	rc = i40e_set_filter_control(hw, &filter);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
		return (B_FALSE);
	}

	i40e_intr_chip_init(i40e);

	rc = i40e_get_mac_seid(i40e);
	if (rc == -1) {
		i40e_error(i40e, "failed to obtain MAC Uplink SEID");
		return (B_FALSE);
	}
	i40e->i40e_mac_seid = (uint16_t)rc;

	/*
	 * Create the VEB under the MAC uplink, with the default VSI as its
	 * downlink, so additional VSIs have something to attach to.
	 */
	rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e),
	    0x1, true, &i40e->i40e_veb_seid, false, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc,
		    hw->aq.asq_last_status);
		return (B_FALSE);
	}

	if (!i40e_config_def_vsi(i40e, hw))
		return (B_FALSE);

	/* One additional VSI for each rx group beyond the default. */
	for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) {
		if (!i40e_add_vsi(i40e, hw, i))
			return (B_FALSE);
	}

	if (!i40e_config_rss(i40e, hw))
		return (B_FALSE);

	i40e_flush(hw);

	return (B_TRUE);
}
/*
 * Request that hardware stop the rx queue for this pair by clearing the
 * enable-request bit.  Completion is not waited for here; callers use
 * i40e_shutdown_ring_wait() to poll the status bit.
 */
static void
i40e_shutdown_rx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t val;

	val = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
	if ((val & I40E_QRX_ENA_QENA_REQ_MASK) == 0)
		return;

	VERIFY((val & I40E_QRX_ENA_QENA_REQ_MASK) ==
	    I40E_QRX_ENA_QENA_REQ_MASK);
	I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index),
	    val & ~I40E_QRX_ENA_QENA_REQ_MASK);
}
/*
 * Request that hardware stop the tx queue for this pair: signal the
 * pre-disable configuration, give hardware 500us to notice, then clear
 * the enable-request bit if it is still set.  Completion is polled by
 * i40e_shutdown_ring_wait().
 */
static void
i40e_shutdown_tx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t val;

	i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, false);
	drv_usecwait(500);

	val = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
	if ((val & I40E_QTX_ENA_QENA_REQ_MASK) == 0)
		return;

	I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index),
	    val & ~I40E_QTX_ENA_QENA_REQ_MASK);
}
/*
 * Poll until hardware acknowledges that both the rx and tx queues of
 * this pair are disabled (status bit cleared), bounded by
 * I40E_RING_WAIT_NTRIES attempts with I40E_RING_WAIT_PAUSE between
 * them.  Returns B_FALSE if either queue fails to quiesce in time.
 */
static boolean_t
i40e_shutdown_ring_wait(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t reg;
	int try;

	/* Wait for the rx queue's enable-status bit to drop. */
	for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
			break;
		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
	}

	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
		i40e_error(i40e, "timed out disabling rx queue %d",
		    itrq->itrq_index);
		return (B_FALSE);
	}

	/* Then the tx queue's. */
	for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
			break;
		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
	}

	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
		i40e_error(i40e, "timed out disabling tx queue %d",
		    itrq->itrq_index);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Shut down a single queue pair: disable both hardware queues, wait for
 * them to quiesce, quiesce interrupts, clean the tx ring under its lock,
 * and release ring memory.  If the tx side was already quiesced (e.g. by
 * a prior shutdown) the whole operation is skipped.  The stop timestamp
 * feeds the re-enable gap check in i40e_setup_ring().
 */
boolean_t
i40e_shutdown_ring(i40e_trqpair_t *itrq)
{
	boolean_t rv = B_TRUE;

	/* Already quiesced -- nothing left to do for this pair. */
	if (i40e_ring_tx_quiesce(itrq)) {
		return (B_TRUE);
	}

	i40e_shutdown_rx_ring(itrq);
	i40e_shutdown_tx_ring(itrq);
	if (!i40e_shutdown_ring_wait(itrq))
		rv = B_FALSE;

	/* Record when the ring stopped; setup enforces a minimum gap. */
	itrq->irtq_time_stopped = gethrtime();

	i40e_intr_quiesce(itrq);

	mutex_enter(&itrq->itrq_tx_lock);
	i40e_tx_cleanup_ring(itrq);
	mutex_exit(&itrq->itrq_tx_lock);

	i40e_free_ring_mem(itrq, B_FALSE);

	return (rv);
}
/*
 * Shut down every transmit/receive queue pair.  All pairs are attempted
 * even if one fails; B_FALSE is returned if any pair failed to quiesce.
 */
static boolean_t
i40e_shutdown_rings(i40e_t *i40e)
{
	boolean_t rv = B_TRUE;
	uint_t i;

	/*
	 * i40e_num_trqpairs is unsigned; use an unsigned index to avoid
	 * the signed/unsigned comparison the previous `int i` provoked
	 * (matching the uint_t loops used elsewhere in this file).
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		if (!i40e_shutdown_ring(&i40e->i40e_trqpairs[i]))
			rv = B_FALSE;
	}

	return (rv);
}
/*
 * Populate every rx descriptor with the DMA address of its control
 * block's buffer.  Header addresses are zeroed since header split is
 * not used.
 */
static void
i40e_setup_rx_descs(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	int idx;

	for (idx = 0; idx < rxd->rxd_ring_size; idx++) {
		i40e_rx_control_block_t *cb = rxd->rxd_work_list[idx];
		i40e_rx_desc_t *dp = &rxd->rxd_desc_ring[idx];

		dp->read.pkt_addr =
		    CPU_TO_LE64((uintptr_t)cb->rcb_dma.dmab_dma_address);
		dp->read.hdr_addr = 0;
	}
}
/*
 * Program the HMC (Host Memory Cache) rx queue context for this pair:
 * descriptor ring base/length, buffer sizing, and feature disables
 * (no header split, no flow control, no TPH).  The context is cleared
 * first, then set.  Returns B_FALSE on any HMC failure.
 */
static boolean_t
i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_hmc_obj_rxq rctx;
	int err;

	bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));

	/* Ring base is expressed in I40E_HMC_RX_CTX_UNIT-sized units. */
	rctx.base = rxd->rxd_desc_area.dmab_dma_address /
	    I40E_HMC_RX_CTX_UNIT;
	rctx.qlen = rxd->rxd_ring_size;
	VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
	VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
	rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	/* No header buffer: header split is disabled below. */
	rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
	rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
	rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
	rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
	rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
	rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
	rctx.rxmax = i40e->i40e_frame_max;
	rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;
	rctx.prefena = I40E_HMC_RX_PREFENA;

	err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Bring up the rx side of a queue pair: initialize descriptors and the
 * HMC context, program the tail pointer to make all buffers available,
 * then set the enable-request bit and poll for hardware's enable-status
 * acknowledgement.  Returns B_FALSE on HMC failure or enable timeout.
 */
static boolean_t
i40e_setup_rx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	uint32_t reg;
	int i;

	i40e_setup_rx_descs(itrq);

	if (!i40e_setup_rx_hmc(itrq))
		return (B_FALSE);

	/* Reset tail to 0, then advance it to expose the whole ring. */
	I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index), 0);
	I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index),
	    rxd->rxd_ring_size - 1);

	/* The queue must be fully disabled before we enable it. */
	reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
	VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);

	for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
	}

	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		i40e_error(i40e, "failed to enable rx queue %d, timed "
		    "out.", itrq->itrq_index);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Program the HMC tx queue context for this pair: descriptor ring
 * base/length, head write-back location (placed immediately after the
 * descriptor ring in the same DMA area), and feature disables.  The
 * ready-list handle is looked up from the default VSI's parameters.
 * Returns B_FALSE on any admin-queue or HMC failure.
 */
static boolean_t
i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_hmc_obj_txq tctx;
	struct i40e_vsi_context context;
	int err;

	bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
	tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
	/* Ring base is expressed in I40E_HMC_TX_CTX_UNIT-sized units. */
	tctx.base = itrq->itrq_desc_area.dmab_dma_address /
	    I40E_HMC_TX_CTX_UNIT;
	tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
	tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
	tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
	tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
	tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
	tctx.qlen = itrq->itrq_tx_ring_size;
	tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	/* Head write-back lives just past the last tx descriptor. */
	tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
	    sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;
	tctx.crc = 0;
	tctx.rdylist_act = 0;

	/* Fetch the default VSI's queue-set handle for the ready list. */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = I40E_DEF_VSI_SEID(i40e);
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}
	tctx.rdylist = LE_16(context.info.qs_handle[0]);

	err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Bring up the tx side of a queue pair: signal pre-enable configuration,
 * program the HMC context, mark the queue as PF-owned, then set the
 * enable-request bit and poll for hardware's enable-status
 * acknowledgement.  Returns B_FALSE on HMC failure or enable timeout.
 */
static boolean_t
i40e_setup_tx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t reg;
	int i;

	i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, true);

	if (!i40e_setup_tx_hmc(itrq))
		return (B_FALSE);

	/* Associate the queue with this PF before enabling it. */
	reg = I40E_QTX_CTL_PF_QUEUE;
	reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
	    I40E_QTX_CTL_PF_INDX_MASK;
	I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
	i40e_flush(hw);

	/* The queue must be fully disabled before we enable it. */
	reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
	VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);

	for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
	}

	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		i40e_error(i40e, "failed to enable tx queue %d, timed "
		    "out", itrq->itrq_index);
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Fully set up and enable one queue pair: allocate ring memory, enforce
 * a minimum gap since the ring was last stopped, then (under the
 * interrupt lock) enable the rx and tx queues and verify register
 * access.  On success the interrupt and tx quiesce flags are cleared so
 * the data path may run.  Returns 0, ENOMEM, or EIO.
 */
int
i40e_setup_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	hrtime_t now, gap;

	if (!i40e_alloc_ring_mem(itrq)) {
		i40e_error(i40e, "Failed to allocate ring memory");
		return (ENOMEM);
	}

	/*
	 * Hardware wants a settle period between disabling and
	 * re-enabling a queue.  If the ring stopped very recently, delay
	 * before re-enabling -- NOTE(review): the delay is `gap` ms, not
	 * the remaining (I40E_RING_ENABLE_GAP - gap); confirm intent.
	 */
	now = gethrtime();
	gap = NSEC2MSEC(now - itrq->irtq_time_stopped);
	if (gap < I40E_RING_ENABLE_GAP && gap != 0)
		delay(drv_usectohz(gap * 1000));

	mutex_enter(&itrq->itrq_intr_lock);

	if (!i40e_setup_rx_ring(itrq))
		goto failed;

	if (!i40e_setup_tx_ring(itrq))
		goto failed;

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK)
		goto failed;

	/* Allow interrupts, then transmits, to flow again. */
	itrq->itrq_intr_quiesce = B_FALSE;
	mutex_exit(&itrq->itrq_intr_lock);

	mutex_enter(&itrq->itrq_tx_lock);
	itrq->itrq_tx_quiesce = B_FALSE;
	mutex_exit(&itrq->itrq_tx_lock);

	return (0);

failed:
	mutex_exit(&itrq->itrq_intr_lock);
	i40e_free_ring_mem(itrq, B_TRUE);
	ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);

	return (EIO);
}
/*
 * Stop the device: disable and clear I/O interrupts, shut down every
 * queue pair, delete the additional VSIs and the VEB, tear down
 * interrupt state, and reset link bookkeeping.  Called with the general
 * lock held; FM is notified if rings fail to quiesce or config-space
 * access looks bad.
 */
void
i40e_stop(i40e_t *i40e)
{
	uint_t i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	i40e_intr_io_disable_all(i40e);
	i40e_intr_io_clear_cause(i40e);

	if (!i40e_shutdown_rings(i40e))
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);

	/* Index 0 is the default VSI, which firmware owns; skip it. */
	for (i = 1; i < i40e->i40e_num_rx_groups; i++) {
		i40e_delete_vsi(i40e, i);
	}

	if (i40e->i40e_veb_seid != 0) {
		int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL);

		if (rc != I40E_SUCCESS) {
			i40e_error(i40e, "Failed to delete VEB %d: %d", rc,
			    hw->aq.asq_last_status);
		}

		i40e->i40e_veb_seid = 0;
	}

	i40e_intr_chip_fini(i40e);

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	/* VSI stats (including the default VSI's) are torn down here. */
	for (i = 0; i < i40e->i40e_num_rx_groups; i++) {
		i40e_stat_vsi_fini(i40e, i);
	}

	i40e->i40e_link_speed = 0;
	i40e->i40e_link_duplex = 0;
	i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
}
/*
 * Start the device: bring up the chip, enable broadcast on the default
 * VSI, program the MAC frame-size configuration, clear the error/stall
 * state bits, and enable I/O interrupts.  On any failure the device is
 * stopped again and FM is notified.  Called with the general lock held;
 * returns B_TRUE on success.
 */
boolean_t
i40e_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t rc = B_TRUE;
	int err;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	if (!i40e_chip_start(i40e)) {
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), true,
	    NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set default VSI: %d", err);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, true, 0,
	    false, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set MAC config: %d", err);
		rc = B_FALSE;
		goto done;
	}

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		rc = B_FALSE;
		goto done;
	}

	/* Clear stale fault flags now that the device is healthy again. */
	atomic_and_32(&i40e->i40e_state,
	    ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));

	i40e_intr_io_enable_all(i40e);

done:
	if (rc == B_FALSE) {
		i40e_stop(i40e);
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	return (rc);
}
/*
 * Wait for outstanding rx DMA buffers (loaned to the stack) to be
 * returned, timing out after I40E_DRAIN_RX_WAIT per wait.  Returns
 * B_FALSE if buffers remain when the wait times out.
 */
static boolean_t
i40e_drain_rx(i40e_t *i40e)
{
	boolean_t drained = B_TRUE;

	mutex_enter(&i40e->i40e_rx_pending_lock);
	while (drained && i40e->i40e_rx_pending > 0) {
		if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
		    &i40e->i40e_rx_pending_lock,
		    drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1)
			drained = B_FALSE;
	}
	mutex_exit(&i40e->i40e_rx_pending_lock);

	return (drained);
}
/*
 * UFM callback: describe firmware image zero, the only image this
 * driver reports.  It has a single slot.
 */
static int
i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	if (imgno > 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, 1);
	return (0);
}
/*
 * UFM callback: populate slot 0 of image 0 from the firmware-version
 * device properties set during attach.  The misc nvlist's ownership
 * passes to the slot via ddi_ufm_slot_set_misc() on success --
 * presumably the UFM framework frees it; verify against ddi_ufm(9E).
 * All looked-up properties are freed on both success and error paths.
 */
static int
i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	i40e_t *i40e = (i40e_t *)arg;
	char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL;
	nvlist_t *misc = NULL;
	uint_t flags = DDI_PROP_DONTPASS;
	int err;

	/* Only image 0 / slot 0 exists; all three properties must resolve. */
	if (imgno != 0 || slotno != 0 ||
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
	    "firmware-version", &fw_ver) != DDI_PROP_SUCCESS ||
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
	    "firmware-build", &fw_bld) != DDI_PROP_SUCCESS ||
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
	    "api-version", &api_ver) != DDI_PROP_SUCCESS) {
		err = EINVAL;
		goto err;
	}

	ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
	ddi_ufm_slot_set_version(slot, fw_ver);

	/* KM_SLEEP allocation cannot fail, hence the (void) cast. */
	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
	if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 ||
	    (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) {
		goto err;
	}
	ddi_ufm_slot_set_misc(slot, misc);

	ddi_prop_free(fw_ver);
	ddi_prop_free(fw_bld);
	ddi_prop_free(api_ver);
	return (0);

err:
	nvlist_free(misc);
	if (fw_ver != NULL)
		ddi_prop_free(fw_ver);
	if (fw_bld != NULL)
		ddi_prop_free(fw_bld);
	if (api_ver != NULL)
		ddi_prop_free(api_ver);

	return (err);
}
/*
 * UFM callback: this driver only reports firmware information; it
 * cannot update or read back images.
 */
static int
i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}
/* UFM entry points registered with ddi_ufm_init() during attach. */
static ddi_ufm_ops_t i40e_ufm_ops = {
	NULL,			/* ddi_ufm_op_nimages (default: 1 image) */
	i40e_ufm_fill_image,	/* ddi_ufm_op_fill_image */
	i40e_ufm_fill_slot,	/* ddi_ufm_op_fill_slot */
	i40e_ufm_getcaps	/* ddi_ufm_op_getcaps */
};
/*
 * DDI attach(9E) entry point.  Performs the full bring-up sequence --
 * FM, PCI config space, register mapping, properties, shared code,
 * interrupts, rings, handlers, stats, GLDv3 registration, the link-check
 * timer, and (on PF function 0) the UFM subsystem -- recording each
 * completed step in i40e_attach_progress so i40e_unconfigure() can
 * unwind exactly what was done on failure.
 */
static int
i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	i40e_t *i40e;
	struct i40e_osdep *osdep;
	i40e_hw_t *hw;
	int instance;

	/* Only fresh attach is supported (no DDI_RESUME). */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(devinfo);
	i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);

	i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
	i40e->i40e_instance = instance;
	i40e->i40e_dip = devinfo;

	/* Link the shared-code hw structure to our osdep and back. */
	hw = &i40e->i40e_hw_space;
	osdep = &i40e->i40e_osdep_space;
	hw->back = osdep;
	osdep->ios_i40e = i40e;

	ddi_set_driver_private(devinfo, i40e);

	i40e_fm_init(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;

	if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
		i40e_error(i40e, "Failed to map PCI configurations.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;

	i40e_identify_hardware(i40e);

	if (!i40e_regs_map(i40e)) {
		i40e_error(i40e, "Failed to map device registers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;

	i40e_init_properties(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;

	if (!i40e_common_code_init(i40e, hw))
		goto attach_fail;
	i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;

	if (!i40e_alloc_intrs(i40e, devinfo)) {
		i40e_error(i40e, "Failed to allocate interrupts.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;

	if (!i40e_alloc_trqpairs(i40e)) {
		i40e_error(i40e,
		    "Failed to allocate receive & transmit rings.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;

	if (!i40e_map_intrs_to_vectors(i40e)) {
		i40e_error(i40e, "Failed to map interrupts to vectors.");
		goto attach_fail;
	}

	if (!i40e_add_intr_handlers(i40e)) {
		i40e_error(i40e, "Failed to add the interrupt handlers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;

	if (!i40e_final_init(i40e)) {
		i40e_error(i40e, "Final initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_INIT;

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	if (!i40e_stats_init(i40e)) {
		i40e_error(i40e, "Stats initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_STATS;

	if (!i40e_register_mac(i40e)) {
		i40e_error(i40e, "Failed to register to MAC/GLDv3");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_MAC;

	/* Periodic link checking; 0 indicates ddi_periodic_add failure. */
	i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
	    I40E_CYCLIC_PERIOD, DDI_IPL_0);
	if (i40e->i40e_periodic_id == 0) {
		i40e_error(i40e, "Failed to add the link-check timer");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;

	if (!i40e_enable_interrupts(i40e)) {
		i40e_error(i40e, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;

	/* UFM reporting is registered once per device, on PF function 0. */
	if (i40e->i40e_hw_space.bus.func == 0) {
		if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION,
		    &i40e_ufm_ops, &i40e->i40e_ufmh, i40e) != 0) {
			i40e_error(i40e, "failed to initialize UFM subsystem");
			goto attach_fail;
		}
		ddi_ufm_update(i40e->i40e_ufmh);
		i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT;
	}

	atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);

	mutex_enter(&i40e_glock);
	list_insert_tail(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	return (DDI_SUCCESS);

attach_fail:
	i40e_unconfigure(devinfo, i40e);
	return (DDI_FAILURE);
}
/*
 * DDI detach(9E) entry point.  Refuses to detach while rx DMA buffers
 * remain loaned to the stack; otherwise removes the instance from the
 * global list and unwinds everything attach set up.
 */
static int
i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	i40e_t *i40e;

	/* Only full detach is supported (no DDI_SUSPEND). */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	i40e = (i40e_t *)ddi_get_driver_private(devinfo);
	if (i40e == NULL) {
		i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
		return (DDI_FAILURE);
	}

	if (i40e_drain_rx(i40e) == B_FALSE) {
		i40e_log(i40e, "timed out draining DMA resources, %d buffers "
		    "remain", i40e->i40e_rx_pending);
		return (DDI_FAILURE);
	}

	mutex_enter(&i40e_glock);
	list_remove(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	i40e_unconfigure(devinfo, i40e);

	return (DDI_SUCCESS);
}
/*
 * Character/block entry points.  As a pure GLDv3 NIC driver, i40e has
 * no user-visible device nodes of its own, so nearly everything is
 * nulldev/nodev.
 */
static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
/* Device operations vector; mac_init_ops() fills in the GLDv3 pieces. */
static struct dev_ops i40e_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	NULL,				/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	i40e_attach,			/* devo_attach */
	i40e_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&i40e_cb_ops,			/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	nulldev,			/* devo_power */
	ddi_quiesce_not_supported	/* devo_quiesce */
};
/* Loadable-driver linkage: version string and device operations. */
static struct modldrv i40e_modldrv = {
	&mod_driverops,		/* drv_modops */
	i40e_ident,		/* drv_linkinfo */
	&i40e_dev_ops		/* drv_dev_ops */
};
/* Module linkage: a single driver linkage structure. */
static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	&i40e_modldrv,
	NULL
};
/*
 * Module _init(9E): set up the global instance/device lists and lock,
 * register the GLDv3 mac ops, and install the module.  Everything is
 * unwound if mod_install() fails.
 */
int
_init(void)
{
	int status;

	list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
	list_create(&i40e_dlist, sizeof (i40e_device_t),
	    offsetof(i40e_device_t, id_link));

	mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
	mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);

	status = mod_install(&i40e_modlinkage);
	if (status != DDI_SUCCESS) {
		/* Undo setup in reverse order of creation. */
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}
/* Module _info(9E): report module information via the linkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}
/*
 * Module _fini(9E): remove the module and, only on success, tear down
 * the mac ops, global lock, and lists created in _init().
 */
int
_fini(void)
{
	int status;

	status = mod_remove(&i40e_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}