#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
/*
 * DDI access attributes used when mapping the guest domain's (virtual)
 * register set: structural little-endian access, strictly ordered.
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};
/*
 * nxge_guest_regs_map
 *
 *	Map in the guest domain's (virtual) register set and initialize
 *	the three NPI handles to share that single mapping.
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	<is_vraddr> is set B_TRUE on every handle so the NPI layer knows
 *	these are virtual-region addresses.
 *
 *	Fixes vs. previous revision: the address-of operators passed to
 *	ddi_dev_regsize()/ddi_regs_map_setup() had been corrupted (they
 *	did not compile), and dev_regs was leaked on the error paths.
 *
 * Context:
 *	Guest domain
 */
int
nxge_guest_regs_map(nxge_t *nxge)
{
	dev_regs_t *regs;
	off_t regsize;
	int rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU;

	/* Set up the device-specific function number. */
	nxge->function_num = nxge->instance;

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		/* Don't leak the dev_regs allocation on failure. */
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp,
	    0, 0, &nxge_guest_register_access_attributes, &regs->nxge_regh);
	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = NULL;
		return (NXGE_ERROR);
	}

	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* The register-access handle (same underlying mapping). */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* The virtual-region handle (same underlying mapping). */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}
/*
 * nxge_guest_regs_map_free
 *
 *	Tear down the guest domain's register mapping (if one exists)
 *	and release the dev_regs bookkeeping structure allocated by
 *	nxge_guest_regs_map().
 *
 * Context:
 *	Guest domain
 */
void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	dev_regs_t *regs;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	regs = nxge->dev_regs;
	if (regs) {
		/* Unmap the registers before freeing the structure. */
		if (regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&regs->nxge_regh);
			regs->nxge_regh = NULL;
		}
		kmem_free(regs, sizeof (dev_regs_t));
		nxge->dev_regs = 0;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}
#if defined(sun4v)
static nxge_hio_dc_t *nxge_guest_dc_alloc(
nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);
static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);
/*
 * nxge_hio_vr_add
 *
 *	Add a virtualization region (VR) to this guest driver instance:
 *	read the HV cookie from the "reg" property, query the hypervisor
 *	for the VR's address & size, reserve a free slot in the shared
 *	VR table, then claim the TDCs/RDCs the service domain assigned
 *	to this VR and register with the MAC layer.
 *
 * Arguments:
 *	nxge
 *
 * Returns:
 *	DDI_SUCCESS / DDI_FAILURE
 *
 * Notes:
 *	Fixes vs. previous revision: the address-of operators in the
 *	"reg" property lookup had been corrupted (did not compile), and
 *	the reserved VR slot / claimed DCs were leaked on the failure
 *	paths after the slot was taken.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern nxge_status_t nxge_mac_register(p_nxge_t);

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	nxge_hio_dc_t *dc;
	int *reg_val;
	uint_t reg_len;
	uint8_t vr_index;
	nxhv_vr_fp_t *fp;
	uint64_t vr_address, vr_size;
	uint32_t cookie;
	nxhv_dc_fp_t *tx, *rx;
	uint64_t tx_map, rx_map;
	uint64_t hv_rv;
	int i;
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	if (nhd->type == NXGE_HIO_TYPE_SERVICE) {
		/* A VR can only be added in a guest domain. */
		ASSERT(nhd->type == NXGE_HIO_TYPE_GUEST);
		return (DDI_FAILURE);
	}

	/* Get our HV cookie from the first "reg" property entry. */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (DDI_FAILURE);
	}

	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (DDI_FAILURE);
	}

	/* Reserve the next free slot in the shared VR table. */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (DDI_FAILURE);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/* Create the (single) TX and RX groups if not present yet. */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		goto fail;
	}

	/* Claim the transmit DMA channels assigned to this VR. */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			goto fail;
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & tx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_TRANSMIT_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					goto fail;
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	/* Claim the receive DMA channels assigned to this VR. */
	rx = &nhd->hio.rx;
	if (rx->get_map) {
		hv_rv = (*rx->get_map)(vr->cookie, &rx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "rx->get_map() failed"));
			goto fail;
		}
		res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map);

		for (i = 0; i < VP_CHANNEL_MAX; i++) {
			if ((1 << i) & rx_map) {
				dc = nxge_guest_dc_alloc(nxge, vr,
				    NXGE_RECEIVE_GROUP);
				if (dc == 0) {
					NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
					    "DC add failed"));
					goto fail;
				}
				dc->channel = (nxge_channel_t)i;
			}
		}
	}

	status = nxge_mac_register(nxge);
	if (status != NXGE_OK) {
		cmn_err(CE_WARN, "nxge(%d): nxge_mac_register failed\n",
		    nxge->instance);
		goto fail;
	}

	nxge->hio_vr = vr;	/* For faster lookups. */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add"));

	return (DDI_SUCCESS);

fail:
	/*
	 * Undo the reservations made above: release any DCs claimed
	 * for this VR, then the VR slot itself.  Without this, failing
	 * here leaked the slot permanently, since nxge_hio_vr_release()
	 * bails out early whenever nxge->hio_vr was never set.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		if (nhd->tdc[i].vr == vr)
			nhd->tdc[i].vr = 0;
	}
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		if (nhd->rdc[i].vr == vr)
			nhd->rdc[i].vr = 0;
	}
	nhd->vr[vr_index].nxge = (uintptr_t)NULL;
	MUTEX_EXIT(&nhd->lock);

	return (DDI_FAILURE);
}
/*
 * nxge_guest_dc_alloc
 *
 *	Find and reserve an unused DMA channel descriptor of the
 *	requested type (transmit or receive) in the shared HIO data,
 *	binding it to the caller's VR.
 *
 * Returns:
 *	The reserved descriptor, or 0 if none are free.
 *
 * Context:
 *	Guest domain
 */
nxge_hio_dc_t *
nxge_guest_dc_alloc(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	nxge_grp_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_hio_dc_t *end;

	/* Pick the descriptor table matching the requested type. */
	if (type == NXGE_TRANSMIT_GROUP) {
		dc = &nhd->tdc[0];
		end = dc + NXGE_MAX_TDCS;
	} else {
		dc = &nhd->rdc[0];
		end = dc + NXGE_MAX_RDCS;
	}

	MUTEX_ENTER(&nhd->lock);
	while (dc < end) {
		if (dc->vr == 0) {
			/* Claim this descriptor for the caller's VR. */
			dc->vr = vr;
			dc->cookie = vr->cookie;
			MUTEX_EXIT(&nhd->lock);
			return (dc);
		}
		dc++;
	}
	MUTEX_EXIT(&nhd->lock);

	return (0);
}
/*
 * nxge_hio_get_dc_htable_idx
 *
 *	Look up the LDG vector of the DC bound to <channel>.
 *
 * Returns:
 *	The LDG vector, or -1 if the channel's DC cannot be found.
 *
 * Context:
 *	Guest domain only.
 */
int
nxge_hio_get_dc_htable_idx(nxge_t *nxge, vpc_type_t type, uint32_t channel)
{
	nxge_hio_dc_t *dc;

	ASSERT(isLDOMguest(nxge));

	dc = nxge_grp_dc_find(nxge, type, channel);
	return ((dc == NULL) ? -1 : dc->ldg.vector);
}
/*
 * res_map_parse
 *
 *	Parse a hypervisor resource map.  The low byte of <res_map> is
 *	a bitmask of the DMA channels (slots 0-7) assigned to this VR.
 *	Record the first channel, the channel count, and the map itself
 *	in the port/group configuration structures.
 *
 * Arguments:
 *	nxge
 *	type	NXGE_TRANSMIT_GROUP or NXGE_RECEIVE_GROUP
 *	res_map	The resource map from the hypervisor.
 *
 * Notes:
 *	Fixes vs. previous revision: <first> was read uninitialized for
 *	an empty map, and the old "if (count == 1) first = slot;" logic
 *	kept advancing <first> past the first set bit (e.g. a map of
 *	0x01 yielded first == 7).  <first> is now latched on the first
 *	set bit only.
 *
 * Context:
 *	Guest domain
 */
void
res_map_parse(
	nxge_t *nxge,
	nxge_grp_type_t type,
	uint64_t res_map)
{
	uint8_t slots, mask, slot;
	int first, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t *group;

	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/* Scan the bitmap: latch the first set bit and count them all. */
	first = 0;
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask) {
			if (count == 0)
				first = slot;
			count++;
		}
		mask <<= 1;
	}

	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];

	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		tdc_grp->start_tdc = first;
		tdc_grp->max_tdcs = (uint8_t)count;
		tdc_grp->grp_index = group->index;
		tdc_grp->map = slots;

		group->map = slots;

		/* Pointless in a guest, but for consistency's sake. */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}
/*
 * nxge_hio_vr_release
 *
 *	Release this guest's virtualization region: tear down the
 *	interrupts, the RX/TX DMA channels and both channel groups,
 *	then give the VR slot back to the shared table.
 *
 * Returns:
 *	NXGE_OK (always; a no-op if no VR was ever added).
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL)
		return (NXGE_OK);

	/* Interrupts first, then the DMA channels. */
	nxge_hio_intr_uninit(nxge);
	nxge_uninit_rxdma_channels(nxge);
	nxge_uninit_txdma_channels(nxge);

	/* Remove the (single) RX and TX groups, if present. */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/* Hand our slot in the shared VR table back. */
	MUTEX_ENTER(&nhd->lock);
	vr_index = 0;
	while (vr_index < FUNC_VIR_MAX) {
		if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) {
			nhd->vr[vr_index].nxge = (uintptr_t)NULL;
			break;
		}
		vr_index++;
	}
	MUTEX_EXIT(&nhd->lock);

	return (NXGE_OK);
}
#if defined(NIU_LP_WORKAROUND)
/*
 * nxge_tdc_lp_conf
 *
 *	Configure the hypervisor logical pages for a transmit DMA
 *	channel: page 0 covers the data buffer area, page 1 the
 *	control (descriptor) area.
 *
 * Arguments:
 *	nxge
 *	channel	The TDC to configure.
 *
 * Returns:
 *	NXGE_OK, NXGE_ERROR, or NXGE_ERROR OR'ed with the hypervisor
 *	return code on a failed page-conf call.
 *
 * Context:
 *	Guest domain (NIU_LP_WORKAROUND builds only)
 */
nxge_status_t
nxge_tdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t *dc;
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	tx_ring_t *ring;
	uint64_t hv_rv;
	uint64_t ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf"));

	ring = nxge->tx_rings->rings[channel];

	/* Already configured for this ring; nothing to do. */
	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel)))
		return (NXGE_ERROR);

	/*
	 * Page 0: the transmit data buffers.  Record the original
	 * (unrelocated) I/O address and length in the ring, then
	 * hand them to the hypervisor.
	 */
	data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 0 data buf) hv: %d "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_buf_base_ioaddr_pp,
		    ring->hv_tx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the configuration back, for debug logging only. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ra, size));

	/*
	 * Page 1: the transmit control (descriptor) area.
	 */
	control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_tdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_tx_cntl_base_ioaddr_pp,
		    ring->hv_tx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read back page 1 as well, for debug logging only. */
	ra = size = 0;
	hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_tdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp,
	    ring->hv_tx_cntl_ioaddr_size, ra, size));

	/* Mark the ring so we don't configure it twice. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf"));

	return (NXGE_OK);
}
/*
 * nxge_rdc_lp_conf
 *
 *	Configure the hypervisor logical pages for a receive DMA
 *	channel: page 0 covers the data buffer area, page 1 the
 *	control (descriptor) area.
 *
 * Arguments:
 *	nxge
 *	channel	The RDC to configure.
 *
 * Returns:
 *	NXGE_OK, NXGE_ERROR, or NXGE_ERROR OR'ed with the hypervisor
 *	return code on a failed page-conf call.
 *
 * Context:
 *	Guest domain (NIU_LP_WORKAROUND builds only)
 */
nxge_status_t
nxge_rdc_lp_conf(
	p_nxge_t nxge,
	int channel)
{
	nxge_hio_dc_t *dc;
	nxge_dma_common_t *data;
	nxge_dma_common_t *control;
	rx_rbr_ring_t *ring;
	uint64_t hv_rv;
	uint64_t ra, size;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf"));

	ring = nxge->rx_rbr_rings->rbr_rings[channel];

	/* Already configured for this ring; nothing to do. */
	if (ring->hv_set) {
		return (NXGE_OK);
	}

	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)))
		return (NXGE_ERROR);

	/*
	 * Page 0: the receive data buffers.  Record the original
	 * (unrelocated) I/O address and length in the ring, then
	 * hand them to the hypervisor.
	 */
	data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, 0,
	    ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 0 data buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_buf_base_ioaddr_pp,
		    ring->hv_rx_buf_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read the configuration back, for debug logging only. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, 0, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 0 data buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp,
	    ring->hv_rx_buf_ioaddr_size, ra, size));

	/*
	 * Page 1: the receive control (descriptor) area.
	 */
	control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel];
	ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength;

	hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie,
	    (uint64_t)channel, (uint64_t)1,
	    ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size);

	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_rdc_lp_conf: channel %d "
		    "(page 1 cntl buf) hv_rv 0x%llx "
		    "ioaddr_pp $%p size 0x%llx ",
		    channel, hv_rv,
		    ring->hv_rx_cntl_base_ioaddr_pp,
		    ring->hv_rx_cntl_ioaddr_size));
		return (NXGE_ERROR | hv_rv);
	}

	/* Read back page 1 as well, for debug logging only. */
	ra = size = 0;
	hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie,
	    (uint64_t)channel, (uint64_t)1, &ra, &size);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "==> nxge_rdc_lp_conf: channel %d "
	    "(page 1 cntl buf) hv_rv 0x%llx "
	    "set ioaddr_pp $%p set size 0x%llx "
	    "get ra ioaddr_pp $%p get size 0x%llx ",
	    channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp,
	    ring->hv_rx_cntl_ioaddr_size, ra, size));

	/* Mark the ring so we don't configure it twice. */
	ring->hv_set = B_TRUE;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf"));

	return (NXGE_OK);
}
#endif
#define NXGE_GUEST_TIMER 500
/*
 * nxge_hio_start_timer
 *
 *	Locate the VR slot belonging to this nxge instance and start
 *	the guest-state watchdog (nxge_check_guest_state) on it.  Does
 *	nothing if this instance owns no VR slot.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_start_timer(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	int region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start"));

	/* Find the VR slot that belongs to us. */
	MUTEX_ENTER(&nhd->lock);
	region = FUNC0_VIR0;
	while (region < NXGE_VR_SR_MAX) {
		if (nhd->vr[region].nxge == (uintptr_t)nxge)
			break;
		region++;
	}
	MUTEX_EXIT(&nhd->lock);

	if (region == NXGE_VR_SR_MAX)
		return;

	vr = (nxge_hio_vr_t *)&nhd->vr[region];

	/* Arm the watchdog callout. */
	nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state,
	    (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER));

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start"));
}
/*
 * nxge_check_guest_state
 *
 *	timeout(9F) callback: while the MAC is started, run the
 *	transmit-hang check and reschedule itself; otherwise let the
 *	timer lapse.
 *
 * Arguments:
 *	vr	The virtualization region this guest belongs to.
 *
 * Notes:
 *	The unused "nxge_check_guest_state_exit" label (a leftover that
 *	only produced a compiler warning) has been removed.
 *
 * Context:
 *	Guest domain (callout context)
 */
void
nxge_check_guest_state(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;

	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state"));

	MUTEX_ENTER(nxge->genlock);
	nxge->nxge_timerid = 0;

	if (nxge->nxge_mac_state == NXGE_MAC_STARTED) {
		nxge_check_tx_hang(nxge);

		/* Reschedule ourselves for the next tick. */
		nxge->nxge_timerid = timeout((void(*)(void *))
		    nxge_check_guest_state, (caddr_t)vr,
		    drv_usectohz(1000 * NXGE_GUEST_TIMER));
	}

	MUTEX_EXIT(nxge->genlock);
	NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state"));
}
/*
 * nxge_hio_rdc_intr_arm
 *
 *	Arm or disarm the logical-group interrupt of every RDC owned
 *	by this guest.
 *
 * Arguments:
 *	nxge
 *	arm	B_TRUE to arm, B_FALSE to disarm.
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR if the device is not a guest,
 *	the MAC is not started, the LDG table is absent, or a channel's
 *	DC cannot be found.
 *
 * Notes:
 *	Fix vs. previous revision: the old code tested
 *	"&nxge->ldgvp->ldgp[...] == NULL", which is never true (the
 *	address of an array element cannot be NULL).  The meaningful
 *	check is on the ldgvp table itself, done up front.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm)
{
	nxge_grp_t *group;
	uint32_t channel;
	nxge_hio_dc_t *dc;
	nxge_ldg_t *ldgp;

	/* Validate state: guest driver, MAC started, LDG table present. */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);
	if (nxge->ldgvp == NULL)
		return (NXGE_ERROR);

	group = nxge->rx_set.group[0];
	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		if ((1 << channel) & group->map) {
			dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel);
			if (dc == NULL)
				return (NXGE_ERROR);

			ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector];
			ldgp->arm = arm;

			nxge_hio_ldgimgn(nxge, ldgp);
		}
	}

	return (NXGE_OK);
}
/*
 * nxge_hio_rdc_enable
 *
 *	Enable every RDC owned by this guest and clear each channel's
 *	RBR-empty condition.
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR if the device is not a guest,
 *	the MAC is not started, or an NPI enable call fails.
 *
 * Context:
 *	Guest domain
 */
nxge_status_t
nxge_hio_rdc_enable(p_nxge_t nxge)
{
	nxge_grp_t *group;
	npi_handle_t handle;
	uint32_t channel;
	npi_status_t rval;

	/* Only meaningful for a started guest-domain driver. */
	if (!isLDOMguest(nxge))
		return (NXGE_ERROR);
	if (nxge->nxge_mac_state != NXGE_MAC_STARTED)
		return (NXGE_ERROR);

	handle = NXGE_DEV_NPI_HANDLE(nxge);
	group = nxge->rx_set.group[0];

	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		if (((1 << channel) & group->map) == 0)
			continue;

		rval = npi_rxdma_cfg_rdc_enable(handle, channel);
		if (rval != NPI_SUCCESS)
			return (NXGE_ERROR);

		/* Clear any stale RBR-empty indication. */
		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
	}

	return (NXGE_OK);
}
#endif