rx_pool
struct rx_pool *rpp;
rpp = &vc->rcv.rx_pool;
flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp)
flush_rx_pool(card, &vc->rcv.rx_pool);
if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
flush_rx_pool(card, &vc->rcv.rx_pool);
if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
static void flush_rx_pool(struct idt77252_dev *, struct rx_pool *);
struct rx_pool *);
struct rx_pool rx_pool;
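The fragments above are from the idt77252 ATM driver, where each VC keeps a struct rx_pool built around an sk_buff queue. A minimal sketch of the flush/recycle pair, assuming struct rx_pool wraps a struct sk_buff_head plus a length count, and that a per-skb recycler (recycle_rx_skb here, an assumed helper) hands buffers back to the card:

    static void flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
    {
            rpp->len = 0;
            skb_queue_head_init(&rpp->queue);
    }

    static void recycle_rx_pool_skb(struct idt77252_dev *card,
                                    struct rx_pool *rpp)
    {
            struct sk_buff *skb, *tmp;

            /* hand every queued skb back to the card's free-buffer queue */
            skb_queue_walk_safe(&rpp->queue, skb, tmp)
                    recycle_rx_skb(card, skb);

            flush_rx_pool(card, rpp);
    }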
kfree(rx_pool->free_map);
free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
rx_pool->rx_buff[j].skb = NULL;
kfree(rx_pool->rx_buff);
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
if (!adapter->rx_pool)
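Those ibmvnic lines free the pools in a fixed order: per-buffer skbs first, then the rx_buff array, the free_map, the long-term buffer set, and finally the pool array itself. A hedged reconstruction of that teardown (using num_active_rx_pools as the loop bound is an assumption):

    static void release_rx_pools(struct ibmvnic_adapter *adapter)
    {
            struct ibmvnic_rx_pool *rx_pool;
            int i, j;

            if (!adapter->rx_pool)
                    return;

            for (i = 0; i < adapter->num_active_rx_pools; i++) {
                    rx_pool = &adapter->rx_pool[i];

                    kfree(rx_pool->free_map);
                    free_ltb_set(adapter, &rx_pool->ltb_set);

                    if (!rx_pool->rx_buff)
                            continue;

                    /* drop any skb still parked in a buffer slot */
                    for (j = 0; j < rx_pool->size; j++) {
                            if (rx_pool->rx_buff[j].skb) {
                                    dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                    rx_pool->rx_buff[j].skb = NULL;
                            }
                    }
                    kfree(rx_pool->rx_buff);
            }

            kfree(adapter->rx_pool);
            adapter->rx_pool = NULL;
    }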
struct ibmvnic_rx_pool *rx_pool;
adapter->rx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
if (!adapter->rx_pool) {
rx_pool = &adapter->rx_pool[i];
rx_pool->size = pool_size;
rx_pool->index = i;
rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), GFP_KERNEL);
if (!rx_pool->free_map) {
rx_pool->rx_buff = kcalloc(rx_pool->size, sizeof(struct ibmvnic_rx_buff),
			   GFP_KERNEL);
if (!rx_pool->rx_buff) {
rx_pool = &adapter->rx_pool[i];
i, rx_pool->size, rx_pool->buff_size);
rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
rx_pool->size, rx_pool->buff_size);
for (j = 0; j < rx_pool->size; ++j) {
rx_pool->free_map[j] = j;
rx_buff = &rx_pool->rx_buff[j];
atomic_set(&rx_pool->available, 0);
rx_pool->next_alloc = 0;
rx_pool->next_free = 0;
rx_pool->active = 1;
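The free_map built above acts as a ring of free buffer indices: the replenish side consumes an index at next_free, and the completion side returns an index at next_alloc. A sketch of that exchange, assuming IBMVNIC_INVALID_MAP marks a consumed slot as in the mainline driver:

    /* replenish side: claim the next free buffer index */
    index = rx_pool->free_map[rx_pool->next_free];
    rx_pool->free_map[rx_pool->next_free] = IBMVNIC_INVALID_MAP;
    rx_pool->next_free = (rx_pool->next_free + 1) % rx_pool->size;

    /* completion side: return the buffer's index to the map */
    rx_pool->free_map[rx_pool->next_alloc] = (int)(rx_buff - rx_pool->rx_buff);
    rx_pool->next_alloc = (rx_pool->next_alloc + 1) % rx_pool->size;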
struct ibmvnic_rx_pool *rx_pool;
if (!adapter->rx_pool)
rx_pool = &adapter->rx_pool[i];
if (!rx_pool || !rx_pool->rx_buff)
rx_buff = &rx_pool->rx_buff[j];
!adapter->rx_pool ||
struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
(atomic_read(&adapter->rx_pool[scrq_num].available) <
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
adapter->rx_pool[i].active = 0;
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
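Refill is gated twice: the pool must still be active, and the available count must have fallen below a watermark before the completion path bothers to replenish. A sketch of that check; the watermark expression is an assumption, since the comparison above is truncated:

    struct ibmvnic_rx_pool *pool = &adapter->rx_pool[scrq_num];

    if (pool->active && atomic_read(&pool->available) < pool->size / 2)
            replenish_rx_pool(adapter, pool);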
struct ibmvnic_rx_pool *rx_pool;
if (!adapter->rx_pool)
rx_pool = &adapter->rx_pool[i];
struct ibmvnic_rx_pool *rx_pool;
struct page_pool *rx_pool[NUM_RX_QUEUE];
page_pool_put_page(priv->rx_pool[q],
page_pool_put_page(priv->rx_pool[q],
page_pool_destroy(priv->rx_pool[q]);
rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
priv->rx_pool[q] = page_pool_create(&params);
if (IS_ERR(priv->rx_pool[q]))
page_pool_put_page(priv->rx_pool[q],
page_pool_put_page(priv->rx_pool[q],
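These lines use the generic page_pool API (the NUM_RX_QUEUE array suggests a renesas driver such as ravb): one pool per rx queue, created with page_pool_create(), filled via page_pool_alloc(), drained with page_pool_put_page(), and torn down with page_pool_destroy(). A self-contained creation sketch; the pool_size and device fields are assumptions, not the driver's values:

    #include <net/page_pool/helpers.h>

    static struct page_pool *create_rx_page_pool(struct device *dev)
    {
            struct page_pool_params params = {
                    .order     = 0,
                    .flags     = PP_FLAG_DMA_MAP,   /* pool handles DMA mapping */
                    .pool_size = 1024,              /* assumed ring depth */
                    .nid       = NUMA_NO_NODE,
                    .dev       = dev,
                    .dma_dir   = DMA_FROM_DEVICE,
            };

            return page_pool_create(&params);       /* ERR_PTR() on failure */
    }

Buffers then come from page_pool_alloc(pool, &offset, &size, GFP_ATOMIC) and go back through page_pool_put_page(); page_pool_destroy() must only run once the hardware holds no more pages.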
void *rx_pool;
if (!IS_ERR_OR_NULL(netcp->rx_pool))
netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
if (IS_ERR_OR_NULL(netcp->rx_pool)) {
ret = PTR_ERR(netcp->rx_pool);
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
knav_pool_desc_put(netcp->rx_pool, desc);
knav_pool_desc_put(netcp->rx_pool, desc);
desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
knav_pool_desc_put(netcp->rx_pool, ndesc);
knav_pool_desc_put(netcp->rx_pool, desc);
knav_pool_desc_put(netcp->rx_pool, desc);
desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
knav_pool_desc_put(netcp->rx_pool, desc);
knav_pool_desc_put(netcp->rx_pool, desc);
knav_pool_desc_put(netcp->rx_pool, desc);
if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
knav_pool_destroy(netcp->rx_pool);
netcp->rx_pool = NULL;
hwdesc = knav_pool_desc_get(netcp->rx_pool);
knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
knav_pool_desc_put(netcp->rx_pool, hwdesc);
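The netcp fragments exercise the TI knav descriptor-pool API end to end: knav_pool_create() carves descriptors out of a region, knav_pool_desc_get()/knav_pool_desc_map() hand a descriptor and its DMA view to the hardware queue, knav_pool_desc_unmap()/knav_pool_desc_put() reverse that on completion, and knav_pool_count() verifies every descriptor came home before knav_pool_destroy(). A condensed sketch of one round trip (region id, queue push, and error unwinding assumed or omitted):

    #include <linux/soc/ti/knav_qmss.h>
    #include <linux/soc/ti/knav_dma.h>

    static int rx_pool_round_trip(int pool_size, int region_id)
    {
            struct knav_dma_desc *hwdesc;
            unsigned dma_sz;
            dma_addr_t dma;
            void *pool;

            pool = knav_pool_create("rx-pool", pool_size, region_id);
            if (IS_ERR_OR_NULL(pool))
                    return -ENOMEM;

            hwdesc = knav_pool_desc_get(pool);
            if (!IS_ERR_OR_NULL(hwdesc)) {
                    /* publish the descriptor's DMA address to the hw queue */
                    knav_pool_desc_map(pool, hwdesc, sizeof(*hwdesc),
                                       &dma, &dma_sz);

                    /* completion path: translate the address back, recycle */
                    hwdesc = knav_pool_desc_unmap(pool, dma, dma_sz);
                    knav_pool_desc_put(pool, hwdesc);
            }

            if (knav_pool_count(pool) != pool_size)
                    pr_warn("rx descriptors leaked\n");
            knav_pool_destroy(pool);
            return 0;
    }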
struct usb_anchor rx_pool;
init_usb_anchor(&ar->rx_pool);
urb = usb_get_from_anchor(&ar->rx_pool);
usb_anchor_urb(urb, &ar->rx_pool);
usb_anchor_urb(urb, &ar->rx_pool);
usb_anchor_urb(urb, &ar->rx_pool);
usb_anchor_urb(urb, &ar->rx_pool);
usb_scuttle_anchored_urbs(&ar->rx_pool);
usb_scuttle_anchored_urbs(&ar->rx_pool);
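carl9170-style rx URB pooling: unused URBs are parked on a usb_anchor (which holds its own reference), fetched with usb_get_from_anchor() when the rx path needs one, and the whole pool is drained with usb_scuttle_anchored_urbs() on teardown. A sketch of the fetch/park pair (field names as above, helper names assumed):

    static struct urb *ar_get_rx_urb(struct ar9170 *ar)
    {
            /* returns a referenced urb, or NULL if the pool is empty */
            return usb_get_from_anchor(&ar->rx_pool);
    }

    static void ar_park_rx_urb(struct ar9170 *ar, struct urb *urb)
    {
            usb_anchor_urb(urb, &ar->rx_pool); /* anchor takes a reference */
            usb_free_urb(urb);                 /* drop ours; the anchor keeps it alive */
    }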
struct iwl_rx_mem_buffer *rx_pool;
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
kfree(trans_pcie->rx_pool);
if (!trans_pcie->rx_pool)
if (!trans_pcie->rx_pool[i].page)
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_pool[i].page = NULL;
trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			      sizeof(trans_pcie->rx_pool[0]), GFP_KERNEL);
if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
kfree(trans_pcie->rx_pool);
trans_pcie->rx_pool = NULL;
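The iwlwifi pcie pool is a flat array of iwl_rx_mem_buffer entries; teardown walks it, unmapping and freeing any page still held. A sketch of that loop, on the assumption that RX_POOL_SIZE() and rx_page_order match the driver's internals:

    for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
            if (!trans_pcie->rx_pool[i].page)
                    continue;
            dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                           PAGE_SIZE << trans_pcie->rx_page_order,
                           DMA_FROM_DEVICE);
            __free_pages(trans_pcie->rx_pool[i].page,
                         trans_pcie->rx_page_order);
            trans_pcie->rx_pool[i].page = NULL;
    }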
u32 rx_pool;
DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
DEBUGFS_FWSTATS_DEL(event, rx_pool);
DEBUGFS_FWSTATS_ADD(event, rx_pool);
__le32 rx_pool;
DEBUGFS_FWSTATS_ADD(event, rx_pool);
WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
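Both the wl1251 and wl12xx variants of these macros expand to one read-only debugfs file per firmware counter, plus matching add/del bookkeeping. Ignoring the on-read firmware refresh the real macros perform, the net effect is roughly this single call (directory and struct names assumed):

    debugfs_create_u32("rx_pool", 0400, stats_dir, &stats->event.rx_pool);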
INIT_LIST_HEAD(&hw->rx_pool);
list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
struct list_head rx_pool;
if (!list_empty(&hw->rx_pool)) {
packet = list_first_entry(&hw->rx_pool,
list_add(&packet->queue, &hw->rx_pool);
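The last fragments implement a free-list packet pool on a plain list_head: INIT_LIST_HEAD() at setup, list_first_entry() to take a free packet, list_add() to put one back, and a safe walk to drain it at teardown. A sketch of the get/put pair (locking omitted; struct names assumed):

    static struct rx_packet *rx_pool_get(struct hw_priv *hw)
    {
            struct rx_packet *packet = NULL;

            if (!list_empty(&hw->rx_pool)) {
                    packet = list_first_entry(&hw->rx_pool,
                                              struct rx_packet, queue);
                    list_del(&packet->queue);
            }
            return packet;
    }

    static void rx_pool_put(struct hw_priv *hw, struct rx_packet *packet)
    {
            list_add(&packet->queue, &hw->rx_pool);
    }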