Call sites of page_to_netmem() in the tree, one match per line (continuations of multi-line calls are not shown):
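For context, page_to_netmem() itself is only a type conversion. A minimal sketch of the helper, assuming the include/net/netmem.h definition in recent kernels (treat the exact annotations as an assumption):

/* Sketch only: a netmem_ref is an opaque handle that may refer either to
 * a struct page or to a net_iov; the low bit distinguishes the two, and
 * page pointers have that bit clear, so wrapping a page is a plain cast.
 */
static inline netmem_ref page_to_netmem(struct page *page)
{
	return (__force netmem_ref)page;
}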
mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
__skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
__skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
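The six calls above are the page-flavoured skb frag helpers in include/linux/skbuff.h; each one converts its struct page argument and forwards to the corresponding netmem variant. A sketch of one such wrapper (presumably skb_frag_fill_page_desc(), the wrapper around the skb_frag_fill_netmem_desc() call shown above):

/* Sketch: the legacy page-based frag API stays as a thin wrapper that
 * converts the page to a netmem_ref and calls the netmem implementation.
 */
static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
					   struct page *page,
					   int off, int size)
{
	skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
}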
return page_to_netmem(virt_to_page(data));
return page_to_netmem(compound_head(netmem_to_page(netmem)));
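These two returns appear to be the bodies of virt_to_netmem() and netmem_compound_head() in include/net/netmem.h; a sketch under that assumption:

/* Sketch: resolve a kernel virtual address to its page, then wrap it. */
static inline netmem_ref virt_to_netmem(const void *data)
{
	return page_to_netmem(virt_to_page(data));
}

/* Sketch: recent kernels first return net_iov-backed netmems unchanged,
 * since net_iovs are never compound; only real pages need compound_head().
 */
static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
	if (netmem_is_net_iov(netmem))
		return netmem;

	return page_to_netmem(compound_head(netmem_to_page(netmem)));
}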
page_pool_fragment_netmem(page_to_netmem(page), nr);
return page_pool_unref_netmem(page_to_netmem(page), nr);
page_pool_ref_netmem(page_to_netmem(page));
page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
return page_pool_get_dma_addr_netmem(page_to_netmem(page));
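The six calls above are the page-flavoured page_pool helpers (page_pool_fragment_page(), page_pool_put_page() and friends); as with the skb helpers, each converts its page argument and defers to the netmem variant. A sketch of two of them, with signatures assumed from include/net/page_pool/helpers.h:

/* Sketch: page_pool_put_page() forwards to the netmem implementation. */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
			     allow_direct);
}

/* Sketch: the "full" put passes -1, i.e. DMA-sync the whole buffer. */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}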
__page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
page_pool_set_pp_info(pool, page_to_netmem(page));
trace_page_pool_state_hold(pool, page_to_netmem(page),
return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
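Apart from the DMA release call, the net/core/page_pool.c hits above come from the internal high-order allocation path: the allocation still produces a struct page, which is DMA-mapped, tagged with its pp info and traced through its netmem_ref, and the slow path then returns it wrapped as a netmem (the return just above). A condensed sketch of that flow, with the structure assumed from these calls and field/helper names as in recent page_pool.c:

/* Sketch of the order > 0 allocation path. */
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if (pool->dma_map &&
	    unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page_to_netmem(page));

	/* Track how many pages are held "in-flight". */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page_to_netmem(page),
				   pool->pages_state_hold_cnt);
	return page;
}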
page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size,
return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
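The napi_pp_put_page() call above is the skb head-recycling check in net/core/skbuff.c: when an skb's linear buffer came from a page_pool, the data pointer is resolved back to its page and released as a netmem. A sketch assuming the skb_pp_recycle() shape in recent kernels:

/* Sketch: recycle the skb head back into its page_pool if possible. */
static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}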
__skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
__skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
__skb_fill_netmem_desc(to, 0, page_to_netmem(page),
skb_page_unref(page_to_netmem(sg_page(sg)),
skb_page_unref(page_to_netmem(sg_page(sg)),
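The last two (identical) lines are presumably the IPv4 and IPv6 ESP paths, which release the pages referenced from an ESP scatterlist through the netmem-aware skb_page_unref(). A sketch of the loop, with the surrounding structure assumed:

	/* Sketch: unref the skb frag pages attached to the source
	 * scatterlist; passing skb->pp_recycle lets page_pool pages be
	 * recycled rather than freed outright.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);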