Symbol: pools
arch/powerpc/include/asm/iommu.h
110
struct iommu_pool pools[IOMMU_NR_POOLS];
arch/powerpc/kernel/iommu.c
254
pool = &(tbl->pools[pool_nr]);
arch/powerpc/kernel/iommu.c
282
pool = &(tbl->pools[0]);
arch/powerpc/kernel/iommu.c
304
pool = &tbl->pools[pool_nr];
arch/powerpc/kernel/iommu.c
431
p = &tbl->pools[pool_nr];
arch/powerpc/kernel/iommu.c
757
p = &tbl->pools[i];
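
The powerpc entries above implement lock splitting: the table's DMA window is divided into IOMMU_NR_POOLS sub-ranges, each with its own spinlock, and an allocating CPU hashes to one of them so concurrent mappings rarely contend on a single allocator lock. A minimal sketch of the idea; the container struct here is a hypothetical simplification (the real struct iommu_table carries much more state), and the CPU-modulo hash stands in for the kernel's pool hash:

    #include <linux/smp.h>
    #include <linux/spinlock.h>

    #define IOMMU_NR_POOLS 4

    struct iommu_pool {
            unsigned long start;            /* first entry of this pool */
            unsigned long end;              /* last entry of this pool */
            unsigned long hint;             /* next-fit search position */
            spinlock_t lock;
    };

    struct iommu_table_like {               /* hypothetical, simplified */
            struct iommu_pool pools[IOMMU_NR_POOLS];
    };

    /* Hash the current CPU to a pool so allocators spread across locks. */
    static struct iommu_pool *pick_pool(struct iommu_table_like *tbl)
    {
            unsigned int pool_nr = raw_smp_processor_id() % IOMMU_NR_POOLS;

            return &tbl->pools[pool_nr];
    }
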
arch/sparc/include/asm/iommu-common.h
26
struct iommu_pool pools[IOMMU_NR_POOLS];
arch/sparc/kernel/iommu-common.c
82
spin_lock_init(&(iommu->pools[i].lock));
arch/sparc/kernel/iommu-common.c
83
iommu->pools[i].start = start;
arch/sparc/kernel/iommu-common.c
84
iommu->pools[i].hint = start;
arch/sparc/kernel/iommu-common.c
86
iommu->pools[i].end = start - 1;
arch/sparc/kernel/iommu-common.c
131
pool = &(iommu->pools[pool_nr]);
arch/sparc/kernel/iommu-common.c
161
pool = &(iommu->pools[0]);
arch/sparc/kernel/iommu-common.c
193
pool = &(iommu->pools[pool_nr]);
arch/sparc/kernel/iommu-common.c
237
p = &tbl->pools[pool_nr];
arch/sparc/kernel/pci_sun4v.c
727
pool = &(iommu->pools[pool_nr]);
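
Lines 82-86 of iommu-common.c carve the mapping range into equal per-pool slices; the intervening line (85, not matched by the symbol search) advances start by the pool size. A hedged reconstruction of that init loop, with a simplified stand-in for struct iommu_map_table:

    #include <linux/spinlock.h>

    #define IOMMU_NR_POOLS 4

    struct iommu_pool {
            unsigned long start, end, hint;
            spinlock_t lock;
    };

    struct iommu_map_table_like {           /* simplified stand-in */
            unsigned long poolsize;
            struct iommu_pool pools[IOMMU_NR_POOLS];
    };

    static void init_pools(struct iommu_map_table_like *iommu,
                           unsigned long start)
    {
            int i;

            for (i = 0; i < IOMMU_NR_POOLS; i++) {
                    spin_lock_init(&iommu->pools[i].lock);
                    iommu->pools[i].start = start;
                    iommu->pools[i].hint = start;
                    start += iommu->poolsize;       /* start of next pool */
                    iommu->pools[i].end = start - 1;
            }
    }
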
drivers/md/dm-table.c
1042
struct dm_md_mempools *pools;
drivers/md/dm-table.c
1050
pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
drivers/md/dm-table.c
1051
if (!pools)
drivers/md/dm-table.c
1075
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
drivers/md/dm-table.c
1078
if (bioset_init(&pools->bs, pool_size, front_pad, 0))
drivers/md/dm-table.c
1081
t->mempools = pools;
drivers/md/dm-table.c
1085
dm_free_md_mempools(pools);
drivers/md/dm-thin.c
526
struct list_head pools;
drivers/md/dm-thin.c
532
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
drivers/md/dm-thin.c
543
list_add(&pool->list, &dm_thin_pool_table.pools);
drivers/md/dm-thin.c
558
list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
drivers/md/dm-thin.c
574
list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
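
dm-thin keeps every active pool on a global table list and resolves lookups by walking that list. A minimal restatement of the add/lookup pattern; the key and names are simplified (the real code matches on pool and metadata devices, under a mutex):

    #include <linux/list.h>
    #include <linux/string.h>

    struct thin_pool_like {
            struct list_head list;
            const char *name;
    };

    static LIST_HEAD(pool_table);   /* stand-in for dm_thin_pool_table.pools */

    static struct thin_pool_like *pool_table_lookup(const char *name)
    {
            struct thin_pool_like *tmp;

            list_for_each_entry(tmp, &pool_table, list)
                    if (!strcmp(tmp->name, name))
                            return tmp;
            return NULL;
    }
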
drivers/md/dm.c
3397
void dm_free_md_mempools(struct dm_md_mempools *pools)
drivers/md/dm.c
3399
if (!pools)
drivers/md/dm.c
3402
bioset_exit(&pools->bs);
drivers/md/dm.c
3403
bioset_exit(&pools->io_bs);
drivers/md/dm.c
3405
kfree(pools);
drivers/md/dm.h
227
void dm_free_md_mempools(struct dm_md_mempools *pools);
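
Taken together, the dm-table.c and dm.c entries show the full mempools lifecycle: allocate the struct on the device's NUMA node, initialize both biosets, and tear down with bioset_exit() plus kfree(), tolerating a NULL pointer. A condensed sketch with simplified names; the real struct dm_md_mempools and its bioset flags carry more state:

    #include <linux/bio.h>
    #include <linux/slab.h>

    struct md_pools_like {                  /* simplified stand-in */
            struct bio_set bs;
            struct bio_set io_bs;
    };

    static struct md_pools_like *pools_alloc(int numa_node,
                                             unsigned int pool_size,
                                             unsigned int front_pad,
                                             unsigned int io_front_pad)
    {
            struct md_pools_like *pools;

            pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, numa_node);
            if (!pools)
                    return NULL;

            if (bioset_init(&pools->io_bs, pool_size, io_front_pad, 0))
                    goto out_free;
            if (bioset_init(&pools->bs, pool_size, front_pad, 0))
                    goto out_exit_io;

            return pools;

    out_exit_io:
            bioset_exit(&pools->io_bs);
    out_free:
            kfree(pools);
            return NULL;
    }

    static void pools_free(struct md_pools_like *pools)
    {
            if (!pools)
                    return;
            bioset_exit(&pools->bs);
            bioset_exit(&pools->io_bs);
            kfree(pools);
    }
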
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
348
struct cxgbi_ppm_pool __percpu *pools;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
350
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
367
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
368
pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
370
if (!pools)
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
374
struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
384
return pools;
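
libcxgb_ppm sizes one per-CPU allocation to hold a fixed header plus a variable-length in-use bitmap, which is why alloc_sz is computed by hand before __alloc_percpu() (lines 367-368). A hedged sketch of the same trick; the struct name and fields are illustrative:

    #include <linux/bitmap.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    struct pcpu_pool_like {
            unsigned int base;              /* first index owned by this CPU */
            unsigned int next;              /* allocation hint */
            unsigned long bmap[];           /* flexible bitmap of in-use entries */
    };

    static struct pcpu_pool_like __percpu *
    pcpu_pools_alloc(unsigned int per_cpu_entries)
    {
            size_t alloc_sz = sizeof(struct pcpu_pool_like) +
                              sizeof(unsigned long) *
                              BITS_TO_LONGS(per_cpu_entries);
            struct pcpu_pool_like __percpu *pools;
            unsigned int cpu;

            pools = __alloc_percpu(alloc_sz,
                                   __alignof__(struct pcpu_pool_like));
            if (!pools)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    struct pcpu_pool_like *ppool = per_cpu_ptr(pools, cpu);

                    bitmap_zero(ppool->bmap, per_cpu_entries);
            }
            return pools;
    }
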
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4384
pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4385
pools_params.pools[0].backup_pool = 0;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4386
pools_params.pools[0].buffer_size = priv->rx_buf_size;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2734
dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2735
dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2736
dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
162
pools_params->pools[curr_bp].priority_mask |= (1 << j);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
163
if (!pools_params->pools[curr_bp].priority_mask)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
166
pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
167
pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
168
pools_params->pools[curr_bp++].backup_pool = 0;
drivers/net/ethernet/freescale/dpaa2/dpni.c
179
cpu_to_le16(cfg->pools[i].dpbp_id);
drivers/net/ethernet/freescale/dpaa2/dpni.c
181
cfg->pools[i].priority_mask;
drivers/net/ethernet/freescale/dpaa2/dpni.c
183
cpu_to_le16(cfg->pools[i].buffer_size);
drivers/net/ethernet/freescale/dpaa2/dpni.c
185
DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
drivers/net/ethernet/freescale/dpaa2/dpni.h
119
} pools[DPNI_MAX_DPBP];
drivers/net/ethernet/freescale/dpaa2/dpsw.c
1158
cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
drivers/net/ethernet/freescale/dpaa2/dpsw.c
1160
cpu_to_le16(cfg->pools[i].buffer_size);
drivers/net/ethernet/freescale/dpaa2/dpsw.c
1162
DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
drivers/net/ethernet/freescale/dpaa2/dpsw.h
210
} pools[DPSW_MAX_DPBP];
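
All three dpaa2 drivers fill the same shape of pools[] array (dpbp_id, buffer_size, backup_pool, plus a priority_mask on the XSK path) before handing it to firmware through the set-pools commands serialized in dpni.c/dpsw.c. An illustrative single-pool fill; this config struct is a simplified stand-in, not the exact dpni.h layout:

    struct pools_cfg_like {                 /* simplified stand-in */
            unsigned char num_dpbp;
            struct {
                    int dpbp_id;
                    unsigned short buffer_size;
                    int backup_pool;
                    unsigned char priority_mask;
            } pools[8];                     /* DPNI_MAX_DPBP-sized upstream */
    };

    static void fill_single_pool(struct pools_cfg_like *cfg, int dpbp_id,
                                 unsigned short rx_buf_size)
    {
            cfg->num_dpbp = 1;
            cfg->pools[0].dpbp_id = dpbp_id;
            cfg->pools[0].backup_pool = 0;  /* primary pool, not a backup */
            cfg->pools[0].buffer_size = rx_buf_size;
    }
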
drivers/net/ethernet/mellanox/mlx5/core/en.h
879
struct xsk_buff_pool **pools;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
25
if (!xsk->pools) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
26
xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, sizeof(*xsk->pools), GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
27
if (unlikely(!xsk->pools))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
40
kfree(xsk->pools);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
41
xsk->pools = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
53
xsk->pools[ix] = pool;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
59
xsk->pools[ix] = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
12
if (!xsk || !xsk->pools)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
18
return xsk->pools[ix];
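
The mlx5 XSK code allocates the per-channel pool array lazily on first use and indexes it by channel number; a NULL slot means no XSK pool is attached to that channel. A minimal sketch with a simplified container struct:

    #include <linux/slab.h>

    struct xsk_buff_pool;                   /* opaque here */

    struct xsk_state_like {                 /* simplified stand-in */
            struct xsk_buff_pool **pools;
    };

    static int xsk_pools_get(struct xsk_state_like *xsk,
                             unsigned int max_channels)
    {
            if (!xsk->pools) {
                    xsk->pools = kcalloc(max_channels, sizeof(*xsk->pools),
                                         GFP_KERNEL);
                    if (!xsk->pools)
                            return -ENOMEM;
            }
            return 0;
    }
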
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
28
struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
201
arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
226
dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
245
pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
246
if (!pool_mgr->pools[i])
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
254
dr_arg_pool_destroy(pool_mgr->pools[i]);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
262
struct dr_arg_pool **pools;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
268
pools = mgr->pools;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
270
dr_arg_pool_destroy(pools[i]);
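
In dr_arg.c the pools[] array is indexed by log2 of the chunk size: pools[i] serves argument objects of 2^i entries, so get routes through mgr->pools[size] and put through mgr->pools[arg_obj->log_chunk_size]. A hypothetical bounds-checked lookup illustrating the indexing:

    struct arg_pool;                        /* opaque, as in dr_arg.c */

    #define CHUNK_SIZE_MAX 8                /* stand-in for DR_ARG_CHUNK_SIZE_MAX */

    struct arg_mgr_like {
            struct arg_pool *pools[CHUNK_SIZE_MAX];
    };

    static struct arg_pool *arg_pool_for_size(struct arg_mgr_like *mgr,
                                              unsigned int log_chunk_size)
    {
            if (log_chunk_size >= CHUNK_SIZE_MAX)
                    return NULL;
            return mgr->pools[log_chunk_size];
    }
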
drivers/net/ethernet/wangxun/libwx/wx_hw.c
756
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
drivers/net/ethernet/wangxun/libwx/wx_hw.c
772
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
drivers/net/ethernet/wangxun/libwx/wx_hw.c
775
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
drivers/net/ethernet/wangxun/libwx/wx_hw.c
950
wx->mac_table[i].pools,
drivers/net/ethernet/wangxun/libwx/wx_hw.c
968
wx->mac_table[i].pools,
drivers/net/ethernet/wangxun/libwx/wx_hw.c
981
wx->mac_table[0].pools = BIT(VMDQ_P(0));
drivers/net/ethernet/wangxun/libwx/wx_hw.c
984
wx->mac_table[0].pools,
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1000
wx->mac_table[i].pools = 0;
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1016
if (wx->mac_table[i].pools != (1ULL << pool)) {
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1018
wx->mac_table[i].pools |= (1ULL << pool);
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1030
wx->mac_table[i].pools |= (1ULL << pool);
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1050
wx->mac_table[i].pools &= ~(1ULL << pool);
drivers/net/ethernet/wangxun/libwx/wx_hw.c
1051
if (!wx->mac_table[i].pools) {
drivers/net/ethernet/wangxun/libwx/wx_type.h
926
u64 pools;
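
In libwx, pools is a 64-bit reference mask on each MAC-table entry: bit n records that pool (VMDq function) n uses the filter, the low and high halves are written to the _VM_L/_VM_H registers, and the entry is released only when the mask drops to zero. A sketch of the mask bookkeeping with simplified types:

    #include <linux/types.h>

    struct mac_entry_like {                 /* simplified stand-in */
            u8 addr[6];
            u64 pools;                      /* bit n set => pool n uses this MAC */
    };

    static void mac_entry_add_pool(struct mac_entry_like *e, unsigned int pool)
    {
            e->pools |= 1ULL << pool;
    }

    static void mac_entry_del_pool(struct mac_entry_like *e, unsigned int pool)
    {
            e->pools &= ~(1ULL << pool);
            if (!e->pools) {
                    /* last user gone: the driver would clear the HW filter here */
            }
    }
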
drivers/nvme/host/pci.c
592
struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
drivers/nvme/host/pci.c
595
if (pools->small)
drivers/nvme/host/pci.c
596
return pools; /* already initialized */
drivers/nvme/host/pci.c
598
pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,
drivers/nvme/host/pci.c
600
if (!pools->large)
drivers/nvme/host/pci.c
606
pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
drivers/nvme/host/pci.c
608
if (!pools->small) {
drivers/nvme/host/pci.c
609
dma_pool_destroy(pools->large);
drivers/nvme/host/pci.c
610
pools->large = NULL;
drivers/nvme/host/pci.c
614
return pools;
drivers/nvme/host/pci.c
622
struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];
drivers/nvme/host/pci.c
624
dma_pool_destroy(pools->large);
drivers/nvme/host/pci.c
625
dma_pool_destroy(pools->small);
drivers/nvme/host/pci.c
634
struct nvme_descriptor_pools *pools;
drivers/nvme/host/pci.c
639
pools = nvme_setup_descriptor_pools(dev, hctx->numa_node);
drivers/nvme/host/pci.c
640
if (IS_ERR(pools))
drivers/nvme/host/pci.c
641
return PTR_ERR(pools);
drivers/nvme/host/pci.c
643
nvmeq->descriptor_pools = *pools;
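
The nvme entries pair a large and a small descriptor pool per NUMA node, initializing lazily (pools->small doubles as the "already initialized" marker) and unwinding the large pool if the small one fails. A hedged sketch using the long-standing dma_pool_create() where the driver uses a node-aware variant; pool names and sizes here are illustrative:

    #include <linux/dmapool.h>
    #include <linux/errno.h>

    struct desc_pools_like {                /* simplified stand-in */
            struct dma_pool *large;
            struct dma_pool *small;
    };

    static int desc_pools_init(struct desc_pools_like *pools,
                               struct device *dev, size_t page_size)
    {
            if (pools->small)
                    return 0;               /* already initialized */

            pools->large = dma_pool_create("desc page", dev, page_size,
                                           page_size, 0);
            if (!pools->large)
                    return -ENOMEM;

            pools->small = dma_pool_create("desc small", dev, page_size / 8,
                                           page_size / 8, 0);
            if (!pools->small) {
                    dma_pool_destroy(pools->large);
                    pools->large = NULL;
                    return -ENOMEM;
            }
            return 0;
    }
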
drivers/soc/fsl/qbman/qman.c
1763
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
drivers/soc/fsl/qbman/qman.c
1768
pools &= p->config->pools;
drivers/soc/fsl/qbman/qman.c
1769
p->sdqcr |= pools;
drivers/soc/fsl/qbman/qman_portal.c
249
pcfg->pools = qm_get_pools_sdqcr();
drivers/soc/fsl/qbman/qman_priv.h
177
u32 pools;
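
qman_p_static_dequeue_add() clamps the caller's pool-channel mask to the pools the portal was granted at probe time (qm_get_pools_sdqcr()) before OR-ing it into the static dequeue command register. The masking restated:

    #include <linux/types.h>

    /* Only pools granted to the portal may be enabled in its SDQCR. */
    static inline u32 sdqcr_with_pools(u32 sdqcr, u32 requested, u32 allowed)
    {
            return sdqcr | (requested & allowed);
    }
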
drivers/soc/ti/knav_qmss.h
203
struct list_head pools;
drivers/soc/ti/knav_qmss.h
304
struct list_head pools;
drivers/soc/ti/knav_qmss.h
363
list_for_each_entry(pool, &kdev->pools, list)
drivers/soc/ti/knav_qmss_queue.c
817
node = &region->pools;
drivers/soc/ti/knav_qmss_queue.c
818
list_for_each_entry(iter, &region->pools, region_inst) {
drivers/soc/ti/knav_qmss_queue.c
832
list_add_tail(&pool->list, &kdev->pools);
drivers/soc/ti/knav_qmss_queue.c
1034
list_add(&pool->region_inst, &region->pools);
drivers/soc/ti/knav_qmss_queue.c
1115
INIT_LIST_HEAD(&region->pools);
drivers/soc/ti/knav_qmss_queue.c
1357
list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
drivers/soc/ti/knav_qmss_queue.c
1797
INIT_LIST_HEAD(&kdev->pools);
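
Each knav pool is linked twice: onto the device-wide kdev->pools list through pool->list and onto its owning region's pools list through pool->region_inst, so teardown can walk either. A minimal restatement of the dual linkage with simplified types:

    #include <linux/list.h>

    struct knav_pool_like {
            struct list_head list;          /* on the device's pools list */
            struct list_head region_inst;   /* on the region's pools list */
    };

    struct knav_region_like {
            struct list_head pools;
    };

    static void pool_attach(struct list_head *dev_pools,
                            struct knav_region_like *region,
                            struct knav_pool_like *pool)
    {
            list_add(&pool->region_inst, &region->pools);
            list_add_tail(&pool->list, dev_pools);
    }
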
include/linux/swiotlb.h
118
struct list_head pools;
kernel/cgroup/dmem.c
41
struct list_head pools;
kernel/cgroup/dmem.c
59
struct list_head pools;
kernel/cgroup/dmem.c
198
list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)
kernel/cgroup/dmem.c
209
list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {
kernel/cgroup/dmem.c
229
INIT_LIST_HEAD(&dmemcs->pools);
kernel/cgroup/dmem.c
238
list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))
kernel/cgroup/dmem.c
270
list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
kernel/cgroup/dmem.c
386
list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
kernel/cgroup/dmem.c
387
list_add_tail(&pool->region_node, &region->pools);
kernel/cgroup/dmem.c
449
list_for_each_entry_safe(pool, next, &region->pools, region_node)
kernel/cgroup/dmem.c
480
list_for_each_entry_safe(pool, next, &region->pools, region_node) {
kernel/cgroup/dmem.c
530
INIT_LIST_HEAD(&ret->pools);
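
Each dmem pool sits on two lists at once: the cgroup's pools list (via css_node, RCU-managed) and the region's pools list (via region_node). Note the four-argument list_for_each_entry_rcu() at line 238, whose last argument tells lockdep that holding dmemcg_lock is a valid alternative to rcu_read_lock(). A sketch of the dual linkage:

    #include <linux/rculist.h>

    struct dmem_pool_like {                 /* simplified stand-in */
            struct list_head css_node;      /* on the cgroup's pools list */
            struct list_head region_node;   /* on the region's pools list */
    };

    static void pool_link(struct list_head *css_pools,
                          struct list_head *region_pools,
                          struct dmem_pool_like *pool)
    {
            /* RCU readers walk the css list; publish with the RCU variant. */
            list_add_tail_rcu(&pool->css_node, css_pools);
            list_add_tail(&pool->region_node, region_pools);
    }
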
kernel/dma/swiotlb.c
90
.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
kernel/dma/swiotlb.c
309
list_add_rcu(&pool->node, &mem->pools);
kernel/dma/swiotlb.c
782
list_for_each_entry_rcu(pool, &mem->pools, node) {
kernel/dma/swiotlb.c
1166
list_for_each_entry_rcu(pool, &mem->pools, node) {
kernel/dma/swiotlb.c
1347
list_for_each_entry_rcu(pool, &mem->pools, node)
kernel/dma/swiotlb.c
1855
INIT_LIST_HEAD_RCU(&mem->pools);
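
The swiotlb pools list (declared in include/linux/swiotlb.h above) is traversed locklessly with RCU on hot paths, while new pools are published with list_add_rcu(). A simplified sketch of the reader side:

    #include <linux/rculist.h>

    struct tlb_pool_like {                  /* simplified stand-in */
            struct list_head node;
            unsigned long start, end;
    };

    struct tlb_mem_like {
            struct list_head pools;         /* RCU-managed list of pools */
    };

    /* Lockless lookup; the caller must guarantee pool lifetime (swiotlb
     * does: a pool persists while any of its buffers is still mapped). */
    static struct tlb_pool_like *pool_find(struct tlb_mem_like *mem,
                                           unsigned long addr)
    {
            struct tlb_pool_like *pool, *found = NULL;

            rcu_read_lock();
            list_for_each_entry_rcu(pool, &mem->pools, node) {
                    if (addr >= pool->start && addr < pool->end) {
                            found = pool;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }
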
kernel/workqueue.c
5578
struct worker_pool __percpu *pools;
kernel/workqueue.c
5581
pools = bh_worker_pools;
kernel/workqueue.c
5583
pools = cpu_worker_pools;
kernel/workqueue.c
5589
pool = &(per_cpu_ptr(pools, cpu)[highpri]);
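
workqueue.c lays out two worker pools per CPU in a single per-CPU array (index 0 normal, index 1 highpri) and first selects which array applies, BH or regular (lines 5581/5583). The two-level lookup restated with a stand-in type:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct pool_like {                      /* stand-in for struct worker_pool */
            int nice;
    };

    /* 'pools' must be a per-CPU array of two pool_like entries. */
    static struct pool_like *pool_for(struct pool_like __percpu *pools,
                                      int cpu, bool highpri)
    {
            return &per_cpu_ptr(pools, cpu)[highpri];
    }
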
mm/dmapool.c
61
struct list_head pools;
mm/dmapool.c
81
list_for_each_entry(pool, &dev->dma_pools, pools) {
mm/dmapool.c
93
static DEVICE_ATTR_RO(pools);
mm/dmapool.c
270
INIT_LIST_HEAD(&retval->pools);
mm/dmapool.c
283
list_add(&retval->pools, &dev->dma_pools);
mm/dmapool.c
291
list_del(&retval->pools);
mm/dmapool.c
373
list_del(&pool->pools);
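
In mm/dmapool.c the identifier does double duty: it is the list_head that links a dma_pool into its device's dma_pools list, and it is the name of the read-only sysfs attribute created by DEVICE_ATTR_RO(pools), which by the macro's convention is backed by a pools_show() function. The list linkage alone:

    #include <linux/list.h>

    struct dma_pool_like {                  /* simplified stand-in */
            struct list_head pools;         /* node on dev->dma_pools */
    };

    static void pool_register(struct list_head *dev_dma_pools,
                              struct dma_pool_like *pool)
    {
            list_add(&pool->pools, dev_dma_pools);
    }

    static void pool_unregister(struct dma_pool_like *pool)
    {
            list_del(&pool->pools);
    }
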
tools/net/ynl/ynltool/page-pool.c
199
print_json_pool_list(struct netdev_page_pool_get_list *pools,
tools/net/ynl/ynltool/page-pool.c
205
ynl_dump_foreach(pools, pp) {
tools/net/ynl/ynltool/page-pool.c
250
print_plain_pool_list(struct netdev_page_pool_get_list *pools,
tools/net/ynl/ynltool/page-pool.c
254
ynl_dump_foreach(pools, pp) {
tools/net/ynl/ynltool/page-pool.c
301
struct netdev_page_pool_get_list *pools,
tools/net/ynl/ynltool/page-pool.c
304
ynl_dump_foreach(pools, pp) {
tools/net/ynl/ynltool/page-pool.c
331
struct netdev_page_pool_get_list *pools;
tools/net/ynl/ynltool/page-pool.c
377
pools = netdev_page_pool_get_dump(ys);
tools/net/ynl/ynltool/page-pool.c
378
if (!pools) {
tools/net/ynl/ynltool/page-pool.c
394
print_json_pool_list(pools, pp_stats, zombies_only);
tools/net/ynl/ynltool/page-pool.c
396
print_plain_pool_list(pools, pp_stats, zombies_only);
tools/net/ynl/ynltool/page-pool.c
407
aggregate_device_stats(&a, pools, pp_stats);
tools/net/ynl/ynltool/page-pool.c
420
netdev_page_pool_get_list_free(pools);
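
The ynltool flow visible above: fetch the whole dump once, iterate it with ynl_dump_foreach() for each output mode, and free the list in a single call. A schematic built only from the functions already listed; the include paths are an assumption about the libynl and generated-header layout:

    /* Assumes libynl's ynl.h and the generated netdev user header. */
    #include <ynl.h>
    #include "netdev-user.h"

    static int dump_page_pools(struct ynl_sock *ys)
    {
            struct netdev_page_pool_get_list *pools;

            pools = netdev_page_pool_get_dump(ys);
            if (!pools)
                    return -1;              /* dump request failed */

            ynl_dump_foreach(pools, pp) {
                    /* 'pp' visits one page-pool reply per iteration */
            }

            netdev_page_pool_get_list_free(pools);  /* frees every node */
            return 0;
    }
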