pools
struct iommu_pool pools[IOMMU_NR_POOLS];
pool = &(tbl->pools[pool_nr]);
pool = &(tbl->pools[0]);
pool = &tbl->pools[pool_nr];
p = &tbl->pools[pool_nr];
p = &tbl->pools[i];
struct iommu_pool pools[IOMMU_NR_POOLS];
pool = &(iommu->pools[pool_nr]);
pool = &(iommu->pools[0]);
pool = &(iommu->pools[pool_nr]);
p = &tbl->pools[pool_nr];
spin_lock_init(&(iommu->pools[i].lock));
iommu->pools[i].start = start;
iommu->pools[i].hint = start;
iommu->pools[i].end = start - 1;
pool = &(iommu->pools[pool_nr]);
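
The lines above are the split-pool pattern from the powerpc and sparc IOMMU code: the DMA window is divided into IOMMU_NR_POOLS slices, each with its own lock and next-fit hint, so concurrent mappings contend on different locks. A minimal sketch of that layout and a CPU-keyed pool pick follows; the real code derives pool_nr from a per-CPU hash, and the *_sketch names are illustrative.

#include <linux/smp.h>
#include <linux/spinlock.h>

#define IOMMU_NR_POOLS 4	/* must stay a power of two for the mask */

struct iommu_pool {
	unsigned long start;	/* first table entry owned by this pool */
	unsigned long end;	/* last table entry owned by this pool */
	unsigned long hint;	/* where the next-fit search resumes */
	spinlock_t lock;
};

struct iommu_table_sketch {
	unsigned long it_size;	/* total entries in the DMA window */
	struct iommu_pool pools[IOMMU_NR_POOLS];
};

static void iommu_pools_init(struct iommu_table_sketch *tbl)
{
	unsigned long stride = tbl->it_size / IOMMU_NR_POOLS;
	unsigned long start = 0;
	int i;

	for (i = 0; i < IOMMU_NR_POOLS; i++) {
		spin_lock_init(&tbl->pools[i].lock);
		tbl->pools[i].start = start;
		tbl->pools[i].hint = start;
		start += stride;
		tbl->pools[i].end = start - 1;	/* the "end = start - 1" above */
	}
}

/* Key the pool choice on the CPU so allocators spread across locks. */
static struct iommu_pool *iommu_pick_pool(struct iommu_table_sketch *tbl)
{
	unsigned int pool_nr = raw_smp_processor_id() & (IOMMU_NR_POOLS - 1);

	return &tbl->pools[pool_nr];
}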
struct dm_md_mempools *pools;
pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
if (!pools)
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
if (bioset_init(&pools->bs, pool_size, front_pad, 0))
t->mempools = pools;
dm_free_md_mempools(pools);
struct list_head pools;
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
list_add(&pool->list, &dm_thin_pool_table.pools);
list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
bioset_exit(&pools->bs);
bioset_exit(&pools->io_bs);
kfree(pools);
void dm_free_md_mempools(struct dm_md_mempools *pools);
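
The device-mapper lines pair an allocator with a teardown helper: the pools struct is built with kzalloc_node() and two bioset_init() calls, and dm_free_md_mempools() unwinds it, including the partially-built error case. A condensed sketch of that pairing, with illustrative sizes and names:

#include <linux/bio.h>
#include <linux/slab.h>

struct dm_md_mempools_sketch {
	struct bio_set bs;	/* for clone bios */
	struct bio_set io_bs;	/* for per-io front-pad allocations */
};

/* Safe on a partially built struct: bioset_exit() tolerates a
 * zero-initialized, never-initialized bio_set. */
static void free_md_mempools_sketch(struct dm_md_mempools_sketch *pools)
{
	if (!pools)
		return;
	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);
	kfree(pools);
}

static struct dm_md_mempools_sketch *alloc_md_mempools_sketch(int node)
{
	unsigned int pool_size = 256;	/* illustrative */
	struct dm_md_mempools_sketch *pools;

	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, node);
	if (!pools)
		return NULL;

	if (bioset_init(&pools->io_bs, pool_size, 0, 0))
		goto out_free;
	if (bioset_init(&pools->bs, pool_size, 0, 0))
		goto out_free;

	return pools;

out_free:
	free_md_mempools_sketch(pools);
	return NULL;
}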
struct cxgbi_ppm_pool __percpu *pools;
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
if (!pools)
struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
return pools;
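
The libcxgb lines show a per-CPU pool whose trailing bitmap is sized at runtime: the allocation size is the struct plus the bitmap, handed to __alloc_percpu(), and each CPU's copy is reached with per_cpu_ptr(). A minimal sketch with illustrative names; the result is released with free_percpu():

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct pcpu_pool_sketch {
	spinlock_t lock;
	unsigned int next_free;
	unsigned long bmap[];	/* one bit per object, sized at runtime */
};

static struct pcpu_pool_sketch __percpu *pcpu_pool_alloc(unsigned int nr_objs)
{
	unsigned int bmap_longs = DIV_ROUND_UP(nr_objs, BITS_PER_LONG);
	size_t alloc_sz = sizeof(struct pcpu_pool_sketch) +
			  sizeof(unsigned long) * bmap_longs;
	struct pcpu_pool_sketch __percpu *pools;
	unsigned int cpu;

	/* percpu memory comes back zeroed; only the locks need set-up */
	pools = __alloc_percpu(alloc_sz, __alignof__(struct pcpu_pool_sketch));
	if (!pools)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct pcpu_pool_sketch *p = per_cpu_ptr(pools, cpu);

		spin_lock_init(&p->lock);
	}
	return pools;
}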
pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
pools_params->pools[curr_bp].priority_mask |= (1 << j);
if (!pools_params->pools[curr_bp].priority_mask)
pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
pools_params->pools[curr_bp++].backup_pool = 0;
cpu_to_le16(cfg->pools[i].dpbp_id);
cfg->pools[i].priority_mask;
cpu_to_le16(cfg->pools[i].buffer_size);
DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
} pools[DPNI_MAX_DPBP];
cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
cpu_to_le16(cfg->pools[i].buffer_size);
DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
} pools[DPSW_MAX_DPBP];
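
The DPAA2 lines fill a small array of buffer-pool descriptors ({dpbp_id, buffer_size, backup_pool}, plus a priority mask on the dpni side) and then byte-swap each field into the firmware command layout. A sketch of that marshalling; the wire struct here is invented for illustration, not the real dpni/dpsw ABI:

#include <linux/types.h>
#include <asm/byteorder.h>

#define MAX_DPBP_SKETCH 8

struct pools_cfg_sketch {		/* software view */
	u8 num_dpbp;
	struct {
		int dpbp_id;
		u16 buffer_size;
		int backup_pool;
	} pools[MAX_DPBP_SKETCH];
};

struct pools_cmd_sketch {		/* invented little-endian layout */
	u8 num_dpbp;
	u8 backup_pool_mask;
	__le32 dpbp_id[MAX_DPBP_SKETCH];
	__le16 buffer_size[MAX_DPBP_SKETCH];
};

static void pools_cfg_to_cmd(const struct pools_cfg_sketch *cfg,
			     struct pools_cmd_sketch *cmd)
{
	int i;

	cmd->num_dpbp = cfg->num_dpbp;
	for (i = 0; i < cfg->num_dpbp; i++) {
		cmd->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
		cmd->buffer_size[i] = cpu_to_le16(cfg->pools[i].buffer_size);
		if (cfg->pools[i].backup_pool)
			cmd->backup_pool_mask |= 1 << i;
	}
}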
struct xsk_buff_pool **pools;
if (!xsk->pools) {
xsk->pools = kzalloc_objs(*xsk->pools, MLX5E_MAX_NUM_CHANNELS);
if (unlikely(!xsk->pools))
kfree(xsk->pools);
xsk->pools = NULL;
xsk->pools[ix] = pool;
xsk->pools[ix] = NULL;
if (!xsk || !xsk->pools)
return xsk->pools[ix];
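
The mlx5 XSK lines manage a lazily allocated array of pool pointers indexed by channel: created on first set, written or cleared per channel, and freed as a whole at teardown. A reduced sketch; the real code also refcounts live entries and validates ix:

#include <linux/errno.h>
#include <linux/slab.h>

#define MAX_CHANNELS_SKETCH 128

struct xsk_map_sketch {
	void **pools;	/* one slot per channel, NULL when unset */
};

static int xsk_map_set(struct xsk_map_sketch *xsk, unsigned int ix, void *pool)
{
	if (!xsk->pools) {
		xsk->pools = kcalloc(MAX_CHANNELS_SKETCH,
				     sizeof(*xsk->pools), GFP_KERNEL);
		if (unlikely(!xsk->pools))
			return -ENOMEM;
	}
	xsk->pools[ix] = pool;
	return 0;
}

static void *xsk_map_get(struct xsk_map_sketch *xsk, unsigned int ix)
{
	if (!xsk || !xsk->pools)
		return NULL;
	return xsk->pools[ix];
}

static void xsk_map_free(struct xsk_map_sketch *xsk)
{
	kfree(xsk->pools);
	xsk->pools = NULL;
}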
arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
if (!pool_mgr->pools[i])
dr_arg_pool_destroy(pool_mgr->pools[i]);
struct dr_arg_pool **pools;
pools = mgr->pools;
dr_arg_pool_destroy(pools[i]);
struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
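
The dr_arg lines keep one pool per log2 chunk size and tear them down in reverse on failure. A sketch of that pool-per-size-class manager; size_pool_create()/size_pool_destroy() are hypothetical stand-ins for the dr_arg_pool helpers:

#include <linux/errno.h>

#define CHUNK_SIZE_MAX_SKETCH 5

struct size_pool;					/* opaque */
struct size_pool *size_pool_create(int log_chunk_size);	/* hypothetical */
void size_pool_destroy(struct size_pool *pool);		/* hypothetical */

struct pool_mgr_sketch {
	struct size_pool *pools[CHUNK_SIZE_MAX_SKETCH];
};

static int pool_mgr_init(struct pool_mgr_sketch *mgr)
{
	int i;

	for (i = 0; i < CHUNK_SIZE_MAX_SKETCH; i++) {
		mgr->pools[i] = size_pool_create(i);
		if (!mgr->pools[i])
			goto clean_pools;
	}
	return 0;

clean_pools:
	while (--i >= 0)	/* unwind only what was created */
		size_pool_destroy(mgr->pools[i]);
	return -ENOMEM;
}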
wx->mac_table[i].pools = 0;
if (wx->mac_table[i].pools != (1ULL << pool)) {
wx->mac_table[i].pools |= (1ULL << pool);
wx->mac_table[i].pools |= (1ULL << pool);
wx->mac_table[i].pools &= ~(1ULL << pool);
if (!wx->mac_table[i].pools) {
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
wx->mac_table[i].pools,
wx->mac_table[i].pools,
wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].pools,
u64 pools;
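
The wangxun lines track which virtualization pools reference each MAC filter entry in a u64 bitmask: set the pool's bit on add, clear it on delete, and retire the filter only when the mask reaches zero. The mask is then programmed as two 32-bit register halves (the WX_PSR_MAC_SWC_VM_L/_H writes above). A sketch:

#include <linux/bits.h>
#include <linux/types.h>

struct mac_entry_sketch {
	u8 addr[6];
	u64 pools;	/* one bit per pool referencing this address */
	bool in_use;
};

static void mac_entry_add_pool(struct mac_entry_sketch *e, unsigned int pool)
{
	e->pools |= BIT_ULL(pool);
	e->in_use = true;
}

static void mac_entry_del_pool(struct mac_entry_sketch *e, unsigned int pool)
{
	e->pools &= ~BIT_ULL(pool);
	if (!e->pools)
		e->in_use = false;	/* last referencing pool gone */
}

/*
 * Hardware programming splits the mask into two 32-bit halves:
 *   lo = e->pools & 0xFFFFFFFF;
 *   hi = e->pools >> 32;
 */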
struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
if (pools->small)
return pools; /* already initialized */
pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,
if (!pools->large)
pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
if (!pools->small) {
dma_pool_destroy(pools->large);
pools->large = NULL;
return pools;
struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];
dma_pool_destroy(pools->large);
dma_pool_destroy(pools->small);
struct nvme_descriptor_pools *pools;
pools = nvme_setup_descriptor_pools(dev, hctx->numa_node);
if (IS_ERR(pools))
return PTR_ERR(pools);
nvmeq->descriptor_pools = *pools;
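
The nvme lines create descriptor DMA pools lazily per NUMA node: the first queue mapped to a node builds that node's large and small pools, later queues reuse them, and teardown destroys every node's pair. A condensed sketch; sizes are illustrative, and dma_pool_create_node() with this argument order follows the excerpt above:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/mm.h>

struct desc_pools_sketch {
	struct dma_pool *large;
	struct dma_pool *small;
};

static struct desc_pools_sketch *
desc_pools_get(struct device *dev, struct desc_pools_sketch *per_node, int node)
{
	struct desc_pools_sketch *pools = &per_node[node];

	if (pools->small)
		return pools;	/* this node is already set up */

	pools->large = dma_pool_create_node("desc page", dev,
					    PAGE_SIZE, PAGE_SIZE, 0, node);
	if (!pools->large)
		return ERR_PTR(-ENOMEM);

	pools->small = dma_pool_create_node("desc small", dev,
					    256, 256, 0, node);
	if (!pools->small) {
		dma_pool_destroy(pools->large);
		pools->large = NULL;
		return ERR_PTR(-ENOMEM);
	}
	return pools;
}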
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
pools &= p->config->pools;
p->sdqcr |= pools;
pcfg->pools = qm_get_pools_sdqcr();
u32 pools;
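
The QMan lines clamp a caller-supplied pool-channel mask to the pools the portal was configured with before OR-ing it into the cached SDQCR value. A sketch:

#include <linux/types.h>

struct portal_sketch {
	u32 configured_pools;	/* pool channels this portal owns */
	u32 sdqcr;		/* cached static dequeue command */
};

static void portal_static_dequeue_add(struct portal_sketch *p, u32 pools)
{
	pools &= p->configured_pools;	/* never enable foreign pools */
	p->sdqcr |= pools;
	/* the real code then writes p->sdqcr to the portal register */
}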
struct list_head pools;
struct list_head pools;
list_for_each_entry(pool, &kdev->pools, list)
list_add(&pool->region_inst, &region->pools);
INIT_LIST_HEAD(&region->pools);
list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
INIT_LIST_HEAD(&kdev->pools);
node = &region->pools;
list_for_each_entry(iter, &region->pools, region_inst) {
list_add_tail(&pool->list, &kdev->pools);
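
The knav lines put each pool on two lists at once: the device-wide list through pool->list and the owning region's list through pool->region_inst, so either side can walk its own membership. A sketch:

#include <linux/list.h>

struct region_sketch {
	struct list_head pools;		/* pools carved from this region */
};

struct pool_sketch {
	struct list_head list;		/* membership in the device list */
	struct list_head region_inst;	/* membership in region->pools */
};

static void pool_register(struct list_head *dev_pools,
			  struct region_sketch *region,
			  struct pool_sketch *pool)
{
	list_add_tail(&pool->list, dev_pools);
	list_add(&pool->region_inst, &region->pools);
}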
struct list_head pools;
list_for_each_entry_rcu(pool, &dmemcs->pools, css_node)
list_for_each_entry_safe(pool, next, &dmemcs->pools, css_node) {
INIT_LIST_HEAD(&dmemcs->pools);
list_for_each_entry_rcu(pool, &dmemcs->pools, css_node, spin_is_locked(&dmemcg_lock))
list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) {
list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
list_add_tail(&pool->region_node, &region->pools);
struct list_head pools;
list_for_each_entry_safe(pool, next, &region->pools, region_node)
list_for_each_entry_safe(pool, next, &region->pools, region_node) {
INIT_LIST_HEAD(&ret->pools);
struct list_head pools;
list_for_each_entry_rcu(pool, &mem->pools, node) {
list_for_each_entry_rcu(pool, &mem->pools, node)
INIT_LIST_HEAD_RCU(&mem->pools);
list_add_rcu(&pool->node, &mem->pools);
list_for_each_entry_rcu(pool, &mem->pools, node) {
.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
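
The dmem-cgroup and swiotlb lines both use RCU-protected pool lists: writers add under a lock with the list_add_*_rcu() variants, readers traverse locklessly with list_for_each_entry_rcu() inside rcu_read_lock(). A sketch of that pattern:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct pool_node_sketch {
	unsigned long nslabs;
	struct list_head node;
};

static LIST_HEAD(sketch_pools);
static DEFINE_SPINLOCK(sketch_pools_lock);

static void pool_publish(struct pool_node_sketch *pool)
{
	spin_lock(&sketch_pools_lock);
	list_add_rcu(&pool->node, &sketch_pools);
	spin_unlock(&sketch_pools_lock);
}

static unsigned long pools_total_slabs(void)
{
	struct pool_node_sketch *pool;
	unsigned long total = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &sketch_pools, node)
		total += pool->nslabs;
	rcu_read_unlock();
	return total;
}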
struct worker_pool __percpu *pools;
pools = bh_worker_pools;
pools = cpu_worker_pools;
pool = &(per_cpu_ptr(pools, cpu)[highpri]);
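
The workqueue lines index a per-CPU array of pools: each CPU carries one normal and one high-priority pool, so the lookup is per_cpu_ptr() followed by an array index, the &(per_cpu_ptr(pools, cpu)[highpri]) shape above. A sketch; the __percpu pointer assignment mirrors the decay in the excerpt:

#include <linux/percpu.h>
#include <linux/types.h>

#define NR_STD_POOLS_SKETCH 2	/* [0] = normal, [1] = highpri */

struct pool_slot_sketch {
	int nice;
};

static DEFINE_PER_CPU(struct pool_slot_sketch[NR_STD_POOLS_SKETCH],
		      std_pools_sketch);

static struct pool_slot_sketch *pool_for_cpu(int cpu, bool highpri)
{
	struct pool_slot_sketch __percpu *pools = std_pools_sketch;

	return &per_cpu_ptr(pools, cpu)[highpri];
}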
INIT_LIST_HEAD(&retval->pools);
list_add(&retval->pools, &dev->dma_pools);
list_del(&retval->pools);
list_del(&pool->pools);
struct list_head pools;
list_for_each_entry(pool, &dev->dma_pools, pools) {
static DEVICE_ATTR_RO(pools);
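
The dmapool lines link every pool into its device's dma_pools list and expose the list through a read-only sysfs attribute: DEVICE_ATTR_RO(pools) expects a matching pools_show() function and expands to a 0444 attribute named "pools". A sketch of the sysfs half, with an illustrative output line:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t pools_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	/* the real dmapool version walks dev->dma_pools here */
	return sysfs_emit(buf, "poolinfo - 0.1\n");
}
static DEVICE_ATTR_RO(pools);	/* generates dev_attr_pools */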
print_json_pool_list(struct netdev_page_pool_get_list *pools,
ynl_dump_foreach(pools, pp) {
print_plain_pool_list(struct netdev_page_pool_get_list *pools,
ynl_dump_foreach(pools, pp) {
struct netdev_page_pool_get_list *pools,
ynl_dump_foreach(pools, pp) {
struct netdev_page_pool_get_list *pools;
pools = netdev_page_pool_get_dump(ys);
if (!pools) {
print_json_pool_list(pools, pp_stats, zombies_only);
print_plain_pool_list(pools, pp_stats, zombies_only);
aggregate_device_stats(&a, pools, pp_stats);
netdev_page_pool_get_list_free(pools);
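
The final lines come from the userspace YNL page-pool tool: request a full page-pool dump over generic netlink, walk the returned list with ynl_dump_foreach(), then free it. A trimmed sketch against the generated netdev-user.h API; field names follow the generated structs, and error handling is reduced to the essentials:

#include <stdio.h>

#include <ynl.h>
#include "netdev-user.h"

int main(void)
{
	struct netdev_page_pool_get_list *pools;
	struct netdev_page_pool_get_rsp *pp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret = 1;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return 1;
	}

	pools = netdev_page_pool_get_dump(ys);
	if (!pools)
		goto err_close;

	ynl_dump_foreach(pools, pp)
		printf("pool %llu ifindex %u\n",
		       (unsigned long long)pp->id, pp->ifindex);

	netdev_page_pool_get_list_free(pools);
	ret = 0;

err_close:
	ynl_sock_destroy(ys);
	return ret;
}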