kmem_cache_alloc_node: call sites in the kernel tree
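kmem_cache_alloc_node() allocates a single object from a slab cache on (or as close as possible to) a given NUMA node; NUMA_NO_NODE means no placement preference. A minimal sketch of the usual pattern around it, using a hypothetical foo_buf cache (the cache name, struct, and helpers here are illustrative, not taken from any of the call sites below):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/topology.h>	/* cpu_to_node() */

/* Hypothetical object type, standing in for the real per-cache structs below. */
struct foo_buf {
	char data[256];
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	/* One cache serves every node; placement is chosen per allocation. */
	foo_cache = kmem_cache_create("foo_buf", sizeof(struct foo_buf), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

/* Allocate a zeroed object on the memory node backing @cpu. */
static struct foo_buf *foo_alloc_for_cpu(int cpu)
{
	/*
	 * GFP_KERNEL may sleep; callers in atomic context would pass
	 * GFP_ATOMIC or GFP_NOWAIT instead, never OR'ed with GFP_KERNEL.
	 */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL | __GFP_ZERO,
				     cpu_to_node(cpu));
}

static void foo_free(struct foo_buf *buf)
{
	kmem_cache_free(foo_cache, buf);
}

The call sites below all follow this shape; they differ mainly in the gfp flags (whether the context may sleep) and in how the target node is derived: cpu_to_node(), numa_node_id(), a queue's or pool's ->node, or a caller-supplied node id.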
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(cpu));
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], gfp_flags, numa_node_id());
cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO, bfqd->queue->node);
q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO, node_id);
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO, node);
icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO, q->node);
kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, node);
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
pn = kmem_cache_alloc_node(memcg_pn_cachep, GFP_KERNEL | __GFP_ZERO,
n = kmem_cache_alloc_node(kmem_cache_node, GFP_KERNEL, node);
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
return kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
kmem_cache_alloc_node(flow_stats_cache,
stats = kmem_cache_alloc_node(flow_stats_cache,
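Several flag combinations recur above: __GFP_ZERO for zeroed objects, GFP_NOWAIT or GFP_ATOMIC where the caller cannot sleep, and plain GFP_KERNEL elsewhere. Where node-local memory is preferred but not required, an allocation can also try the node strictly first and then fall back. A sketch of that fallback pattern (not taken from any specific site above), reusing the hypothetical foo_cache from the opening example:

static struct foo_buf *foo_alloc_local_or_any(int node)
{
	struct foo_buf *buf;

	/* Strictly node-local, no sleeping, no warning on failure. */
	buf = kmem_cache_alloc_node(foo_cache,
				    GFP_NOWAIT | __GFP_THISNODE | __GFP_NOWARN,
				    node);
	if (buf)
		return buf;

	/* Fall back: any node, and the allocator may sleep and reclaim. */
	return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, NUMA_NO_NODE);
}

Either branch can still return NULL, so callers check the result just as they would after kmalloc().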