alloc_page
src_page = alloc_page(GFP_KERNEL);
dst_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
vdso_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
pte_page = alloc_page(GFP_KERNEL);
pmd_page = alloc_page(GFP_KERNEL);
pud_page = alloc_page(GFP_KERNEL);
cp = alloc_page(GFP_KERNEL);
dp = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_ATOMIC);
page = alloc_page(flags32);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
req_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
vmsa_page = alloc_page(GFP_KERNEL);
tdr_page = alloc_page(GFP_KERNEL);
tdcs_pages[i] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
page = alloc_page(GFP_KERNEL|__GFP_DMA32);
um_vdso = alloc_page(GFP_KERNEL);
page = alloc_page(__GFP_ZERO);
page = alloc_page(GFP_NOIO | gfp_mask);
page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
pg = alloc_page(GFP_KERNEL);
pq_scribble_page = alloc_page(GFP_KERNEL);
data[i] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
device->md_io.page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOIO);
struct page *page = alloc_page(GFP_NOIO);
req_page = alloc_page(GFP_KERNEL);
reply_page = alloc_page(GFP_KERNEL);
req_page = alloc_page(GFP_KERNEL);
reply_page = alloc_page(GFP_KERNEL);
struct page *indirect_page = alloc_page(GFP_KERNEL |
granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
struct page *page = alloc_page(GFP_NOIO);
struct page *page = alloc_page(GFP_NOIO);
page = alloc_page(GFP_KERNEL);
req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
struct page *page = alloc_page(GFP_KERNEL);
pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_KERNEL);
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
struct page *page = alloc_page(gfp_flags);
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
dest = alloc_page(GFP_KERNEL);
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
dest = alloc_page(GFP_KERNEL);
pg = alloc_page(GFP_KERNEL);
chan->pdest_page = alloc_page(GFP_KERNEL);
chan->qdest_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
sg_pages[i] = alloc_page(GFP_KERNEL);
pd->p = alloc_page(GFP_DMA32);
pd->dummy_pt = alloc_page(GFP_DMA32);
pd->dummy_page = alloc_page(GFP_DMA32);
pt->p = alloc_page(GFP_DMA32);
dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
page = alloc_page(GFP | __GFP_HIGHMEM);
mock->pages[i] = alloc_page(GFP_KERNEL);
spt->shadow_page.page = alloc_page(gfp_mask);
raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL);
mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
fb->sysmem.flush_page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
struct page *page = alloc_page(GFP_KERNEL);
struct page *p = alloc_page(GFP_KERNEL);
struct page *p = alloc_page(gfp);
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
pages[p] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
glob->dummy_read_page = alloc_page(__GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
allocated_page = alloc_page(GFP_KERNEL);
allocated_page = alloc_page(GFP_KERNEL);
reg_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
page = alloc_page(GFP_KERNEL);
dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
struct page *page = alloc_page(gfp);
bv->bv_page = alloc_page(gfp_mask);
page = alloc_page(gfp_mask);
pl[i].page = alloc_page(GFP_KERNEL);
pl->page = alloc_page(gfp | __GFP_HIGHMEM);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOIO);
page = alloc_page(GFP_KERNEL);
rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOIO);
mblk->page = alloc_page(GFP_NOIO);
store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
sb_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
rdev->sb_page = alloc_page(GFP_KERNEL);
rdev->bb_page = alloc_page(GFP_KERNEL);
rp->pages[i] = alloc_page(gfp_flags);
page = alloc_page(GFP_NOIO);
conf->tmppage = alloc_page(GFP_KERNEL);
conf->tmppage = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
ctx->meta_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOIO | __GFP_ZERO);
page = alloc_page(GFP_KERNEL);
page2 = alloc_page(GFP_KERNEL);
io->header_page = alloc_page(gfp_mask);
page1 = alloc_page(GFP_KERNEL);
page2 = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
sh->ppl_page = alloc_page(gfp);
ndisks[i].extra_page = alloc_page(GFP_NOIO);
nsh->pages[i] = alloc_page(GFP_NOIO);
struct page *p = alloc_page(GFP_NOIO);
struct page *p = alloc_page(GFP_NOIO);
p = alloc_page(gfp);
if (!(page = alloc_page(gfp))) {
percpu->spare_page = alloc_page(GFP_KERNEL);
conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
mmu->trash_page = alloc_page(GFP_KERNEL);
dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
struct page *page = alloc_page(gfp);
page = alloc_page(GFP_ATOMIC);
page = alloc_page(gfp);
page = alloc_page(GFP_ATOMIC);
page = alloc_page(gfp);
bool alloc_page = false;
alloc_page = !recycle;
if (alloc_page) {
page = alloc_page(GFP_ATOMIC);
buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
ps_page->page = alloc_page(gfp);
buffer_info->page = alloc_page(gfp);
buffer_info->page = alloc_page(GFP_ATOMIC);
struct page *page = alloc_page(gfp);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
dmatest_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(mask);
page = alloc_page(GFP_ATOMIC | GFP_DMA);
page = alloc_page(GFP_ATOMIC);
tf->page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_ATOMIC);
xdp_page = alloc_page(GFP_ATOMIC);
p = alloc_page(gfp_mask);
new_page = alloc_page(GFP_ATOMIC);
rbi->page = alloc_page(GFP_ATOMIC);
struct page *new_page = alloc_page(GFP_KERNEL);
struct page *p = alloc_page(GFP_KERNEL);
struct page *alloc_page;
if (trans_pcie->alloc_page)
__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
if (trans_pcie->alloc_page) {
if (trans_pcie->alloc_page) {
page = trans_pcie->alloc_page;
trans_pcie->alloc_page = NULL;
if (!trans_pcie->alloc_page) {
trans_pcie->alloc_page = page;
ret = alloc_page(GFP_ATOMIC);
p->page = alloc_page(GFP_ATOMIC);
struct page *page = alloc_page(GFP_ATOMIC);
page = alloc_page(GFP_ATOMIC);
ctrl->discard_page = alloc_page(GFP_KERNEL);
pg = alloc_page(GFP_KERNEL);
snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_ATOMIC);
mmu->trash_page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
page = alloc_page(GFP_NOIO | __GFP_ZERO);
tmp_page = alloc_page(GFP_ATOMIC);
page = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
page = alloc_page(gfp);
gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
struct page *page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOFS);
page = alloc_page(GFP_NOFS);
page = alloc_page(GFP_NOFS);
page = alloc_page(GFP_KERNEL);
/* Helper macro: allocate one page with GFP_KERNEL (may sleep); expands to
 * alloc_page(GFP_KERNEL), so it returns struct page * or NULL on failure.
 * NOTE(review): page is not zeroed (no __GFP_ZERO) — confirm callers don't
 * rely on zero-filled contents. */
#define dump_page_alloc() alloc_page(GFP_KERNEL)
entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
enc_extent_page = alloc_page(GFP_USER);
page = alloc_page(gfp);
page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
page = alloc_page(GFP_HIGHUSER);
page = alloc_page(GFP_USER);
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
arg->layoutupdate_page = alloc_page(GFP_NOFS);
struct page *page = alloc_page(GFP_KERNEL);
ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
args.pages[args.npages] = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
newpage = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(gfp_flags);
pages[i] = alloc_page(gfp_flags);
page = alloc_page(gfp);
fill = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_NOFS);
return alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
page = alloc_page(GFP_NOIO);
p = alloc_page(GFP_KERNEL);
struct page *page = alloc_page(GFP_KERNEL);
area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
page = alloc_page(gfp_mask);
page = alloc_page(__GFP_HIGHMEM);
buf->page_array[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
ns->vvar_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
page = alloc_page(GFP_USER | __GFP_ZERO);
spd.pages[i] = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page[i] = alloc_page(GFP_KERNEL);
dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
return alloc_page(gfp_flags);
page = alloc_page(GFP_KERNEL);
pages[i] = alloc_page(GFP_KERNEL);
fdt_page = alloc_page(GFP_KERNEL);
page = alloc_page(gfp_mask | __GFP_HIGHMEM);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
unlock_op_page = alloc_page(GFP_NOIO);
break_op_page = alloc_page(GFP_NOIO);
cookie_op_page = alloc_page(GFP_NOIO);
get_info_op_page = alloc_page(GFP_NOIO);
reply_page = alloc_page(GFP_NOIO);
lock_op_page = alloc_page(GFP_NOIO);
con->bounce_page = alloc_page(GFP_NOIO);
con->bounce_page = alloc_page(GFP_NOIO);
con->bounce_page = alloc_page(GFP_NOIO);
con->bounce_page = alloc_page(GFP_NOIO);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_KERNEL);
page = alloc_page(gfp_mask);
pfrag->page = alloc_page(gfp);
page = alloc_page(gfp);
page = alloc_page(gfp);
rqstp->rq_enc_pages[i] = alloc_page(GFP_KERNEL);
arg->pages[i] = alloc_page(GFP_KERNEL);
in_token->pages[i] = alloc_page(GFP_KERNEL);
page = alloc_page(gfp_flags);
*ppage = alloc_page(GFP_NOWAIT);
buf->pages[i] = alloc_page(gfp);
*ppages = alloc_page(GFP_NOWAIT);
page = alloc_page(GFP_NOIO | __GFP_NOWARN);
page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
dummy_page = alloc_page(GFP_KERNEL);
page = alloc_page(strp->sk->sk_allocation);
page = alloc_page(xs->sk.sk_allocation);
page = alloc_page(GFP_ATOMIC);
selinux_state.status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);