page_array

Cross-reference of the identifier page_array across the kernel tree, grouped roughly by subsystem; each group of hits is followed by a short illustrative sketch of the pattern it shows.
xa_init(&encl->page_array);
entry = xa_load(&encl->page_array, PFN_DOWN(addr));
ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
(!xa_load(&encl->page_array, PFN_DOWN(addr))))
XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
xa_destroy(&encl->page_array);
entry = xa_load(&encl->page_array, PFN_DOWN(addr));
struct xarray page_array;
xa_erase(&encl->page_array, PFN_DOWN(entry->desc));
ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));
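The hits above are from the SGX driver's enclave code, which tracks enclave pages in an XArray indexed by page frame number: PFN_DOWN() turns a linear address or a page descriptor into the lookup key, and XA_STATE sets up a cursor for ranged walks. A minimal sketch of the pattern, assuming an illustrative struct encl_page reduced to its descriptor field:

#include <linux/gfp.h>
#include <linux/pfn.h>
#include <linux/xarray.h>

struct encl_page {
        unsigned long desc;             /* page address plus flag bits */
};

/* xa_insert() fails with -EBUSY if the slot is already occupied */
static int encl_track_page(struct xarray *pages, struct encl_page *page)
{
        return xa_insert(pages, PFN_DOWN(page->desc), page, GFP_KERNEL);
}

static struct encl_page *encl_lookup_page(struct xarray *pages,
                                          unsigned long addr)
{
        return xa_load(pages, PFN_DOWN(addr));  /* NULL if untracked */
}

static void encl_untrack_page(struct xarray *pages, struct encl_page *page)
{
        xa_erase(pages, PFN_DOWN(page->desc));
}

Using xa_insert() rather than xa_store() makes tracking the same PFN twice an error instead of a silent overwrite.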
xa_for_each(&vepc->page_array, index, entry) {
xa_erase(&vepc->page_array, index);
xa_for_each(&vepc->page_array, index, entry) {
xa_erase(&vepc->page_array, index);
struct xarray page_array;
xa_destroy(&vepc->page_array);
xa_init(&vepc->page_array);
epc_page = xa_load(&vepc->page_array, index);
ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
xa_erase(&vepc->page_array, index);
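The vepc lines are the /dev/sgx_vepc virtualization side, which keys the same kind of XArray by a raw index, converts xa_store() results to an errno with xa_err(), and tears down by iterating with xa_for_each(). A sketch, with free_epc_page() as a hypothetical stand-in for the driver's EREMOVE helper:

#include <linux/xarray.h>

/* hypothetical stand-in for the driver's EREMOVE helper; nonzero = failed */
static int free_epc_page(void *epc_page)
{
        return 0;
}

static int vepc_track(struct xarray *page_array, unsigned long index,
                      void *epc_page)
{
        /* xa_store() returns the old entry or an XA_ERROR();
         * xa_err() maps that to 0 or a negative errno */
        return xa_err(xa_store(page_array, index, epc_page, GFP_KERNEL));
}

static void vepc_release(struct xarray *page_array)
{
        unsigned long index;
        void *entry;

        xa_for_each(page_array, index, entry) {
                if (free_epc_page(entry))
                        continue;       /* leave it for a later retry pass */
                xa_erase(page_array, index);
        }
        xa_destroy(page_array);         /* assumes the retries all succeeded */
}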
struct page **page_array __free(kfree) = kzalloc_objs(page_array[0],
if (!page_array)
page_count, page_array);
release_pages(page_array, nr_populated);
buffer->pages = no_free_ptr(page_array);
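This group shows the scope-based cleanup idiom from <linux/cleanup.h>: __free(kfree) frees the array automatically on any early return, and no_free_ptr() disarms that cleanup when ownership is handed off to the buffer. A sketch under assumed names (struct my_buffer, buffer_alloc_pages; the tree's kzalloc_objs() helper is replaced by plain kcalloc() here):

#include <linux/cleanup.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_buffer {
        struct page **pages;
};

static int buffer_alloc_pages(struct my_buffer *buffer, unsigned int page_count)
{
        /* freed automatically on every return unless ownership moves */
        struct page **page_array __free(kfree) =
                kcalloc(page_count, sizeof(*page_array), GFP_KERNEL);
        unsigned long nr_populated;

        if (!page_array)
                return -ENOMEM;

        nr_populated = alloc_pages_bulk(GFP_KERNEL, page_count, page_array);
        if (nr_populated != page_count) {
                release_pages(page_array, nr_populated);
                return -ENOMEM;         /* page_array itself: __free(kfree) */
        }

        buffer->pages = no_free_ptr(page_array);        /* hand off, no kfree */
        return 0;
}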
struct page *page_array[MAX_ARRAY];
page_array[seg] = p;
page_array[seg] = ib_virt_dma_to_page(va);
rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
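The siw (software iWARP) transmit path fills a fixed-size on-stack page array for zero-copy sends, translating each SGE either from its struct page or, for kernel-virtual buffers, via ib_virt_dma_to_page(). A reduced sketch of collecting the pages behind a kernel buffer, using virt_to_page() (names are illustrative):

#include <linux/minmax.h>
#include <linux/mm.h>

#define MAX_ARRAY 64

/* collect the pages backing a kernel-virtual buffer (illustrative) */
static int collect_pages(void *buf, size_t len, struct page **page_array)
{
        unsigned long va = (unsigned long)buf;
        int seg = 0;

        while (len && seg < MAX_ARRAY) {
                size_t chunk = min_t(size_t, len,
                                     PAGE_SIZE - (va & ~PAGE_MASK));

                page_array[seg++] = virt_to_page((void *)va);
                va += chunk;
                len -= chunk;
        }
        return seg;     /* number of page slots filled */
}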
static int page_array_pin(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
static inline void page_array_idal_create_words(struct page_array *pa,
struct page_array *ch_pa;
static int page_array_alloc(struct page_array *pa, unsigned int len)
struct page_array *pa,
struct page_array *pa;
static void page_array_unpin(struct page_array *pa,
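vfio-ccw (s390 channel I/O, judging by the IDAL helper) wraps the array in its own struct page_array with a family of helpers: allocate, pin, unpin, build IDAL words, and test whether an IOVA range is pinned. An illustrative miniature of the pin/unpin pair on top of the vfio pinning API; the field names mirror the driver, but the layout here is an assumption:

#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/vfio.h>

struct page_array {
        u64             *pa_iova;       /* guest IOVA, one entry per page */
        struct page     **pa_page;      /* the pinned host pages */
        unsigned int    pa_nr;
};

static void page_array_unpin(struct page_array *pa,
                             struct vfio_device *vdev, unsigned int nr)
{
        while (nr-- > 0)
                vfio_unpin_pages(vdev, pa->pa_iova[nr], 1);
}

static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
{
        unsigned int i;
        int ret;

        for (i = 0; i < pa->pa_nr; i++) {
                ret = vfio_pin_pages(vdev, pa->pa_iova[i], 1,
                                     IOMMU_READ | IOMMU_WRITE,
                                     &pa->pa_page[i]);
                if (ret < 0) {
                        page_array_unpin(pa, vdev, i);  /* unwind partial pin */
                        return ret;
                }
        }
        return 0;
}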
static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
__free_pages(page_array[i], 0);
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
allocated = alloc_pages_bulk(gfp, nr_pages, page_array);
__free_page(page_array[i]);
page_array[i] = NULL;
struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
eb->folios[i] = page_folio(page_array[i]);
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
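btrfs_alloc_page_array() fills a caller-supplied array via the bulk allocator, which may stop short; the function therefore loops, and gives up (unwinding what it got) only when a pass makes no progress. A condensed sketch of that retry/cleanup shape:

#include <linux/gfp.h>
#include <linux/mm.h>

/* fill page_array[0..nr_pages) with pages, or fail without leaking */
static int alloc_page_array(unsigned int nr_pages, struct page **page_array)
{
        unsigned int allocated = 0;

        while (allocated < nr_pages) {
                unsigned int last = allocated;

                /* populated slots are skipped, so each call fills the rest */
                allocated = alloc_pages_bulk(GFP_NOFS, nr_pages, page_array);
                if (allocated == last)
                        break;          /* no forward progress */
        }
        if (allocated == nr_pages)
                return 0;

        for (unsigned int i = 0; i < allocated; i++) {
                __free_page(page_array[i]);
                page_array[i] = NULL;
        }
        return -ENOMEM;
}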
header->page_array.npages, f_offset,
for (i = pg_index; i < header->page_array.npages; i++) {
header->page_array.npages - i,
for (i = pg_index; i < header->page_array.npages; i++) {
bio = do_add_page_to_bio(bio, header->page_array.npages - i,
iocb->bvec = kmalloc_objs(struct bio_vec, hdr->page_array.npages, flags);
struct page **pagevec = hdr->page_array.pagevec;
while (total && v < hdr->page_array.npages) {
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
hdr->args.pages = hdr->page_array.pagevec;
struct nfs_page_array *pg_array = &hdr->page_array;
if (pagecount <= ARRAY_SIZE(pg_array->page_array))
pg_array->pagevec = pg_array->page_array;
pages = hdr->page_array.pagevec;
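The NFS I/O code carries a struct nfs_page_array with a small embedded page_array next to a pagevec pointer: small requests use the inline slots, larger ones allocate a vector, and the free path kfrees only if the pointer was switched away from the inline storage (the comparison a few lines up). A sketch of that inline-or-heap pattern with assumed names:

#include <linux/kernel.h>
#include <linux/slab.h>

struct small_page_array {
        struct page     **pagevec;      /* points at inline or heap storage */
        unsigned int    npages;
        struct page     *page_array[8]; /* inline slots for the common case */
};

static int small_page_array_init(struct small_page_array *pa, unsigned int n)
{
        if (n <= ARRAY_SIZE(pa->page_array)) {
                pa->pagevec = pa->page_array;   /* no allocation needed */
        } else {
                pa->pagevec = kcalloc(n, sizeof(*pa->pagevec), GFP_KERNEL);
                if (!pa->pagevec)
                        return -ENOMEM;
        }
        pa->npages = n;
        return 0;
}

static void small_page_array_free(struct small_page_array *pa)
{
        if (pa->pagevec != pa->page_array)      /* heap-backed only */
                kfree(pa->pagevec);
}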
struct page **page_array; /* array of mapped pages */
struct page **page_array;
unpin_user_pages(bufmap->page_array, bufmap->page_count);
kfree(bufmap->page_array);
bufmap->page_array =
if (!bufmap->page_array)
bufmap->page_count, FOLL_WRITE, bufmap->page_array);
unpin_user_page(bufmap->page_array[i]);
flush_dcache_page(bufmap->page_array[i]);
bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
struct page *page = to->page_array[i];
struct page *page = from->page_array[i];
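The bufmap naming matches orangefs: a userspace buffer's pages are pinned once for long-lived shared I/O descriptors, unwound with unpin_user_page()/unpin_user_pages() on failure or teardown, and flushed with flush_dcache_page() so user and kernel views stay coherent on aliasing caches. A sketch under assumed names (struct my_bufmap; pin_user_pages_fast() as a stand-in for the exact pinning call):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_bufmap {
        struct page     **page_array;
        int             page_count;
};

static int bufmap_pin(struct my_bufmap *bufmap, unsigned long uaddr)
{
        int i, got;

        bufmap->page_array = kcalloc(bufmap->page_count,
                                     sizeof(*bufmap->page_array), GFP_KERNEL);
        if (!bufmap->page_array)
                return -ENOMEM;

        got = pin_user_pages_fast(uaddr, bufmap->page_count, FOLL_WRITE,
                                  bufmap->page_array);
        if (got != bufmap->page_count) {
                if (got > 0)
                        unpin_user_pages(bufmap->page_array, got);
                kfree(bufmap->page_array);
                return got < 0 ? got : -ENOMEM;
        }

        /* data may be written through the kernel mapping later */
        for (i = 0; i < bufmap->page_count; i++)
                flush_dcache_page(bufmap->page_array[i]);
        return 0;
}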
unsigned long nr_pages, struct page **page_array);
struct page **page_array);
struct page **page_array);
struct page **page_array)
return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
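These prototypes are the bulk allocator's public interface. Since the memory-allocation-profiling work, the underlying symbol is the _noprof variant, and the caller-facing name is a macro that routes through alloc_hooks() to attach a call-site tag, roughly as in this sketch of a node-targeted wrapper (wrapper names assumed):

#include <linux/alloc_tag.h>
#include <linux/gfp.h>

static inline unsigned long
bulk_alloc_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
                       struct page **page_array)
{
        /* NULL nodemask: any allowed node may satisfy the request */
        return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}
#define bulk_alloc_node(...)    alloc_hooks(bulk_alloc_node_noprof(__VA_ARGS__))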
struct page *page_array[NFS_PAGEVEC_SIZE];
struct nfs_page_array page_array;
struct page **page_array; /* array of current buffer pages */
struct page **page_array = *pages, **new_array = NULL;
struct page *p = compound_head(page_array[j]);
WARN_ON_ONCE(i > 0 && p != page_array[j]);
kvfree(page_array);
bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
struct folio *folio = page_folio(page_array[0]);
data->first_folio_page_idx = folio_page_idx(folio, page_array[0]);
if (page_folio(page_array[i]) == folio &&
page_array[i] == page_array[i-1] + 1) {
if (folio_page_idx(folio, page_array[i-1]) !=
folio = page_folio(page_array[i]);
folio_page_idx(folio, page_array[i]) != 0)
bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
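io_check_coalesce_buffer() decides whether a pinned page array can be collapsed into folio-granular bvec entries: pages inside one folio must be physically consecutive, and every folio after the first must start at page index 0 (the first may start mid-folio, remembered as first_folio_page_idx). A simplified standalone sketch of the contiguity walk (the real check also verifies each previous folio was covered to its end):

#include <linux/mm.h>

/* can pages[] be described folio-by-folio? (simplified, illustrative) */
static bool pages_are_folio_contiguous(struct page **pages, int nr_pages)
{
        struct folio *folio = page_folio(pages[0]);
        int i;

        for (i = 1; i < nr_pages; i++) {
                if (page_folio(pages[i]) == folio) {
                        if (pages[i] != pages[i - 1] + 1)
                                return false;   /* gap inside the folio */
                } else {
                        folio = page_folio(pages[i]);
                        if (folio_page_idx(folio, pages[i]) != 0)
                                return false;   /* folio entered mid-way */
                }
        }
        return true;
}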
buf->page_array = relay_alloc_page_array(n_pages);
if (!buf->page_array)
buf->page_array[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (unlikely(!buf->page_array[i]))
set_page_private(buf->page_array[i], (unsigned long)buf);
mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL);
__free_page(buf->page_array[j]);
relay_free_page_array(buf->page_array);
__free_page(buf->page_array[i]);
relay_free_page_array(buf->page_array);
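relay builds each buffer from individually allocated pages, maps them contiguously with vmap(), and stashes a back-pointer to the owning buffer in each page's private field; any failure frees the pages gathered so far. A self-contained sketch:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* allocate n_pages zeroed pages, map them contiguously, tag the owner */
static void *alloc_vmapped_buf(struct page **pages, unsigned int n_pages,
                               void *owner)
{
        unsigned int i;
        void *mem;

        for (i = 0; i < n_pages; i++) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!pages[i])
                        goto depopulate;
                set_page_private(pages[i], (unsigned long)owner);
        }

        mem = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
        if (mem)
                return mem;

depopulate:
        while (i-- > 0)
                __free_page(pages[i]);
        return NULL;
}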
struct page **page_array = pages;
___free_pages_bulk(page_array, nr_total - nr_pages);
struct page **page_array)
page_array);
nr_pages_per_node, page_array);
page_array += nr_allocated;
struct page **page_array)
page_array);
page_array += nr_allocated;
page_array);
page_array += nr_allocated;
struct page **page_array)
nr_pages, page_array);
page_array + nr_allocated);
unsigned long nr_pages, struct page **page_array)
nr_pages, page_array);
gfp, pol, nr_pages, page_array);
numa_node_id(), pol, nr_pages, page_array);
nr_pages, page_array);
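The NUMA mempolicy bulk paths split one request across nodes (interleave, weighted interleave, preferred-many), calling the node-targeted allocator once per node and advancing page_array by nr_allocated so each call fills the tail of the destination. A sketch of the interleave shape, calling the _noprof symbol directly for brevity (remainder distribution and empty-mask handling omitted):

#include <linux/gfp.h>
#include <linux/nodemask.h>

/* spread one bulk request evenly across the nodes in a non-empty mask */
static unsigned long bulk_interleave(gfp_t gfp, const nodemask_t *nodes,
                                     unsigned long nr_pages,
                                     struct page **page_array)
{
        unsigned long per_node = nr_pages / nodes_weight(*nodes);
        unsigned long nr_allocated, total = 0;
        int nid;

        for_each_node_mask(nid, *nodes) {
                nr_allocated = alloc_pages_bulk_noprof(gfp, nid, NULL,
                                                       per_node, page_array);
                total += nr_allocated;
                page_array += nr_allocated;     /* next node fills the tail */
        }
        return total;
}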
struct page **page_array)
while (nr_populated < nr_pages && page_array[nr_populated])
if (page_array[nr_populated]) {
page_array[nr_populated++] = page;
page_array[nr_populated++] = page;
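The last hits are the population loop inside alloc_pages_bulk_noprof() itself: it first skips leading slots the caller already filled, skips interior non-NULL slots as it goes, and returns the total count of populated entries, which is what makes the retry loops above safe. A caller-side illustration of that contract:

#include <linux/gfp.h>
#include <linux/kernel.h>

static int demo_bulk_fill(void)
{
        struct page *pages[4] = { NULL };
        unsigned long n;
        unsigned int i;

        pages[2] = alloc_page(GFP_KERNEL);      /* pretend a slot is pre-filled */
        if (!pages[2])
                return -ENOMEM;

        /* only slots 0, 1 and 3 get new pages; n counts all populated slots */
        n = alloc_pages_bulk(GFP_KERNEL, ARRAY_SIZE(pages), pages);

        for (i = 0; i < ARRAY_SIZE(pages); i++)
                if (pages[i])
                        __free_page(pages[i]);

        return n == ARRAY_SIZE(pages) ? 0 : -ENOMEM;
}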