vm_page
end = start + pages * sizeof(struct vm_page);
pfn = first_page + (va - start) / sizeof(struct vm_page);
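The two lines above (minidump-style code) invert the vm_page array mapping: given a kernel VA that points into the array, they recover the physical page frame number that slot describes. A minimal sketch of the same arithmetic, assuming kernel context and the names shown above (start is the KVA of the first array slot, first_page the PFN of the first managed page):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Sketch: PFN described by a pointer into the vm_page array. */
static u_long
va_to_pfn(vm_offset_t va, vm_offset_t start, u_long first_page)
{
	/* Which array slot does `va` fall inside? */
	u_long idx = (va - start) / sizeof(struct vm_page);

	return (first_page + idx);
}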
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
struct vm_page m;
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
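The _bus_dmamap_load_ma() backend above takes a vm_page pointer array plus a byte offset, which is how unmapped I/O buffers reach DMA hardware without ever receiving a kernel mapping. A hedged driver-side sketch through the public wrapper, assuming bus_dmamap_load_ma() as declared in <sys/bus_dma.h> (mydev_dma_cb and sc are hypothetical):

#include <sys/param.h>
#include <machine/bus.h>

/* Hypothetical callback: busdma reports the DMA segments here. */
static bus_dmamap_callback_t mydev_dma_cb;

static int
mydev_load_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, void *sc)
{
	/* Load tlen bytes starting ma_offs bytes into ma[0]. */
	return (bus_dmamap_load_ma(dmat, map, ma, tlen, ma_offs,
	    BUS_DMA_NOWAIT, mydev_dma_cb, sc));
}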
#define page vm_page
#define VMAP_MAX_CHUNK_SIZE (65536U / sizeof(struct vm_page)) /* KMEM_ZMAX */
fma = malloc(chunk * sizeof(struct vm_page),
memset(fma, 0, chunk * sizeof(struct vm_page));
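VMAP_MAX_CHUNK_SIZE caps each allocation at 64 KB (KMEM_ZMAX), the largest request malloc(9) serves from its UMA zones, so a long page run is processed in zone-sized chunks rather than one large contiguous allocation. A minimal sketch of that chunking pattern (M_TEMP and the function shape are illustrative; the fragment above zeroes with memset(), which M_ZERO replaces here):

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
process_pages(size_t total)
{
	size_t done, chunk;
	struct vm_page *fma;

	for (done = 0; done < total; done += chunk) {
		/* Never ask malloc(9) for more than one zone's worth. */
		chunk = MIN(total - done, VMAP_MAX_CHUNK_SIZE);
		fma = malloc(chunk * sizeof(struct vm_page), M_TEMP,
		    M_WAITOK | M_ZERO);
		/* ... fill and use fma[0..chunk-1] ... */
		free(fma, M_TEMP);
	}
}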
struct vm_page *page;
struct vm_page *dummy_read_page);
struct vm_page *dummy_read_page;
struct vm_page **pages;
struct vm_page *dummy_read_page);
struct vm_page *dummy_read_page;
struct vm_page *dummy_read_page);
extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
struct vm_page *page,
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
struct vm_page;
struct vm_page *page,
struct vm_page *page);
vm->pages = malloc(sizeof(struct vm_page *) * vm->npages,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
fma = malloc(sizeof(struct vm_page) * ma_cnt,
struct page *vm_page;
vm_page = vmalloc_to_page(addr);
if (!vm_page)
pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
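This loop walks a vmalloc()ed buffer page by page and DMA-maps each backing page; in the Linux/linuxkpi idiom each returned handle should also be checked, since dma_map_page() can fail. A minimal sketch of the full pattern (hw, pg_dma and hw_to_dev() come from the fragment above; npages, addr and the unwind label are hypothetical):

	for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *vm_page = vmalloc_to_page(addr);

		if (!vm_page)
			goto unwind;	/* hypothetical error path */
		pg_dma[i] = dma_map_page(hw_to_dev(hw), vm_page, 0,
		    PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(hw_to_dev(hw), pg_dma[i]))
			goto unwind;
	}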
struct vm_page m, *ma;
struct vm_page maa[bp->bio_ma_n];
struct vm_page **ma;
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
vm_page, vma->vm_pgoff, unmapped_db,
found = qlnxr_search_mmap(ucontext, vm_page, len);
if ((vm_page < unmapped_db) ||
((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
vm_page, unmapped_db, ucontext->dpi_size);
TAILQ_HEAD(, vm_page) free_pages;
TAILQ_HEAD(, vm_page) vtballoon_pages;
static TAILQ_HEAD(, vm_page) ballooned_pages;
struct vm_page **pages;
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
sf_buf_alloc(struct vm_page *m, int flags)
struct vm_page **pages;
struct vm_page *pg;
moea64_bpvo_pool_size = ((ptoa((uintmax_t)physmem) * sizeof(struct vm_page)) /
struct vm_page *pg;
struct vm_page *pg;
size = round_page(pages * sizeof(struct vm_page));
size = round_page(size * sizeof(struct vm_page));
vm_page_array_size += size / sizeof(struct vm_page);
end = start + pages * sizeof(struct vm_page);
pfn = first_page + (va - start) / sizeof(struct vm_page);
sz = (round_page(sz) / (PAGE_SIZE + sizeof(struct vm_page)));
data_end += round_page(sz * sizeof(struct vm_page));
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
struct vm_page m;
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
struct vm_page m;
struct vm_page **bio_ma; /* Or unmapped. */
void (*b_pgiodone)(void *, struct vm_page **,
struct vm_page *b_pages[];
int vfs_bio_getpages(struct vnode *vp, struct vm_page **ma, int count,
struct vm_page;
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
struct vm_page **ma, bus_size_t tlen, int ma_offs,
memdesc_vmpages(struct vm_page **ma, size_t len, u_int ma_offset)
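memdesc_vmpages() wraps a vm_page array in a struct memdesc, the tagged buffer descriptor that busdma consumers can load uniformly with bus_dmamap_load_mem(). A minimal sketch under that assumption (my_cb is a hypothetical callback):

#include <sys/param.h>
#include <sys/memdesc.h>
#include <machine/bus.h>

static bus_dmamap_callback_t my_cb;	/* hypothetical */

static int
load_page_run(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, size_t len, u_int off, void *arg)
{
	/* Describe the page run once... */
	struct memdesc mem = memdesc_vmpages(ma, len, off);

	/* ...then hand it to busdma like any other buffer type. */
	return (bus_dmamap_load_mem(dmat, map, &mem, my_cb, arg,
	    BUS_DMA_NOWAIT));
}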
struct vm_page;
struct vm_page **md_ma;
struct vm_page **td_ma; /* (k) uio pages held */
struct sf_buf *sf_buf_alloc(struct vm_page *, int);
sf_buf_alloc(struct vm_page *m, int pri)
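sf_buf_alloc() lends a page a temporary kernel mapping on platforms that cannot direct-map all physical memory (where they can, the mapping is effectively free). A minimal sf_buf(9) sketch copying one page out through its KVA:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sf_buf.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
copy_from_page(struct vm_page *m, void *dst)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, 0);	/* flags 0: may sleep for a slot */
	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
	sf_buf_free(sf);		/* release the ephemeral mapping */
}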
typedef struct vm_page *vm_page_t;
struct vm_page;
int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
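uiomove_fromphys() is the unmapped counterpart of uiomove(9): it copies between a run of physical pages and a uio, creating ephemeral mappings (direct map or sf_bufs) internally as needed. Sketch of a driver read path that holds its data in a page array (bounds checking elided):

static int
read_from_pages(struct vm_page *ma[], vm_offset_t off, int n,
    struct uio *uio)
{
	/* No stable KVA for `ma` is required. */
	return (uiomove_fromphys(ma, off, n, uio));
}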
struct vm_page;
TAILQ_HEAD(pglist, vm_page);
struct vm_page *p;
struct vm_page *p;
TAILQ_HEAD(, vm_page) alloctail;
struct vm_page;
typedef struct vm_page *vm_page_t;
sizeof(struct vm_page)),
TAILQ_HEAD(, vm_page) devp_pglist;
TAILQ_HEAD(, vm_page) sgp_pglist;
TAILQ_HEAD(pglist, vm_page);
static TAILQ_HEAD(, vm_page) blacklist_head;
SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
new_end = trunc_page(end - page_range * sizeof(struct vm_page));
page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
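These lines from vm_page_startup() split the remaining boot memory so that every managed frame also gets its struct vm_page: each page effectively costs PAGE_SIZE + sizeof(struct vm_page) bytes, and the remainder test claims one extra frame when the leftover can hold a page but not another array slot. Worked numbers, assuming a 4096-byte page and a 104-byte struct vm_page (both vary by architecture and release):

	/*
	 * size       = 64 MB           = 67108864 bytes
	 * cost/frame = 4096 + 104      = 4200 bytes
	 * page_range = 67108864 / 4200 = 15978 frames
	 * remainder  = 67108864 % 4200 = 1264 bytes < PAGE_SIZE,
	 *              so no extra frame is claimed in this example.
	 * new_end    = trunc_page(end - 15978 * sizeof(struct vm_page))
	 */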
TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
SLIST_ENTRY(vm_page) ss; /* private slists */
TAILQ_HEAD(pglist, vm_page);
SLIST_HEAD(spglist, vm_page);
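struct vm_page carries its own linkage (the `q` and `ss` entries above, kept inside the plinks union in recent FreeBSD), so page collections are ordinary queue(3) lists and pglist/spglist are the canonical head types. A minimal sketch of draining a private page list, assuming the caller exclusively owns every page on it:

#include <sys/param.h>
#include <sys/queue.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

static void
drain_pglist(struct pglist *pl)
{
	struct vm_page *m;

	while ((m = TAILQ_FIRST(pl)) != NULL) {
		TAILQ_REMOVE(pl, m, plinks.q);
		vm_page_free(m);	/* caller must own the page */
	}
}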
struct vm_page marker_page;
struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
struct vm_page vmd_clock[2]; /* markers for active queue scan */
fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
PCTRIE_DEFINE_SMR(VM_RADIX, vm_page, pindex, vm_radix_node_alloc,
vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
struct vm_page *m;
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
struct vm_page *iommu_pgalloc(struct vm_object *obj, vm_pindex_t idx,
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,