mmu_btop
pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
mfn = mmu_btop(pte & PT_PADDR_LGPG);
mfn = mmu_btop(pte & PT_PADDR);
if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
for (pages = mmu_btop(size); pages--; virt += MMU_PAGESIZE) {
pages = mmu_btop(sfile.size);
ppn, ppn + mmu_btop(size) - 1, err));
pages = mmu_btop(pap->size);
str, ppn, ppn + mmu_btop(CB_STACK_SIZE) - 1));
sfile.high_ppn = sfile.low_ppn + mmu_btop(sfile.size) - 1;
str, mmu_btop(len),
/*
 * PPN stored in sfile's buffer map at byte offset "off".
 * The whole expansion is parenthesized so the macro behaves as a
 * single lvalue in any context (e.g. SF_BUF_PPN(off)++ binds to the
 * dereferenced element, not to the pointer sum).
 */
#define SF_BUF_PPN(off)		(*(sfile.buf_map + mmu_btop(off)))
/*
 * Original PPN for byte offset "off": sfile's base PPN plus the page
 * index of the offset.  Outer parentheses prevent operator-precedence
 * surprises when the macro is embedded in a larger expression
 * (previously 2 * SF_ORIG_PPN(x) expanded to 2*sfile.low_ppn + ...).
 */
#define SF_ORIG_PPN(off)	(sfile.low_ppn + mmu_btop(off))
/*
 * CPR_MAX_BLOCK expressed as a page count.
 * NOTE(review): presumably the max kernel pages handled per CPR block —
 * confirm against the CPR code.  Expansion parenthesized so the macro
 * is safe inside arbitrary expressions.
 */
#define	CB_MAX_KPAGES		(mmu_btop(CPR_MAX_BLOCK))
cwinfo.ranges, mmu_btop(cwinfo.size));
cwinfo.ranges, mmu_btop(cwinfo.size));
pp = page_numtopp_nolock(mmu_btop(pa));
pp = page_numtopp_nolock(mmu_btop(pa));
pp = page_numtopp_nolock(mmu_btop(pa));
npages = mmu_btop(vpm_cache_size);
ulong_t pfn = mfn_to_pfn_mapping[mmu_btop(ma)];
pfn = mmu_btop(pa - mfn_base);
mmu_btop(ma), PROT_READ | PROT_WRITE, HAT_LOAD);
mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
pfn = xen_assign_pfn(mmu_btop(maddr));
pfn = mmu_btop(maddr);
if (start == mmu_ptob(mmu_btop(getcr3_pa())))
pfn = xen_assign_pfn(mmu_btop(base));
if (start == mmu_ptob(mmu_btop(getcr3_pa())))
pfn = xen_assign_pfn(mmu_btop(base));
pfn = xen_assign_pfn(mmu_btop(page_base));
mmu_btop((ulong_t)rp->regspec_addr &
hp->ah_pfn = mmu_btop(
hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr &
pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase)));
*vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase));
mmu_btop(pbase), mp->map_prot | hat_acc_flags,
hp->ah_pfn = mmu_btop(pbase);
pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) |
hp->ah_pfn = mmu_btop(pbase);
pfn = xen_assign_pfn(mmu_btop(paddr));
pfn = mmu_btop(paddr);
pfn = mmu_btop(paddr);
va_pfn = mmu_btop(va - ONE_GIG);
copy_pfn = mmu_btop(next_phys);
pa = pfn_to_pa(xen_assign_pfn(mmu_btop(pa))) | (pa & MMU_PAGEOFFSET);
MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;
pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;
pages = mmu_btop(mlp_last_addr - address);
memscrub_phys_pages += mmu_btop(bytes);
base = pfn_to_pa(xen_assign_pfn(mmu_btop(addr))) |
hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
phys_addr = pfn_to_pa(xen_assign_pfn(mmu_btop(phys_addr))) |
pfn_t pfn = mmu_btop(phys_addr);
if (physmax + 1 > mmu_btop(TERABYTE / 4)) {
mmu_btop(size));
segkpsize = mmu_btop(ROUND_UP_LPAGE(size));
segkvmmsize = mmu_btop(ROUND_UP_LPAGE(size));
segziosize = mmu_btop(ROUND_UP_LPAGE(size));
/*
 * 64 TB converted to a page count.
 * NOTE(review): presumably the physical-memory cap for the 64-bit
 * kernel — confirm at the use site.  Expansion parenthesized so the
 * macro is safe inside arbitrary expressions.
 */
#define	PHYSMEM_MAX64		(mmu_btop(64 * TERABYTE))
segkpsize = mmu_btop(SEGKPDEFSIZE);
segkpsize = mmu_btop((size_t)lvalue);
t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
for (i = 0; i < mmu_btop(pgsize); ++i) {
if (i == mmu_btop(pgsize)) {
pgindx += mmu_btop(pgsize);
IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
pfn += mmu_btop(pgsize);
pg_index = mmu_btop(va - mmu.kmap_addr);
pg_index = mmu_btop(vaddr - mmu.kmap_addr);
pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
pg_off = mmu_btop(vaddr - mmu.kmap_addr);
pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
pgcnt = mmu_btop(eaddr - vaddr);
pfn += mmu_btop(LEVEL_SIZE(l));
pgcnt -= mmu_btop(LEVEL_SIZE(l));
pteptr = x86pte_mapin(mmu_btop(pte_pa),
ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
pteptr = x86pte_mapin(mmu_btop(pte_pa),
mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
ulong_t mfn = mfn_list[mmu_btop(pa)];
pfn += mmu_btop(va - vaddr);
pte = kdi_ptom(mmu_ptob(mmu_btop(pa))) | PT_VALID;
mmu_btop(PTE_GET((p), PTE_IS_LGPG((p), (l)) ? PT_PADDR_LGPG : PT_PADDR))
window_size = mmu_btop(map_len) * mmu.pte_size;
pfn_t gdtpfn = mmu_btop(CPU->cpu_m.mcpu_gdtpa);
mmu_btop(xen_info->pt_base - ONE_GIG));
mmu_btop(getcr3_pa()));
pfn += mmu_btop(des_va - va);
return (mmu_btop(ma_to_pa(old_pte)));
lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
hi = mmu_btop(mattr->dma_attr_addr_hi);
pfnseg = mmu_btop(mattr->dma_attr_seg);
pfnalign = mmu_btop(align);
lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
pfnseg = mmu_btop(mattr->dma_attr_seg);
pfnalign = mmu_btop(align);
lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
pfnseg = mmu_btop(mattr->dma_attr_seg);
pfnalign = mmu_btop(align);
lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
pfnalign = mmu_btop(align);
mmu_btop(dma_attr->dma_attr_addr_lo),
mmu_btop(dma_attr->dma_attr_addr_hi + 1),
pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
/* Page frame number of the 16 MiB boundary (0x1000000 bytes converted to pages). */
#define PFN_16M		(mmu_btop((uint64_t)0x1000000))
vgc->gdt_frames[0] = pa_to_ma(mmu_btop(cp->cpu_m.mcpu_gdtpa));
vgc->gdt_frames[0] = pfn_to_mfn(mmu_btop(cp->cpu_m.mcpu_gdtpa));
err = xen_kpm_page(mmu_btop(cp->cpu_m.mcpu_gdtpa), pt_bits);
npgs = mmu_btop(lsize);
size_t j = mmu_btop(off);
sz2 = P2ROUNDUP(mmu_btop(sz) * sizeof (mfn_t), MMU_PAGESIZE);
j = mmu_btop(off);
mfn_t mfn = pfn_to_mfn(mmu_btop(pa));
pfn_t pfn = mfn_to_pfn(mmu_btop(ma));
toplevel_pfn = mmu_btop(xpv_panic_cr3);
fault_pfn = mmu_btop(*(uint64_t *)addr);
ASSERT(color < mmu_btop(shm_alignment));
*pfnbp = mmu_btop(rng_addr);
*pfnlp = mmu_btop(rng_addr + rng_size);
pfn = mmu_btop(((uint64_t)(rp->regspec_bustype &
hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr & (~MMU_PAGEOFFSET));
pfn_t fault_pfn = mmu_btop(*(uint64_t *)addr);
fault_pfn = mmu_btop(*(uint64_t *)addr);
segziosize = mmu_btop(roundup(size, MMU_PAGESIZE));
roundup(mmu_btop(kpm_size * vac_colors) * sizeof (page_t),
if (mmu_btop(alignsize) > (npages >> 2))
mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
pfn = basepfn + mmu_btop(offset);
pbm_p->pbm_base_pfn = mmu_btop(base_addr);
pbm_p->pbm_last_pfn = mmu_btop(last_addr);
npages = mmu_btop(pmem->ml_size);
ppn = mmu_btop(pmem->ml_address);
npages = mmu_btop(pmem->ml_size);
ppn = mmu_btop(pmem->ml_address);
dp->cbd_spfn = mmu_btop(pmem->ml_address);
dp->cbd_epfn = mmu_btop(pmem->ml_address + pmem->ml_size) - 1;
page_t *pp = page_numtopp_nolock(mmu_btop(pa));
pfn = mmu_btop(pa);
if (page_numtopp_nolock(mmu_btop(cur->address)) == NULL) {
ppmap_pages = mmu_btop(shm_alignment);
nsets = mmu_btop(PPMAPSIZE);
pfn = mmu_btop(base);
nsets = mmu_btop(PPMAPSIZE);
end - start, mmu_btop(start),