PHYS_PFN
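PHYS_PFN() converts a physical address into a page frame number by shifting right by PAGE_SHIFT; PFN_PHYS() is its inverse. The lines below are a cross-reference of in-tree call sites. For context, a minimal sketch of the definitions as found in include/linux/pfn.h (quoted from memory, verify against the tree you are reading):

	/* include/linux/pfn.h: pfn <-> physical address conversions */
	#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
	#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)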
if (PHYS_PFN(addr) != pfn)
unsigned long pfn = PHYS_PFN(offset);
ms = __pfn_to_section(PHYS_PFN(addr));
#define ARCH_PFN_OFFSET PHYS_PFN(PAGE_OFFSET_RAW)
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
return __phys_mem_access_prot(PHYS_PFN(offset), vm_end - vm_start, prot);
spfn = PHYS_PFN(start);
epfn = PHYS_PFN(end);
uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
base_pfn = PHYS_PFN(addr);
pte_t pte = pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block + i * PAGE_SIZE)), PAGE_KERNEL));
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
pfn_pte(PHYS_PFN(pa), prot));
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1);
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
const unsigned long nr_pages = PHYS_PFN(size);
const unsigned long start_pfn = PHYS_PFN(start);
return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
const unsigned long nr_pages = PHYS_PFN(size);
uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);
uv_share_page(PHYS_PFN(__pa(addr)), numpages);
unsigned long pfn = PHYS_PFN(__pa(addr));
uv_share_page(PHYS_PFN(qpage_phys),
uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
f->pfn = PHYS_PFN(large_crste_to_phys(oldcrste, f->gfn));
if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
struct page *p = pfn_to_page(PHYS_PFN(phys));
#define RMP_ENTRY_INDEX(x) ((u64)(PHYS_PFN((x) & rmp_segment_mask)))
size = PHYS_PFN(mapped_size) << 4;
rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;
rmp_size = PHYS_PFN(mapped_size) << 4;
dump_rmpentry(PHYS_PFN(paddr));
start_pfn = max(start_pfn, PHYS_PFN(SZ_1M));
if (tmb->end_pfn > PHYS_PFN(tdmr->base))
shared_info_pfn = PHYS_PFN(pa);
#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
PHYS_PFN(XCHAL_KSEG_SIZE))
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
pfn = PHYS_PFN(physical_addr);
unit_pages < PHYS_PFN(memory_block_size_bytes()))
pgoff += PHYS_PFN(range_len(&ranges[i].range));
pfn = PHYS_PFN(phys);
pfn = PHYS_PFN(phys);
pfn = PHYS_PFN(phys);
pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
e->page_frame_number = PHYS_PFN(mem_err->physical_addr);
pfn = PHYS_PFN(pa);
pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
ret = vmf_insert_pfn(vma, addr, PHYS_PFN(phys_addr));
return PHYS_PFN(offset + xpagemap->hpa_base);
PHYS_PFN(qp->sq_cmb_addr),
PHYS_PFN(qp->rq_cmb_addr),
PHYS_PFN(db_phys), 0, NULL);
batch_add_pfn_num(batch, PHYS_PFN(dmabuf->phys.paddr + start),
if (!batch_add_pfn(batch, PHYS_PFN(phys)))
*(out_pages++) = pfn_to_page(PHYS_PFN(phys));
int count = PHYS_PFN(size);
count = PHYS_PFN(size);
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
for (i = 0; i < PHYS_PFN(size); i++) {
PHYS_PFN(sg_dma_address(sglist)));
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
int count = PHYS_PFN(size);
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
pfn = PHYS_PFN(virt_to_phys(b->batch_page));
unsigned long base_pfn = PHYS_PFN(base);
unsigned long base_pfn = PHYS_PFN(base);
.end_pfn = PHYS_PFN(end),
nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
altmap->free = PHYS_PFN(offset - reserve);
npfns = PHYS_PFN(size - SZ_8K);
npfns = PHYS_PFN(size - offset - end_trunc);
*pfn = PHYS_PFN(pmem->phys_addr + offset);
actual_nr = PHYS_PFN(
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
pfn_start = PHYS_PFN(phys);
pfn_end = pfn_start + PHYS_PFN(len);
unsigned long pfn = PHYS_PFN(res->start);
unsigned long pfn_j = PHYS_PFN(res_j->start);
if (page_is_ram(PHYS_PFN(pmc->base_addr)))
addr = PHYS_PFN(addr);
unsigned int count = PHYS_PFN(size);
count = PHYS_PFN(size);
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
for (i = 0; i < PHYS_PFN(size); i++) {
PHYS_PFN(sg_dma_address(sglist)));
PHYS_PFN(mmu->dmap->mmu_info->aperture_end),
unsigned int count = PHYS_PFN(size);
PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
if (!pfn_valid(PHYS_PFN(paddr)))
start_pfn = PHYS_PFN(region->memphys);
pfn = PHYS_PFN(region->memphys);
pfn = PHYS_PFN(memregion->memphys) + addr_to_pgoff(vma, addr);
unit_pages = PHYS_PFN(memory_block_size_bytes());
unit_pages = PHYS_PFN(vm->bbm.bb_size);
PHYS_PFN(address + off));
return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
if (*pfnp & (PHYS_PFN(size)-1))
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
PHYS_PFN(size), DAX_RECOVERY_WRITE,
nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
*pfn = PHYS_PFN(fs->window_phys_addr + offset);
#define __phys_to_pfn(paddr) PHYS_PFN(paddr)
#define phys_to_page(phys) pfn_to_page(PHYS_PFN(phys))
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());
return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
unsigned long pfn = PHYS_PFN(offset);
unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
struct page *page = pfn_to_online_page(PHYS_PFN(phys));
const unsigned long start_pfn = PHYS_PFN(phys);
unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
pfn = PHYS_PFN(chunk->phys[i]);
if (devmem_is_allowed(PHYS_PFN(res->start)) &&
devmem_is_allowed(PHYS_PFN(res->end))) {
cmrp->base_pfn = PHYS_PFN(mlp->base);
folio = damon_get_folio(PHYS_PFN(addr));
folio = damon_get_folio(PHYS_PFN(addr));
folio = damon_get_folio(PHYS_PFN(addr));
folio = damon_get_folio(PHYS_PFN(addr));
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
listnode = early_pfn_to_nid(PHYS_PFN(__pa(m)));
pfn = PHYS_PFN(paddr);
pfn = PHYS_PFN(phys);
start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
if (PHYS_PFN(phys) < min_low_pfn ||
PHYS_PFN(phys + object->size) > max_low_pfn)
PHYS_PFN(range->end - range->start);
return PHYS_PFN(memblock_phys_mem_size() -
if (memmap_pages == PHYS_PFN(memory_block_size_bytes()))
.base_pfn = PHYS_PFN(cur_start),
.end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
PHYS_PFN(range_len(range)), NULL);
pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start));
conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end));
error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
PHYS_PFN(range->end), pgmap, GFP_KERNEL));
error = pfnmap_track(PHYS_PFN(range->start), range_len(range),
error = add_pages(nid, PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), params);
move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), params->altmap,
PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), pgmap);
pfnmap_untrack(PHYS_PFN(range->start), range_len(range));
pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
unsigned long pfn = PHYS_PFN(range->start);
if (pfn >= PHYS_PFN(range->start) &&
pfn <= PHYS_PFN(range->end))
remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
PHYS_PFN(range_len(range)));
__remove_pages(PHYS_PFN(range->start),
start_pfn = PHYS_PFN(memblock_start_of_DRAM());
if (usable_startpfn < PHYS_PFN(SZ_4G)) {
unsigned long max_pfn = PHYS_PFN(max_addr);
unsigned long base_pfn = PHYS_PFN(base);
unsigned long hole_pfns = PHYS_PFN(hole);
ptpfn = PHYS_PFN(__pa(p));
PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);
pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr));
.pglen = PHYS_PFN(len_), \
pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
start_pfn = PHYS_PFN(phys);
end_pfn = PHYS_PFN(end_phys);
pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
*pfn = PHYS_PFN(pmem->phys_addr + offset);
return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
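Most of the call sites above follow a few recurring patterns: converting a physical address (often the result of __pa() or virt_to_phys()) into a pfn before calling pfn_valid(), pfn_to_page() or pfn_pte(); converting a byte size or offset into a page count; or checking that a pfn survives the PFN_PHYS()/PHYS_PFN() round trip where phys_addr_t is wider than unsigned long. A small, hypothetical sketch combining those patterns (the function and variable names are illustrative, not taken from any file above):

	/* Illustrative only: combines the common PHYS_PFN() idioms seen in the list above. */
	static struct page *example_phys_lookup(phys_addr_t phys, size_t size)
	{
		unsigned long pfn = PHYS_PFN(phys);      /* physical address -> page frame number */
		unsigned long nr_pages = PHYS_PFN(size); /* byte count -> page count (size assumed page-aligned) */

		/* Guard against truncation when phys_addr_t is wider than unsigned long. */
		if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
			return NULL;

		if (!nr_pages || !pfn_valid(pfn))
			return NULL;

		return pfn_to_page(pfn);                 /* equivalent to phys_to_page(phys) */
	}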