Symbol: folio_pfn
arch/arm/mm/flush.c
235
flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
arch/arm/mm/flush.c
267
pfn = folio_pfn(folio);
arch/arm/mm/flush.c
342
if (is_zero_pfn(folio_pfn(folio)))
arch/csky/abiv1/cacheflush.c
22
if (is_zero_pfn(folio_pfn(folio)))
arch/microblaze/include/asm/cacheflush.h
79
unsigned long addr = folio_pfn(folio) << PAGE_SHIFT;
arch/nios2/mm/cacheflush.c
183
if (is_zero_pfn(folio_pfn(folio)))
arch/parisc/kernel/cache.c
122
pfn = folio_pfn(folio);
arch/parisc/kernel/cache.c
508
unsigned long pfn = folio_pfn(folio);
arch/powerpc/mm/cacheflush.c
170
unsigned long pfn = folio_pfn(folio);
arch/s390/include/asm/page.h
261
#define folio_to_phys(page) pfn_to_phys(folio_pfn(folio))
arch/sh/mm/cache-sh4.c
121
unsigned long pfn = folio_pfn(folio);
arch/sh/mm/cache-sh7705.c
143
unsigned long pfn = folio_pfn(folio);
arch/sparc/kernel/smp_64.c
933
unsigned long pfn = folio_pfn(folio);
arch/sparc/mm/init_64.c
459
unsigned long pfn = folio_pfn(folio);
arch/xtensa/mm/cache.c
142
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
arch/xtensa/mm/cache.c
236
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
drivers/dma-buf/udmabuf.c
58
pfn = folio_pfn(ubuf->folios[pgoff]);
drivers/dma-buf/udmabuf.c
76
pfn = folio_pfn(ubuf->folios[pgoff]);
drivers/gpu/drm/drm_gem.c
710
(folio_pfn(folio) >= 0x00100000UL));
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
164
folio_pfn(folio) != next_pfn) {
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
176
next_pfn = folio_pfn(folio) + nr_pages;
fs/dax.c
1389
*entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio),
fs/hugetlbfs/inode.c
405
unsigned long pfn = folio_pfn(folio);
fs/ramfs/file-nommu.c
236
pfn = folio_pfn(fbatch.folios[0]);
fs/ramfs/file-nommu.c
240
if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
include/linux/mm.h
2285
return pfn_pte(folio_pfn(folio), pgprot);
include/linux/mm.h
2301
return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
include/linux/mm.h
2317
return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
include/linux/rmap.h
878
.pfn = folio_pfn(_folio), \
include/trace/events/filemap.h
31
__entry->pfn = folio_pfn(folio);
include/trace/events/huge_memory.h
128
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/huge_memory.h
186
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/huge_memory.h
220
__entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
include/trace/events/huge_memory.h
73
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/pagemap.h
43
__entry->pfn = folio_pfn(folio);
include/trace/events/pagemap.h
74
__entry->pfn = folio_pfn(folio);
include/trace/events/vmscan.h
348
__entry->pfn = folio_pfn(folio);
kernel/liveupdate/kexec_handover.c
818
const unsigned long pfn = folio_pfn(folio);
kernel/liveupdate/kexec_handover.c
839
const unsigned long pfn = folio_pfn(folio);
lib/test_hmm.c
1710
offset = folio_pfn(tail) - folio_pfn(head);
mm/filemap.c
158
current->comm, folio_pfn(folio));
mm/gup.c
2243
const unsigned long start_pfn = folio_pfn(folio);
mm/huge_memory.c
1546
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
mm/huge_memory.c
1670
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
mm/huge_memory.c
235
WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
mm/hugetlb.c
3177
unsigned long head_pfn = folio_pfn(folio);
mm/hugetlb.c
3235
WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
mm/internal.h
357
folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte));
mm/ksm.c
1311
flush_cache_page(vma, pvmw.address, folio_pfn(folio));
mm/ksm.c
1843
nid = get_kpfn_nid(folio_pfn(folio));
mm/ksm.c
2049
kpfn = folio_pfn(kfolio);
mm/ksm.c
3277
VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
mm/ksm.c
3278
stable_node->kpfn = folio_pfn(newfolio);
mm/memfd_luo.c
197
pfolio->pfn = folio_pfn(folio);
mm/memory-failure.c
1563
folio_pfn(folio));
mm/memory.c
6013
int nr = pte_pfn(fault_pte) - folio_pfn(folio);
mm/memory_hotplug.c
1813
pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
mm/memory_hotplug.c
1872
folio_pfn(folio), ret);
mm/migrate_device.c
1466
src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
mm/migrate_device.c
1480
dst_pfn = migrate_pfn(folio_pfn(dfolio));
mm/page_alloc.c
3037
unsigned long pfn = folio_pfn(folio);
mm/page_alloc.c
3061
unsigned long pfn = folio_pfn(folio);
mm/rmap.c
2104
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/rmap.c
2479
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/rmap.c
2510
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/vmscan.c
1126
unmap_poisoned_folio(folio, folio_pfn(folio), false);
virt/kvm/guest_memfd.c
54
return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));