Symbol: pfn (page frame number), occurrences under arch/
arch/alpha/include/asm/pgtable.h:151: #define PHYS_TWIDDLE(pfn) \
arch/alpha/include/asm/pgtable.h:152: ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
arch/alpha/include/asm/pgtable.h:153: ? ((pfn) ^= KSEG_PFN) : (pfn))
arch/alpha/include/asm/pgtable.h:155: #define PHYS_TWIDDLE(pfn) (pfn)
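Nearly everything in this listing turns on one identity: a pfn is a physical address shifted right by PAGE_SHIFT. A minimal model of the PHYS_TWIDDLE entries above, simplified to a single-bit KSEG mask rather than the kernel's exact constants:

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 13                        /* Alpha uses 8 KiB pages */
#define KSEG_BIT   0x40000000000ULL          /* the KSEG window bit */
#define KSEG_PFN   (KSEG_BIT >> PAGE_SHIFT)

/* Simplified model of PHYS_TWIDDLE(): if a pfn still carries the KSEG
 * window bit, XOR it back out so the pfn names the raw physical frame.
 * (The kernel's KSEG_PFN mask is wider; one bit suffices to show it.) */
static uint64_t phys_twiddle(uint64_t pfn)
{
    return ((pfn & KSEG_PFN) == KSEG_PFN) ? (pfn ^ KSEG_PFN) : pfn;
}

int main(void)
{
    uint64_t pfn = 0x12345;
    assert(phys_twiddle(pfn | KSEG_PFN) == pfn);  /* bit stripped */
    assert(phys_twiddle(pfn) == pfn);             /* already clean */
    return 0;
}
```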
arch/alpha/kernel/core_marvel.c:688: unsigned long pfn;
arch/alpha/kernel/core_marvel.c:746: pfn = ptes[baddr >> PAGE_SHIFT];
arch/alpha/kernel/core_marvel.c:747: if (!(pfn & 1)) {
arch/alpha/kernel/core_marvel.c:752: pfn >>= 1; /* make it a true pfn */
arch/alpha/kernel/core_marvel.c:755: pfn << PAGE_SHIFT,
arch/alpha/kernel/core_titan.c:464: unsigned long pfn;
arch/alpha/kernel/core_titan.c:522: pfn = ptes[baddr >> PAGE_SHIFT];
arch/alpha/kernel/core_titan.c:523: if (!(pfn & 1)) {
arch/alpha/kernel/core_titan.c:528: pfn >>= 1; /* make it a true pfn */
arch/alpha/kernel/core_titan.c:531: pfn << PAGE_SHIFT,
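Marvel and Titan read their scatter-gather window PTEs in the same encoding: bit 0 is a valid bit and the pfn sits one bit up, hence the ">> 1 /* make it a true pfn */" in both files. A sketch of that decode (the helper name is invented):

```c
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 13  /* Alpha's 8 KiB pages */

/* Window-PTE decode pattern from core_marvel.c/core_titan.c above:
 * bit 0 = valid, pfn stored shifted up by one. */
static bool decode_window_pte(uint64_t pte, uint64_t *phys_out)
{
    if (!(pte & 1))           /* invalid entry */
        return false;
    *phys_out = (pte >> 1) << PAGE_SHIFT;
    return true;
}
```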
arch/alpha/kernel/setup.c:370: int page_is_ram(unsigned long pfn)
arch/alpha/kernel/setup.c:380: if (pfn >= cluster->start_pfn &&
arch/alpha/kernel/setup.c:381: pfn < cluster->start_pfn + cluster->numpages) {
arch/alpha/mm/init.c:187: unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
arch/alpha/mm/init.c:200: pfn_pte(pfn, PAGE_KERNEL));
arch/alpha/mm/init.c:201: pfn++;
arch/arc/include/asm/cacheflush.h:50: #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
arch/arc/include/asm/hugepage.h:45: #define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
arch/arc/include/asm/page.h:99: extern int pfn_valid(unsigned long pfn);
arch/arc/include/asm/pgtable-levels.h:144: #define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/arc/include/asm/pgtable-levels.h:178: #define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
arch/arc/mm/cache.c:825: unsigned long phy, pfn;
arch/arc/mm/cache.c:828: pfn = vmalloc_to_pfn((void *)kstart);
arch/arc/mm/cache.c:829: phy = (pfn << PAGE_SHIFT) + off;
arch/arc/mm/init.c:172: int pfn_valid(unsigned long pfn)
arch/arc/mm/init.c:174: return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
arch/arc/mm/init.c:175: (pfn >= min_low_pfn && pfn <= max_low_pfn);
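arc's pfn_valid() and alpha's page_is_ram() above are both interval tests: a pfn is RAM-backed iff it lands in one of a handful of [start, start + npages) clusters. A standalone sketch of the cluster form (struct and helper names are invented):

```c
#include <stdbool.h>

struct mem_cluster { unsigned long start_pfn, numpages; };

/* Interval test behind page_is_ram()/pfn_valid() in the entries above. */
static bool pfn_in_clusters(unsigned long pfn,
                            const struct mem_cluster *c, int n)
{
    for (int i = 0; i < n; i++)
        if (pfn >= c[i].start_pfn && pfn < c[i].start_pfn + c[i].numpages)
            return true;
    return false;
}
```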
arch/arm/include/asm/cacheflush.h:235: unsigned long user_addr, unsigned long pfn, unsigned int nr)
arch/arm/include/asm/cacheflush.h:251: #define flush_cache_pages(vma, addr, pfn, nr) \
arch/arm/include/asm/cacheflush.h:252: vivt_flush_cache_pages(vma, addr, pfn, nr)
arch/arm/include/asm/cacheflush.h:257: unsigned long pfn, unsigned int nr);
arch/arm/include/asm/cacheflush.h:261: #define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
arch/arm/include/asm/io.h:415: extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
arch/arm/include/asm/kfence.h:14: unsigned long pfn = PFN_DOWN(__pa(addr));
arch/arm/include/asm/kfence.h:21: set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
arch/arm/include/asm/mach/map.h:16: unsigned long pfn;
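Every ".pfn = __phys_to_pfn(...)" initializer in the long mach-* run below fills this map_desc member: a static VA-to-PA I/O window named by the pfn of its physical base. A self-contained sketch of the pattern (struct, field names, and constants are illustrative, not from any board file):

```c
#include <stdint.h>

#define PAGE_SHIFT 12
#define phys_to_pfn(p) ((unsigned long)((p) >> PAGE_SHIFT))

/* Stand-in for ARM's struct map_desc from asm/mach/map.h. */
struct map_desc_sketch {
    unsigned long va;      /* virtual base of the window */
    unsigned long pfn;     /* pfn of the physical base */
    unsigned long length;  /* window size in bytes */
    unsigned int  type;    /* memory type (MT_DEVICE etc. in the kernel) */
};

static const struct map_desc_sketch example_io_desc[] = {
    { 0xf8000000UL, phys_to_pfn(0xd8000000ULL), 0x10000, 0 },
};
```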
arch/arm/include/asm/mach/pci.h:68: extern void pci_map_io_early(unsigned long pfn);
arch/arm/include/asm/mach/pci.h:70: static inline void pci_map_io_early(unsigned long pfn) {}
arch/arm/include/asm/memory.h:330: #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
arch/arm/include/asm/pgtable-3level.h:211: #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
arch/arm/include/asm/pgtable.h:122: extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/arm/include/asm/pgtable.h:159: #define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
arch/arm/kernel/bios32.c:586: void __init pci_map_io_early(unsigned long pfn)
arch/arm/kernel/bios32.c:594: pci_io_desc.pfn = pfn;
arch/arm/kernel/crash_dump.c:19: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/arm/kernel/crash_dump.c:27: vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
arch/arm/kernel/efi.c:53: .pfn = __phys_to_pfn(md->phys_addr),
arch/arm/kernel/hibernate.c:26: int pfn_is_nosave(unsigned long pfn)
arch/arm/kernel/hibernate.c:31: return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
arch/arm/kernel/tcm.c:57: .pfn = __phys_to_pfn(DTCM_OFFSET),
arch/arm/kernel/tcm.c:66: .pfn = __phys_to_pfn(ITCM_OFFSET),
arch/arm/mach-bcm/board_bcmbca.c:11: .pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
arch/arm/mach-clps711x/board-dt.c:31: .pfn = __phys_to_pfn(CLPS711X_PHYS_BASE),
arch/arm/mach-davinci/da850.c:255: .pfn = __phys_to_pfn(IO_PHYS),
arch/arm/mach-davinci/da850.c:261: .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE),
arch/arm/mach-dove/common.c:49: .pfn = __phys_to_pfn(DOVE_SB_REGS_PHYS_BASE),
arch/arm/mach-dove/common.c:54: .pfn = __phys_to_pfn(DOVE_NB_REGS_PHYS_BASE),
arch/arm/mach-exynos/exynos.c:83: iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0]));
arch/arm/mach-footbridge/common.c:221: .pfn = __phys_to_pfn(DC21285_ARMCSR_BASE),
arch/arm/mach-footbridge/common.c:227: .pfn = __phys_to_pfn(DC21285_PCI_MEM),
arch/arm/mach-footbridge/common.c:232: .pfn = __phys_to_pfn(DC21285_PCI_TYPE_0_CONFIG),
arch/arm/mach-footbridge/common.c:237: .pfn = __phys_to_pfn(DC21285_PCI_TYPE_1_CONFIG),
arch/arm/mach-footbridge/common.c:242: .pfn = __phys_to_pfn(DC21285_PCI_IACK),
arch/arm/mach-gemini/board-dt.c:19: .pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
arch/arm/mach-hisi/hisilicon.c:31: .pfn = __phys_to_pfn(HI3620_SYSCTRL_PHYS_BASE),
arch/arm/mach-imx/hardware.h:104: .pfn = __phys_to_pfn(soc ## _ ## name ## _BASE_ADDR), \
arch/arm/mach-imx/platsmp.c:37: scu_io_desc.pfn = __phys_to_pfn(base);
arch/arm/mach-lpc32xx/common.c:80: .pfn = __phys_to_pfn(LPC32XX_AHB0_START),
arch/arm/mach-lpc32xx/common.c:86: .pfn = __phys_to_pfn(LPC32XX_AHB1_START),
arch/arm/mach-lpc32xx/common.c:92: .pfn = __phys_to_pfn(LPC32XX_FABAPB_START),
arch/arm/mach-lpc32xx/common.c:98: .pfn = __phys_to_pfn(LPC32XX_IRAM_BASE),
arch/arm/mach-mmp/common.c:27: .pfn = __phys_to_pfn(APB_PHYS_BASE),
arch/arm/mach-mmp/common.c:32: .pfn = __phys_to_pfn(AXI_PHYS_BASE),
arch/arm/mach-mmp/common.c:41: .pfn = __phys_to_pfn(PGU_PHYS_BASE),
arch/arm/mach-mv78xx0/common.c:132: .pfn = 0,
arch/arm/mach-mv78xx0/common.c:137: .pfn = __phys_to_pfn(MV78XX0_REGS_PHYS_BASE),
arch/arm/mach-mv78xx0/common.c:156: mv78xx0_io_desc[0].pfn = __phys_to_pfn(phys);
arch/arm/mach-nomadik/cpu-8815.c:63: .pfn = __phys_to_pfn(NOMADIK_UART1_BASE),
arch/arm/mach-omap1/board-ams-delta.c:136: .pfn = __phys_to_pfn(LATCH1_PHYS),
arch/arm/mach-omap1/board-ams-delta.c:143: .pfn = __phys_to_pfn(LATCH2_PHYS),
arch/arm/mach-omap1/board-ams-delta.c:150: .pfn = __phys_to_pfn(MODEM_PHYS),
arch/arm/mach-omap1/io.c:28: .pfn = __phys_to_pfn(OMAP1_IO_PHYS),
arch/arm/mach-omap1/io.c:33: .pfn = __phys_to_pfn(OMAP1_DSP_START),
arch/arm/mach-omap1/io.c:38: .pfn = __phys_to_pfn(OMAP1_DSPREG_START),
arch/arm/mach-omap2/io.c:108: .pfn = __phys_to_pfn(L4_WK_243X_PHYS),
arch/arm/mach-omap2/io.c:114: .pfn = __phys_to_pfn(OMAP243X_GPMC_PHYS),
arch/arm/mach-omap2/io.c:120: .pfn = __phys_to_pfn(OMAP243X_SDRC_PHYS),
arch/arm/mach-omap2/io.c:126: .pfn = __phys_to_pfn(OMAP243X_SMS_PHYS),
arch/arm/mach-omap2/io.c:138: .pfn = __phys_to_pfn(L3_34XX_PHYS),
arch/arm/mach-omap2/io.c:144: .pfn = __phys_to_pfn(L4_34XX_PHYS),
arch/arm/mach-omap2/io.c:150: .pfn = __phys_to_pfn(OMAP34XX_GPMC_PHYS),
arch/arm/mach-omap2/io.c:156: .pfn = __phys_to_pfn(OMAP343X_SMS_PHYS),
arch/arm/mach-omap2/io.c:162: .pfn = __phys_to_pfn(OMAP343X_SDRC_PHYS),
arch/arm/mach-omap2/io.c:168: .pfn = __phys_to_pfn(L4_PER_34XX_PHYS),
arch/arm/mach-omap2/io.c:174: .pfn = __phys_to_pfn(L4_EMU_34XX_PHYS),
arch/arm/mach-omap2/io.c:185: .pfn = __phys_to_pfn(L4_34XX_PHYS),
arch/arm/mach-omap2/io.c:196: .pfn = __phys_to_pfn(L4_34XX_PHYS),
arch/arm/mach-omap2/io.c:202: .pfn = __phys_to_pfn(L4_WK_AM33XX_PHYS),
arch/arm/mach-omap2/io.c:213: .pfn = __phys_to_pfn(L3_44XX_PHYS),
arch/arm/mach-omap2/io.c:219: .pfn = __phys_to_pfn(L4_44XX_PHYS),
arch/arm/mach-omap2/io.c:225: .pfn = __phys_to_pfn(L4_PER_44XX_PHYS),
arch/arm/mach-omap2/io.c:236: .pfn = __phys_to_pfn(L3_54XX_PHYS),
arch/arm/mach-omap2/io.c:242: .pfn = __phys_to_pfn(L4_54XX_PHYS),
arch/arm/mach-omap2/io.c:248: .pfn = __phys_to_pfn(L4_WK_54XX_PHYS),
arch/arm/mach-omap2/io.c:254: .pfn = __phys_to_pfn(L4_PER_54XX_PHYS),
arch/arm/mach-omap2/io.c:265: .pfn = __phys_to_pfn(L4_CFG_MPU_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:271: .pfn = __phys_to_pfn(L3_MAIN_SN_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:277: .pfn = __phys_to_pfn(L4_PER1_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:283: .pfn = __phys_to_pfn(L4_PER2_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:289: .pfn = __phys_to_pfn(L4_PER3_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:295: .pfn = __phys_to_pfn(L4_CFG_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:301: .pfn = __phys_to_pfn(L4_WKUP_DRA7XX_PHYS),
arch/arm/mach-omap2/io.c:68: .pfn = __phys_to_pfn(L3_24XX_PHYS),
arch/arm/mach-omap2/io.c:74: .pfn = __phys_to_pfn(L4_24XX_PHYS),
arch/arm/mach-omap2/io.c:84: .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
arch/arm/mach-omap2/io.c:90: .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
arch/arm/mach-omap2/io.c:96: .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
arch/arm/mach-omap2/omap4-common.c:160: dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
arch/arm/mach-orion5x/common.c:42: .pfn = __phys_to_pfn(ORION5X_REGS_PHYS_BASE),
arch/arm/mach-orion5x/common.c:47: .pfn = __phys_to_pfn(ORION5X_PCIE_WA_PHYS_BASE),
arch/arm/mach-orion5x/ts78xx-setup.c:49: .pfn = __phys_to_pfn(TS78XX_FPGA_REGS_PHYS_BASE),
arch/arm/mach-pxa/generic.c:103: .pfn = __phys_to_pfn(PERIPH_PHYS),
arch/arm/mach-pxa/pxa25x.c:163: .pfn = __phys_to_pfn(PXA2XX_SMEMC_BASE),
arch/arm/mach-pxa/pxa25x.c:168: .pfn = __phys_to_pfn(0x00000000),
arch/arm/mach-pxa/pxa27x.c:250: .pfn = __phys_to_pfn(PXA2XX_SMEMC_BASE),
arch/arm/mach-pxa/pxa27x.c:255: .pfn = __phys_to_pfn(0x00000000),
arch/arm/mach-pxa/pxa3xx.c:380: .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE),
arch/arm/mach-pxa/pxa3xx.c:385: .pfn = __phys_to_pfn(NAND_PHYS),
arch/arm/mach-rpc/riscpc.c:70: .pfn = __phys_to_pfn(SCREEN_START),
arch/arm/mach-rpc/riscpc.c:75: .pfn = __phys_to_pfn(IO_START),
arch/arm/mach-rpc/riscpc.c:80: .pfn = __phys_to_pfn(EASI_START),
arch/arm/mach-s3c/mach-s3c64xx-dt.c:24: .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
arch/arm/mach-s3c/s3c64xx.c:101: .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
arch/arm/mach-s3c/s3c64xx.c:106: .pfn = __phys_to_pfn(S3C64XX_PA_SROM),
arch/arm/mach-s3c/s3c64xx.c:111: .pfn = __phys_to_pfn(S3C_PA_UART),
arch/arm/mach-s3c/s3c64xx.c:116: .pfn = __phys_to_pfn(S3C64XX_PA_VIC0),
arch/arm/mach-s3c/s3c64xx.c:121: .pfn = __phys_to_pfn(S3C64XX_PA_VIC1),
arch/arm/mach-s3c/s3c64xx.c:126: .pfn = __phys_to_pfn(S3C_PA_TIMER),
arch/arm/mach-s3c/s3c64xx.c:131: .pfn = __phys_to_pfn(S3C64XX_PA_GPIO),
arch/arm/mach-s3c/s3c64xx.c:136: .pfn = __phys_to_pfn(S3C64XX_PA_MODEM),
arch/arm/mach-s3c/s3c64xx.c:141: .pfn = __phys_to_pfn(S3C64XX_PA_WATCHDOG),
arch/arm/mach-s3c/s3c64xx.c:146: .pfn = __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
arch/arm/mach-s5pv210/s5pv210.c:33: iodesc.pfn = __phys_to_pfn(be32_to_cpu(reg[0]));
arch/arm/mach-sa1100/assabet.c:669: .pfn = __phys_to_pfn(0x12000000),
arch/arm/mach-sa1100/assabet.c:674: .pfn = __phys_to_pfn(0x4b800000),
arch/arm/mach-sa1100/collie.c:398: .pfn = __phys_to_pfn(0x00000000),
arch/arm/mach-sa1100/collie.c:403: .pfn = __phys_to_pfn(0x08000000),
arch/arm/mach-sa1100/generic.c:364: .pfn = __phys_to_pfn(0x80000000),
arch/arm/mach-sa1100/generic.c:369: .pfn = __phys_to_pfn(0x90000000),
arch/arm/mach-sa1100/generic.c:374: .pfn = __phys_to_pfn(0xa0000000),
arch/arm/mach-sa1100/generic.c:379: .pfn = __phys_to_pfn(0xb0000000),
arch/arm/mach-sa1100/h3xxx.c:248: .pfn = __phys_to_pfn(SA1100_CS2_PHYS),
arch/arm/mach-sa1100/h3xxx.c:253: .pfn = __phys_to_pfn(SA1100_CS4_PHYS),
arch/arm/mach-sa1100/h3xxx.c:258: .pfn = __phys_to_pfn(H3600_EGPIO_PHYS),
arch/arm/mach-sa1100/jornada720.c:288: .pfn = __phys_to_pfn(EPSONREGSTART),
arch/arm/mach-sa1100/jornada720.c:293: .pfn = __phys_to_pfn(EPSONFBSTART),
arch/arm/mach-spear/spear1310.c:43: .pfn = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
arch/arm/mach-spear/spear13xx.c:61: .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
arch/arm/mach-spear/spear13xx.c:66: .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
arch/arm/mach-spear/spear13xx.c:71: .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
arch/arm/mach-spear/spear13xx.c:76: .pfn = __phys_to_pfn(L2CC_BASE),
arch/arm/mach-spear/spear320.c:251: .pfn = __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
arch/arm/mach-spear/spear3xx.c:54: .pfn = __phys_to_pfn(SPEAR_ICM1_2_BASE),
arch/arm/mach-spear/spear3xx.c:59: .pfn = __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
arch/arm/mach-spear/spear6xx.c:346: .pfn = __phys_to_pfn(SPEAR_ICM3_ML1_2_BASE),
arch/arm/mach-spear/spear6xx.c:351: .pfn = __phys_to_pfn(SPEAR_ICM1_2_BASE),
arch/arm/mach-spear/spear6xx.c:356: .pfn = __phys_to_pfn(SPEAR_ICM3_SMI_CTRL_BASE),
arch/arm/mach-tegra/io.c:27: .pfn = __phys_to_pfn(IO_PPSB_PHYS),
arch/arm/mach-tegra/io.c:33: .pfn = __phys_to_pfn(IO_APB_PHYS),
arch/arm/mach-tegra/io.c:39: .pfn = __phys_to_pfn(IO_CPU_PHYS),
arch/arm/mach-tegra/io.c:45: .pfn = __phys_to_pfn(IO_IRAM_PHYS),
arch/arm/mach-versatile/integrator_ap.c:47: .pfn = __phys_to_pfn(INTEGRATOR_IC_BASE),
arch/arm/mach-versatile/integrator_ap.c:52: .pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE),
arch/arm/mach-versatile/integrator_cp.c:40: .pfn = __phys_to_pfn(INTEGRATOR_IC_BASE),
arch/arm/mach-versatile/integrator_cp.c:45: .pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE),
arch/arm/mach-versatile/integrator_cp.c:50: .pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
arch/arm/mach-versatile/versatile.c:94: .pfn = __phys_to_pfn(VERSATILE_SCTL_BASE),
arch/arm/mach-vt8500/vt8500.c:43: .pfn = __phys_to_pfn(0xd8000000),
arch/arm/mach-zynq/common.c:160: zynq_cortex_a9_scu_map.pfn = __phys_to_pfn(base);
arch/arm/mm/dma-mapping.c:284: map.pfn = __phys_to_pfn(start);
arch/arm/mm/dma-mapping.c:632: unsigned long pfn = __phys_to_pfn(phys);
arch/arm/mm/dma-mapping.c:645: phys = __pfn_to_phys(pfn);
arch/arm/mm/dma-mapping.c:651: vaddr = kmap_atomic_pfn(pfn);
arch/arm/mm/dma-mapping.c:669: pfn++;
arch/arm/mm/fault-armv.c:114: ret = do_adjust_pte(vma, address, pfn, pte);
arch/arm/mm/fault-armv.c:125: unsigned long addr, pte_t *ptep, unsigned long pfn)
arch/arm/mm/fault-armv.c:167: aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
arch/arm/mm/fault-armv.c:171: do_adjust_pte(vma, addr, pfn, ptep);
arch/arm/mm/fault-armv.c:190: unsigned long pfn = pte_pfn(*ptep);
arch/arm/mm/fault-armv.c:194: if (!pfn_valid(pfn))
arch/arm/mm/fault-armv.c:201: if (is_zero_pfn(pfn))
arch/arm/mm/fault-armv.c:204: folio = page_folio(pfn_to_page(pfn));
arch/arm/mm/fault-armv.c:210: make_coherent(mapping, vma, addr, ptep, pfn);
arch/arm/mm/fault-armv.c:37: unsigned long pfn, pte_t *ptep)
arch/arm/mm/fault-armv.c:52: flush_cache_page(vma, address, pfn);
arch/arm/mm/fault-armv.c:53: outer_flush_range((pfn << PAGE_SHIFT),
arch/arm/mm/fault-armv.c:54: (pfn << PAGE_SHIFT) + PAGE_SIZE);
arch/arm/mm/fault-armv.c:65: unsigned long pfn, bool need_lock)
arch/arm/mm/flush.c:101: vivt_flush_cache_pages(vma, user_addr, pfn, nr);
arch/arm/mm/flush.c:106: flush_pfn_alias(pfn, user_addr);
arch/arm/mm/flush.c:115: #define flush_pfn_alias(pfn,vaddr) do { } while (0)
arch/arm/mm/flush.c:116: #define flush_icache_alias(pfn,vaddr,len) do { } while (0)
arch/arm/mm/flush.c:255: unsigned long start, offset, pfn;
arch/arm/mm/flush.c:267: pfn = folio_pfn(folio);
arch/arm/mm/flush.c:271: pfn -= offset;
arch/arm/mm/flush.c:279: flush_cache_pages(vma, start, pfn, nr);
arch/arm/mm/flush.c:287: unsigned long pfn;
arch/arm/mm/flush.c:294: pfn = pte_pfn(pteval);
arch/arm/mm/flush.c:295: if (!pfn_valid(pfn))
arch/arm/mm/flush.c:298: folio = page_folio(pfn_to_page(pfn));
arch/arm/mm/flush.c:38: static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
arch/arm/mm/flush.c:384: unsigned long pfn;
arch/arm/mm/flush.c:393: pfn = page_to_pfn(page);
arch/arm/mm/flush.c:395: flush_cache_page(vma, vmaddr, pfn);
arch/arm/mm/flush.c:401: flush_pfn_alias(pfn, vmaddr);
arch/arm/mm/flush.c:43: set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
arch/arm/mm/flush.c:52: static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
arch/arm/mm/flush.c:58: set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
arch/arm/mm/flush.c:98: void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
arch/arm/mm/init.c:122: int pfn_valid(unsigned long pfn)
arch/arm/mm/init.c:124: phys_addr_t addr = __pfn_to_phys(pfn);
arch/arm/mm/init.c:127: if (__phys_to_pfn(addr) != pfn)
arch/arm/mm/ioremap.c:213: remap_area_sections(unsigned long virt, unsigned long pfn,
arch/arm/mm/ioremap.c:226: pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
arch/arm/mm/ioremap.c:227: pfn += SZ_1M >> PAGE_SHIFT;
arch/arm/mm/ioremap.c:228: pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
arch/arm/mm/ioremap.c:229: pfn += SZ_1M >> PAGE_SHIFT;
arch/arm/mm/ioremap.c:240: remap_area_supersections(unsigned long virt, unsigned long pfn,
arch/arm/mm/ioremap.c:254: super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
arch/arm/mm/ioremap.c:256: super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
arch/arm/mm/ioremap.c:267: pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
arch/arm/mm/ioremap.c:274: static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
arch/arm/mm/ioremap.c:281: phys_addr_t paddr = __pfn_to_phys(pfn);
arch/arm/mm/ioremap.c:287: if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
arch/arm/mm/ioremap.c:303: if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
arch/arm/mm/ioremap.c:318: if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
arch/arm/mm/ioremap.c:331: cpu_is_xsc3()) && pfn >= 0x100000 &&
arch/arm/mm/ioremap.c:334: err = remap_area_supersections(addr, pfn, size, type);
arch/arm/mm/ioremap.c:337: err = remap_area_sections(addr, pfn, size, type);
arch/arm/mm/ioremap.c:357: unsigned long pfn = __phys_to_pfn(phys_addr);
arch/arm/mm/ioremap.c:366: return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
arch/arm/mm/ioremap.c:380: __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
arch/arm/mm/ioremap.c:383: return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
arch/arm/mm/mmap.c:164: int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
arch/arm/mm/mmap.c:166: return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
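The check at mmap.c:166 stays in pfn units so the sum cannot overflow the way a byte-address computation could. The same test standalone (the 40-bit PHYS_MASK is an assumed value):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PHYS_MASK  ((1ULL << 40) - 1)  /* assumed 40-bit physical space */

/* arm's valid_mmap_phys_addr_range(), done in pfn units. */
static bool mmap_range_ok(uint64_t pfn, size_t size)
{
    return pfn + ((uint64_t)size >> PAGE_SHIFT)
           <= 1 + (PHYS_MASK >> PAGE_SHIFT);
}
```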
arch/arm/mm/mmu.c:1026: vm->phys_addr = __pfn_to_phys(md->pfn);
arch/arm/mm/mmu.c:1135: debug_ll_addr(&map.pfn, &map.virtual);
arch/arm/mm/mmu.c:1136: if (!map.pfn || !map.virtual)
arch/arm/mm/mmu.c:1138: map.pfn = __phys_to_pfn(map.pfn);
arch/arm/mm/mmu.c:1380: map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
arch/arm/mm/mmu.c:1391: map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
arch/arm/mm/mmu.c:1398: map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
arch/arm/mm/mmu.c:1410: map.pfn = __phys_to_pfn(virt_to_phys(vectors));
arch/arm/mm/mmu.c:1428: map.pfn += 1;
arch/arm/mm/mmu.c:1519: map.pfn = __phys_to_pfn(start);
arch/arm/mm/mmu.c:1525: map.pfn = __phys_to_pfn(kernel_sec_end);
arch/arm/mm/mmu.c:1545: map.pfn = __phys_to_pfn(start);
arch/arm/mm/mmu.c:1591: map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
arch/arm/mm/mmu.c:1597: map.pfn = __phys_to_pfn(kernel_x_start);
arch/arm/mm/mmu.c:1607: map.pfn = __phys_to_pfn(kernel_nx_start);
arch/arm/mm/mmu.c:1736: map.pfn = pte_pfn(*pte);
arch/arm/mm/mmu.c:706: pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/arm/mm/mmu.c:709: if (!pfn_valid(pfn))
arch/arm/mm/mmu.c:755: unsigned long end, unsigned long pfn,
arch/arm/mm/mmu.c:762: set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
arch/arm/mm/mmu.c:764: pfn++;
arch/arm/mm/mmu.c:868: phys = __pfn_to_phys(md->pfn);
arch/arm/mm/mmu.c:873: (long long)__pfn_to_phys((u64)md->pfn), addr);
arch/arm/mm/mmu.c:885: (long long)__pfn_to_phys((u64)md->pfn), addr);
arch/arm/mm/mmu.c:889: if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
arch/arm/mm/mmu.c:891: (long long)__pfn_to_phys((u64)md->pfn), addr);
arch/arm/mm/mmu.c:899: phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
arch/arm/mm/mmu.c:935: if (md->pfn >= 0x100000) {
arch/arm/mm/mmu.c:942: phys = __pfn_to_phys(md->pfn);
arch/arm/mm/mmu.c:947: (long long)__pfn_to_phys(md->pfn), addr);
arch/arm/mm/mmu.c:974: (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
arch/arm/mm/mmu.c:982: (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
arch/arm/mm/nommu.c:186: void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
arch/arm/mm/nommu.c:189: if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
arch/arm/mm/nommu.c:191: return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
arch/arm/xen/enlighten.c:500: rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
arch/arm/xen/p2m.c:150: bool __set_phys_to_machine_multi(unsigned long pfn,
arch/arm/xen/p2m.c:163: if (p2m_entry->pfn <= pfn &&
arch/arm/xen/p2m.c:164: p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
arch/arm/xen/p2m.c:170: if (pfn < p2m_entry->pfn)
arch/arm/xen/p2m.c:183: p2m_entry->pfn = pfn;
arch/arm/xen/p2m.c:199: bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
arch/arm/xen/p2m.c:201: return __set_phys_to_machine_multi(pfn, mfn, 1);
arch/arm/xen/p2m.c:23: unsigned long pfn;
arch/arm/xen/p2m.c:44: if (new->pfn == entry->pfn)
arch/arm/xen/p2m.c:47: if (new->pfn < entry->pfn)
arch/arm/xen/p2m.c:59: __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
arch/arm/xen/p2m.c:64: unsigned long __pfn_to_mfn(unsigned long pfn)
arch/arm/xen/p2m.c:74: if (entry->pfn <= pfn &&
arch/arm/xen/p2m.c:75: entry->pfn + entry->nr_pages > pfn) {
arch/arm/xen/p2m.c:76: unsigned long mfn = entry->mfn + (pfn - entry->pfn);
arch/arm/xen/p2m.c:80: if (pfn < entry->pfn)
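p2m.c keeps [pfn, pfn + nr_pages) to mfn ranges in an rbtree keyed by pfn; lines 74-80 are the descend-left/descend-right walk of __pfn_to_mfn(). A simplified model with a sorted array standing in for the tree (names and the array form are illustrative):

```c
#include <stdint.h>

struct p2m_range { unsigned long pfn, nr_pages, mfn; };

#define INVALID_P2M_ENTRY (~0UL)

/* Same containment test as p2m.c:74-76, binary search over sorted,
 * non-overlapping ranges instead of an rbtree. */
static unsigned long pfn_to_mfn(unsigned long pfn,
                                const struct p2m_range *r, int n)
{
    int lo = 0, hi = n - 1;

    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (r[mid].pfn <= pfn && pfn < r[mid].pfn + r[mid].nr_pages)
            return r[mid].mfn + (pfn - r[mid].pfn);
        if (pfn < r[mid].pfn)
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return INVALID_P2M_ENTRY;
}
```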
arch/arm64/include/asm/io.h:323: extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
arch/arm64/include/asm/kexec.h:84: extern bool crash_is_nosave(unsigned long pfn);
arch/arm64/include/asm/kexec.h:91: static inline bool crash_is_nosave(unsigned long pfn) {return false; }
arch/arm64/include/asm/kvm_pkvm.h:200: u64 pfn;
arch/arm64/include/asm/memory.h:393: #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/arm64/include/asm/page.h:43: int pfn_is_map_memory(unsigned long pfn);
arch/arm64/include/asm/pgtable.h:137: #define pfn_pte(pfn,prot) \
arch/arm64/include/asm/pgtable.h:138: __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/arm64/include/asm/pgtable.h:459: unsigned long pfn = pte_pfn(pte);
arch/arm64/include/asm/pgtable.h:461: return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
arch/arm64/include/asm/pgtable.h:631: #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/arm64/include/asm/pgtable.h:655: #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/arm64/include/asm/pgtable.h:660: unsigned long pfn = pmd_pfn(pmd);
arch/arm64/include/asm/pgtable.h:662: return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
arch/arm64/include/asm/pgtable.h:668: unsigned long pfn = pud_pfn(pud);
arch/arm64/include/asm/pgtable.h:670: return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
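pte_pgprot() at pgtable.h:459-461 (and its pmd/pud twins above) recovers the protection bits by XORing an entry against one rebuilt from the pfn alone. A toy model of why the XOR isolates exactly the prot bits (12-bit prot field assumed; the kernel's pte_pfn() masks the pfn field properly, so the trick covers high attribute bits too):

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* A toy pte: (pfn << PAGE_SHIFT) | prot, prot in the low 12 bits. */
static uint64_t pfn_pte(uint64_t pfn, uint64_t prot)
{
    return (pfn << PAGE_SHIFT) | prot;
}

static uint64_t pte_pfn(uint64_t pte) { return pte >> PAGE_SHIFT; }

/* Rebuild with empty permissions, XOR: only prot bits survive. */
static uint64_t pte_pgprot(uint64_t pte)
{
    return pfn_pte(pte_pfn(pte), 0) ^ pte;
}

int main(void)
{
    uint64_t prot = 0x7e3;
    assert(pte_pgprot(pfn_pte(0x1234, prot)) == prot);
    return 0;
}
```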
arch/arm64/include/asm/pgtable.h:774: extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/arm64/include/asm/vmalloc.h:23: unsigned long end, u64 pfn,
arch/arm64/include/asm/vmalloc.h:40: if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
arch/arm64/kernel/crash_dump.c:15: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/arm64/kernel/crash_dump.c:23: vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/arm64/kernel/hibernate.c:220: static int save_tags(struct page *page, unsigned long pfn)
arch/arm64/kernel/hibernate.c:230: ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
arch/arm64/kernel/hibernate.c:258: unsigned long pfn, max_zone_pfn;
arch/arm64/kernel/hibernate.c:267: for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
arch/arm64/kernel/hibernate.c:268: struct page *page = pfn_to_online_page(pfn);
arch/arm64/kernel/hibernate.c:282: ret = save_tags(page, pfn);
arch/arm64/kernel/hibernate.c:305: unsigned long pfn = xa_state.xa_index;
arch/arm64/kernel/hibernate.c:306: struct page *page = pfn_to_online_page(pfn);
arch/arm64/kernel/hibernate.c:91: int pfn_is_nosave(unsigned long pfn)
arch/arm64/kernel/hibernate.c:96: return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
arch/arm64/kernel/hibernate.c:97: crash_is_nosave(pfn);
arch/arm64/kernel/machine_kexec.c:255: bool crash_is_nosave(unsigned long pfn)
arch/arm64/kernel/machine_kexec.c:264: addr = __pfn_to_phys(pfn);
arch/arm64/kernel/vdso.c:72: unsigned long pfn;
arch/arm64/kernel/vdso.c:89: pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
arch/arm64/kernel/vdso.c:92: vdso_pagelist[i] = pfn_to_page(pfn + i);
arch/arm64/kvm/hyp/include/nvhe/gfp.h:32: int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:36: int __pkvm_host_share_hyp(u64 pfn);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:37: int __pkvm_host_unshare_hyp(u64 pfn);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:38: int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:39: int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:40: int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:41: int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h:42: int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
arch/arm64/kvm/hyp/include/nvhe/memory.h:83: #define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT))
arch/arm64/kvm/hyp/nvhe/ffa.c:353: u64 pfn = hyp_phys_to_pfn(range->address);
arch/arm64/kvm/hyp/nvhe/ffa.c:358: if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
arch/arm64/kvm/hyp/nvhe/ffa.c:373: u64 pfn = hyp_phys_to_pfn(range->address);
arch/arm64/kvm/hyp/nvhe/ffa.c:378: if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
arch/arm64/kvm/hyp/nvhe/hyp-main.c:253: DECLARE_REG(u64, pfn, host_ctxt, 1);
arch/arm64/kvm/hyp/nvhe/hyp-main.c:271: ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot);
arch/arm64/kvm/hyp/nvhe/hyp-main.c:511: DECLARE_REG(u64, pfn, host_ctxt, 1);
arch/arm64/kvm/hyp/nvhe/hyp-main.c:513: cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
arch/arm64/kvm/hyp/nvhe/hyp-main.c:518: DECLARE_REG(u64, pfn, host_ctxt, 1);
arch/arm64/kvm/hyp/nvhe/hyp-main.c:520: cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:106: unsigned long nr_pages, pfn;
arch/arm64/kvm/hyp/nvhe/mem_protect.c:109: pfn = hyp_virt_to_pfn(pgt_pool_base);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:111: ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1288: u64 phys, size, pfn, gfn;
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1297: pfn = hyp_phys_to_pfn(phys);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1304: assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1305: assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1306: assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1307: assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1308: assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1310: assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1315: assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1316: assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1317: assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1318: assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1324: assert_transition_res(0, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1325: assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1326: assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1327: assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1328: assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1329: assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1336: assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1337: assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1338: assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1339: assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1340: assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1341: assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1350: assert_transition_res(0, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1354: assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1355: assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1356: assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1357: assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1358: assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1359: assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1360: assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1366: assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1367: assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1371: assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1372: assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1373: assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1374: assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1375: assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1376: assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1377: assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1381: assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:1393: assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:378: static bool pfn_range_is_valid(u64 pfn, u64 nr_pages)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:382: return pfn < limit && ((limit - pfn) >= nr_pages);
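pfn_range_is_valid() at mem_protect.c:378-382 is written so hostile u64 inputs cannot wrap the bounds check: testing "pfn + nr_pages <= limit" as "limit - pfn >= nr_pages", guarded by "pfn < limit", has no overflowing addition. The same shape standalone, with the limit passed in:

```c
#include <stdbool.h>
#include <stdint.h>

/* Overflow-safe range check, as in nvhe/mem_protect.c:382. */
static bool pfn_range_ok(uint64_t pfn, uint64_t nr_pages, uint64_t limit)
{
    return pfn < limit && (limit - pfn) >= nr_pages;
}
```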
arch/arm64/kvm/hyp/nvhe/mem_protect.c:730: int __pkvm_host_share_hyp(u64 pfn)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:732: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:756: int __pkvm_host_unshare_hyp(u64 pfn)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:758: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:787: int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:789: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:794: if (!pfn_range_is_valid(pfn, nr_pages))
arch/arm64/kvm/hyp/nvhe/mem_protect.c:818: int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:820: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:825: if (!pfn_range_is_valid(pfn, nr_pages))
arch/arm64/kvm/hyp/nvhe/mem_protect.c:905: int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:907: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:911: if (!pfn_range_is_valid(pfn, nr_pages))
arch/arm64/kvm/hyp/nvhe/mem_protect.c:923: int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:925: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:929: if (!pfn_range_is_valid(pfn, nr_pages))
arch/arm64/kvm/hyp/nvhe/mem_protect.c:963: int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
arch/arm64/kvm/hyp/nvhe/mem_protect.c:967: u64 phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/mem_protect.c:975: if (!pfn_range_is_valid(pfn, nr_pages))
arch/arm64/kvm/hyp/nvhe/page_alloc.c:223: int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
arch/arm64/kvm/hyp/nvhe/page_alloc.c:226: phys_addr_t phys = hyp_pfn_to_phys(pfn);
arch/arm64/kvm/hyp/nvhe/setup.c:290: unsigned long nr_pages, reserved_pages, pfn;
arch/arm64/kvm/hyp/nvhe/setup.c:294: pfn = hyp_virt_to_pfn(hyp_pgt_base);
arch/arm64/kvm/hyp/nvhe/setup.c:297: ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
arch/arm64/kvm/mmu.c:1406: kvm_pfn_t pfn = *pfnp;
arch/arm64/kvm/mmu.c:1423: pfn &= ~(PTRS_PER_PMD - 1);
arch/arm64/kvm/mmu.c:1424: *pfnp = pfn;
arch/arm64/kvm/mmu.c:1470: static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
arch/arm64/kvm/mmu.c:1474: struct page *page = pfn_to_page(pfn);
arch/arm64/kvm/mmu.c:1579: kvm_pfn_t pfn;
arch/arm64/kvm/mmu.c:1601: ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
arch/arm64/kvm/mmu.c:1629: __pfn_to_phys(pfn), prot,
arch/arm64/kvm/mmu.c:1659: kvm_pfn_t pfn;
arch/arm64/kvm/mmu.c:1788: pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
arch/arm64/kvm/mmu.c:1790: if (pfn == KVM_PFN_ERR_HWPOISON) {
arch/arm64/kvm/mmu.c:1794: if (is_error_noslot_pfn(pfn))
arch/arm64/kvm/mmu.c:1801: if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(pfn)) {
arch/arm64/kvm/mmu.c:1875: hva, &pfn,
arch/arm64/kvm/mmu.c:1887: sanitise_mte_tags(kvm, pfn, vma_pagesize);
arch/arm64/kvm/mmu.c:1926: __pfn_to_phys(pfn), prot,
arch/arm64/kvm/mmu.c:448: u64 pfn;
arch/arm64/kvm/mmu.c:456: static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
arch/arm64/kvm/mmu.c:466: if (this->pfn < pfn)
arch/arm64/kvm/mmu.c:468: else if (this->pfn > pfn)
arch/arm64/kvm/mmu.c:477: static int share_pfn_hyp(u64 pfn)
arch/arm64/kvm/mmu.c:484: this = find_shared_pfn(pfn, &node, &parent);
arch/arm64/kvm/mmu.c:496: this->pfn = pfn;
arch/arm64/kvm/mmu.c:500: ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn);
arch/arm64/kvm/mmu.c:507: static int unshare_pfn_hyp(u64 pfn)
arch/arm64/kvm/mmu.c:514: this = find_shared_pfn(pfn, &node, &parent);
arch/arm64/kvm/mmu.c:526: ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn);
arch/arm64/kvm/mmu.c:536: u64 pfn;
arch/arm64/kvm/mmu.c:556: pfn = __phys_to_pfn(cur);
arch/arm64/kvm/mmu.c:557: ret = share_pfn_hyp(pfn);
arch/arm64/kvm/mmu.c:568: u64 pfn;
arch/arm64/kvm/mmu.c:576: pfn = __phys_to_pfn(cur);
arch/arm64/kvm/mmu.c:577: WARN_ON(unshare_pfn_hyp(pfn));
arch/arm64/kvm/nested.c:1267: u64 va, pfn, gfn;
arch/arm64/kvm/nested.c:1311: pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
arch/arm64/kvm/nested.c:1313: if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
arch/arm64/kvm/nested.c:1316: ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL);
arch/arm64/kvm/nested.c:1329: vt->hpa = pfn << PAGE_SHIFT;
arch/arm64/kvm/pkvm.c:367: u64 pfn = phys >> PAGE_SHIFT;
arch/arm64/kvm/pkvm.c:392: ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
arch/arm64/kvm/pkvm.c:398: mapping->pfn = pfn;
arch/arm64/kvm/pkvm.c:437: __clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
arch/arm64/mm/contpte.c:260: unsigned long pfn;
arch/arm64/mm/contpte.c:279: pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES);
arch/arm64/mm/contpte.c:281: expected_pte = pfn_pte(pfn, prot);
arch/arm64/mm/contpte.c:361: static inline bool contpte_is_consistent(pte_t pte, unsigned long pfn,
arch/arm64/mm/contpte.c:366: return pte_valid_cont(pte) && pte_pfn(pte) == pfn &&
arch/arm64/mm/contpte.c:390: unsigned long pfn;
arch/arm64/mm/contpte.c:404: pfn = pte_pfn(orig_pte) - (orig_ptep - ptep);
arch/arm64/mm/contpte.c:406: for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
arch/arm64/mm/contpte.c:409: if (!contpte_is_consistent(pte, pfn, orig_prot))
arch/arm64/mm/contpte.c:414: for (; i < CONT_PTES; i++, ptep++, pfn++) {
arch/arm64/mm/contpte.c:417: if (!contpte_is_consistent(pte, pfn, orig_prot))
arch/arm64/mm/contpte.c:432: pfn++;
arch/arm64/mm/contpte.c:433: for (; i < CONT_PTES; i++, ptep++, pfn++) {
arch/arm64/mm/contpte.c:436: if (!contpte_is_consistent(pte, pfn, orig_prot))
arch/arm64/mm/contpte.c:457: unsigned long pfn;
arch/arm64/mm/contpte.c:472: pfn = pte_pfn(pte);
arch/arm64/mm/contpte.c:478: pte = pfn_pte(pfn, prot);
arch/arm64/mm/contpte.c:480: if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0)
arch/arm64/mm/contpte.c:489: pfn += nr;
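The test at contpte.c:480 folds three alignment checks into one mask: the VA start, the VA end, and the physical address must all sit on a CONT_PTE_SIZE boundary before a contiguous-bit mapping is legal. Standalone (CONT_PTES = 16 is an assumed 4 KiB-page configuration):

```c
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define CONT_PTES     16                          /* 16 x 4 KiB */
#define CONT_PTE_SIZE ((uint64_t)CONT_PTES << PAGE_SHIFT)
#define CONT_PTE_MASK (~(CONT_PTE_SIZE - 1))

/* One mask tests all three alignments at once, as in contpte.c:480. */
static bool can_map_cont(uint64_t addr, uint64_t next, uint64_t pfn)
{
    return ((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0;
}
```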
arch/arm64/mm/init.c:164: int pfn_is_map_memory(unsigned long pfn)
arch/arm64/mm/init.c:166: phys_addr_t addr = PFN_PHYS(pfn);
arch/arm64/mm/init.c:169: if (PHYS_PFN(addr) != pfn)
arch/arm64/mm/ioremap.c:55: unsigned long pfn = PHYS_PFN(offset);
arch/arm64/mm/ioremap.c:57: return pfn_is_map_memory(pfn);
arch/arm64/mm/mmap.c:62: int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
arch/arm64/mm/mmap.c:64: return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
arch/arm64/mm/mmu.c:100: if (!pfn_is_map_memory(pfn))
arch/arm64/mm/mmu.c:2109: static bool can_unmap_without_split(unsigned long pfn, unsigned long nr_pages)
arch/arm64/mm/mmu.c:2113: phys_start = PFN_PHYS(pfn);
arch/arm64/mm/mmu.c:2127: start = (unsigned long)pfn_to_page(pfn);
arch/arm64/mm/mmu.c:2128: end = (unsigned long)pfn_to_page(pfn + nr_pages);
arch/arm64/mm/mmu.c:2155: unsigned long pfn = arg->start_pfn;
arch/arm64/mm/mmu.c:2160: for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
arch/arm64/mm/mmu.c:2161: unsigned long start = PFN_PHYS(pfn);
arch/arm64/mm/mmu.c:2164: ms = __pfn_to_section(pfn);
arch/arm64/mm/mmu.c:2196: if (!can_unmap_without_split(pfn, arg->nr_pages))
arch/arm64/mm/mmu.c:586: unsigned long pfn = pmd_pfn(pmd);
arch/arm64/mm/mmu.c:607: for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
arch/arm64/mm/mmu.c:608: __set_pte(ptep, pfn_pte(pfn, prot));
arch/arm64/mm/mmu.c:633: unsigned long pfn = pud_pfn(pud);
arch/arm64/mm/mmu.c:654: for (i = 0; i < PTRS_PER_PMD; i++, pmdp++, pfn += step)
arch/arm64/mm/mmu.c:655: set_pmd(pmdp, pfn_pmd(pfn, prot));
arch/arm64/mm/mmu.c:97: pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/arm64/mm/trans_pgd.c:217: unsigned long pfn = __phys_to_pfn(dst_addr);
arch/arm64/mm/trans_pgd.c:224: prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
arch/arm64/mm/trans_pgd.c:238: pfn = virt_to_pfn(levels[this_level]);
arch/arm64/mm/trans_pgd.c:239: prev_level_entry = pte_val(pfn_pte(pfn,
arch/arm64/mm/trans_pgd.c:246: *trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
arch/csky/abiv1/cacheflush.c:47: unsigned long pfn = pte_pfn(*ptep);
arch/csky/abiv1/cacheflush.c:52: if (!pfn_valid(pfn))
arch/csky/abiv1/cacheflush.c:55: if (is_zero_pfn(pfn))
arch/csky/abiv1/cacheflush.c:58: folio = page_folio(pfn_to_page(pfn));
arch/csky/abiv1/inc/abi/cacheflush.h:16: #define flush_cache_page(vma, page, pfn) cache_wbinv_all()
arch/csky/abiv2/cacheflush.c:13: unsigned long pfn = pte_pfn(*pte);
arch/csky/abiv2/cacheflush.c:19: if (!pfn_valid(pfn))
arch/csky/abiv2/cacheflush.c:22: folio = page_folio(pfn_to_page(pfn));
arch/csky/abiv2/inc/abi/cacheflush.h:17: #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
arch/csky/include/asm/pgtable.h:219: extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/csky/include/asm/pgtable.h:39: #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
arch/csky/mm/ioremap.c:11: if (!pfn_valid(pfn)) {
arch/csky/mm/ioremap.c:8: pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/hexagon/include/asm/pgtable.h:337: #define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
arch/loongarch/include/asm/cacheflush.h:44: #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
arch/loongarch/include/asm/io.h:89: extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
arch/loongarch/include/asm/kvm_mmu.h:28: #define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/loongarch/include/asm/page.h:101: #define pfn_to_virt(pfn) page_to_virt(pfn_to_page(pfn))
arch/loongarch/include/asm/page.h:69: #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/loongarch/include/asm/page.h:75: #define pfn_to_phys(pfn) __pfn_to_phys(pfn)
arch/loongarch/include/asm/pgtable.h:269: #define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/loongarch/include/asm/pgtable.h:270: #define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/loongarch/kernel/crash_dump.c:14: vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/loongarch/kernel/crash_dump.c:6: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/loongarch/kernel/vdso.c:47: unsigned long i, cpu, pfn;
arch/loongarch/kernel/vdso.c:61: pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
arch/loongarch/kernel/vdso.c:63: vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
arch/loongarch/kvm/mmu.c:777: kvm_pfn_t pfn;
arch/loongarch/kvm/mmu.c:823: pfn = kvm_faultin_pfn(vcpu, gfn, write, &writeable, &page);
arch/loongarch/kvm/mmu.c:824: if (is_error_noslot_pfn(pfn)) {
arch/loongarch/kvm/mmu.c:853: if (pfn_valid(pfn))
arch/loongarch/kvm/mmu.c:882: pfn = pfn & ~(PTRS_PER_PTE - 1);
arch/loongarch/kvm/mmu.c:888: new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits));
arch/loongarch/mm/init.c:53: int __ref page_is_ram(unsigned long pfn)
arch/loongarch/mm/init.c:55: unsigned long addr = PFN_PHYS(pfn);
arch/loongarch/mm/mmap.c:151: int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
arch/loongarch/mm/mmap.c:153: return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
arch/loongarch/power/hibernate.c:42: int pfn_is_nosave(unsigned long pfn)
arch/loongarch/power/hibernate.c:47: return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
arch/m68k/include/asm/cacheflush_mm.h:215: static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
arch/m68k/include/asm/mcf_pgtable.h:292: #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/m68k/include/asm/motorola_pgtable.h:112: #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/m68k/include/asm/page_mm.h:128: static inline void *pfn_to_virt(unsigned long pfn)
arch/m68k/include/asm/page_mm.h:130: return __va(pfn << PAGE_SHIFT);
arch/m68k/include/asm/page_mm.h:145: #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
arch/m68k/include/asm/page_no.h:26: static inline void *pfn_to_virt(unsigned long pfn)
arch/m68k/include/asm/page_no.h:28: return __va(pfn << PAGE_SHIFT);
arch/m68k/include/asm/sun3_pgtable.h:104: #define pfn_pte(pfn, pgprot) \
arch/m68k/include/asm/sun3_pgtable.h:105: ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
arch/microblaze/include/asm/cacheflush.h:85: #define flush_cache_page(vma, vmaddr, pfn) \
arch/microblaze/include/asm/cacheflush.h:86: flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);
arch/microblaze/include/asm/page.h:125: static inline const void *pfn_to_virt(unsigned long pfn)
arch/microblaze/include/asm/page.h:127: return __va(pfn_to_phys((pfn)));
arch/microblaze/include/asm/page.h:93: extern int page_is_ram(unsigned long pfn);
arch/microblaze/include/asm/page.h:96: # define pfn_to_phys(pfn) (PFN_PHYS(pfn))
arch/microblaze/include/asm/pgtable.h:226: #define pfn_pte(pfn, prot) \
arch/microblaze/include/asm/pgtable.h:227: __pte(((pte_basic_t)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/microblaze/mm/init.c:111: int page_is_ram(unsigned long pfn)
arch/microblaze/mm/init.c:113: return pfn < max_low_pfn;
arch/mips/alchemy/common/setup.c:97: unsigned long io_remap_pfn_range_pfn(unsigned long pfn, unsigned long size)
arch/mips/alchemy/common/setup.c:99: phys_addr_t phys_addr = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
arch/mips/include/asm/cacheflush.h:52: extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
arch/mips/include/asm/mach-ip27/kernel-entry-init.h:41: dsrl t1, 12 # 4K pfn
arch/mips/include/asm/mach-ip27/kernel-entry-init.h:42: dsrl t2, 12 # 4K pfn
arch/mips/include/asm/mach-ip27/kernel-entry-init.h:43: dsll t1, 6 # Get pfn into place
arch/mips/include/asm/mach-ip27/kernel-entry-init.h:44: dsll t2, 6 # Get pfn into place
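The ip27 assembly above positions a pfn in the TLB EntryLo PFN field: shift the physical address right by 12 for a 4 KiB pfn, then left by 6 to land the field at bit 6. The same arithmetic in C, for one register:

```c
#include <stdint.h>

/* Equivalent of "dsrl t1, 12; dsll t1, 6" from kernel-entry-init.h. */
static uint64_t phys_to_entrylo_pfn_field(uint64_t phys)
{
    uint64_t pfn = phys >> 12;  /* 4K pfn */
    return pfn << 6;            /* pfn into place */
}
```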
arch/mips/include/asm/page.h:207: #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/mips/include/asm/pgtable-32.h:158: pfn_pte(unsigned long pfn, pgprot_t prot)
arch/mips/include/asm/pgtable-32.h:162: pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
arch/mips/include/asm/pgtable-32.h:164: pte.pte_high = (pfn << PFN_PTE_SHIFT) |
arch/mips/include/asm/pgtable-32.h:174: static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
arch/mips/include/asm/pgtable-32.h:178: pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
arch/mips/include/asm/pgtable-32.h:188: #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/mips/include/asm/pgtable-32.h:189: #define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/mips/include/asm/pgtable-64.h:302: #define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/mips/include/asm/pgtable-64.h:303: #define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
arch/mips/include/asm/pgtable.h:607: unsigned long io_remap_pfn_range_pfn(unsigned long pfn, unsigned long size);
arch/mips/kernel/crash_dump.c:14: vaddr = kmap_local_pfn(pfn);
arch/mips/kernel/crash_dump.c:6: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/mips/kvm/mmu.c:555: kvm_pfn_t pfn;
arch/mips/kvm/mmu.c:594: pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
arch/mips/kvm/mmu.c:595: if (is_error_noslot_pfn(pfn)) {
arch/mips/kvm/mmu.c:625: entry = pfn_pte(pfn, __pgprot(prot_bits));
arch/mips/mm/c-octeon.c:155: unsigned long page, unsigned long pfn)
arch/mips/mm/c-r3k.c:237: unsigned long addr, unsigned long pfn)
arch/mips/mm/c-r3k.c:239: unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
arch/mips/mm/c-r4k.c:534: unsigned long pfn;
arch/mips/mm/c-r4k.c:542: struct page *page = pfn_to_page(fcp_args->pfn);
arch/mips/mm/c-r4k.c:609: unsigned long addr, unsigned long pfn)
arch/mips/mm/c-r4k.c:615: args.pfn = pfn;
arch/mips/mm/cache.c:149: unsigned long pfn, addr;
arch/mips/mm/cache.c:153: pfn = pte_pfn(pte);
arch/mips/mm/cache.c:154: if (unlikely(!pfn_valid(pfn)))
arch/mips/mm/cache.c:157: folio = page_folio(pfn_to_page(pfn));
arch/mips/mm/cache.c:159: address -= offset_in_folio(folio, pfn << PAGE_SHIFT);
arch/mips/mm/cache.c:36: unsigned long pfn);
arch/mips/mm/init.c:464: unsigned long pfn;
arch/mips/mm/init.c:466: for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
arch/mips/mm/init.c:467: struct page *page = pfn_to_page(pfn);
arch/mips/mm/init.c:468: void *addr = phys_to_virt(PFN_PHYS(pfn));
arch/mips/mm/ioremap.c:50: unsigned long offset, pfn, last_pfn;
arch/mips/mm/ioremap.c:83: pfn = PFN_DOWN(phys_addr);
arch/mips/mm/ioremap.c:85: if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
arch/mips/power/cpu.c:37: int pfn_is_nosave(unsigned long pfn)
arch/mips/power/cpu.c:42: return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
arch/nios2/include/asm/cacheflush.h:29: unsigned long pfn);
arch/nios2/include/asm/page.h:83: # define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/nios2/include/asm/pgtable.h:166: #define pfn_pte(pfn, prot) (__pte(pfn | pgprot_val(prot)))
arch/nios2/mm/cacheflush.c:153: unsigned long pfn)
arch/nios2/mm/cacheflush.c:213: unsigned long pfn = pte_pfn(pte);
arch/nios2/mm/cacheflush.c:219: if (!pfn_valid(pfn))
arch/nios2/mm/cacheflush.c:226: if (is_zero_pfn(pfn))
arch/nios2/mm/cacheflush.c:229: folio = page_folio(pfn_to_page(pfn));
arch/nios2/mm/ioremap.c:27: unsigned long pfn;
arch/nios2/mm/ioremap.c:37: pfn = PFN_DOWN(phys_addr);
arch/nios2/mm/ioremap.c:43: set_pte(pte, pfn_pte(pfn, pgprot));
arch/nios2/mm/ioremap.c:45: pfn++;
arch/openrisc/include/asm/pgtable.h:339: #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
arch/openrisc/mm/cache.c:84: unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
arch/openrisc/mm/cache.c:85: struct folio *folio = page_folio(pfn_to_page(pfn));
arch/parisc/include/asm/cacheflush.h:75: unsigned long pfn);
arch/parisc/include/asm/io.h:279: extern int devmem_is_allowed(unsigned long pfn);
arch/parisc/include/asm/pgtable.h:339: static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
arch/parisc/include/asm/pgtable.h:342: pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
arch/parisc/kernel/cache.c:107: #define pfn_va(pfn) __va(PFN_PHYS(pfn))
arch/parisc/kernel/cache.c:111: unsigned long pfn = pte_pfn(pte);
arch/parisc/kernel/cache.c:118: if (!pfn_valid(pfn))
arch/parisc/kernel/cache.c:121: folio = page_folio(pfn_to_page(pfn));
arch/parisc/kernel/cache.c:122: pfn = folio_pfn(folio);
arch/parisc/kernel/cache.c:127: flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
arch/parisc/kernel/cache.c:131: flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
arch/parisc/kernel/cache.c:508: unsigned long pfn = folio_pfn(folio);
arch/parisc/kernel/cache.c:513: pfn -= offset;
arch/parisc/kernel/cache.c:526: (pfn + i) * PAGE_SIZE);
arch/parisc/kernel/cache.c:771: void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
arch/parisc/kernel/cache.c:773: __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
arch/parisc/kernel/cache.c:809: unsigned long pfn = pte_pfn(pte);
arch/parisc/kernel/cache.c:811: if (pfn_valid(pfn))
arch/parisc/kernel/cache.c:812: __flush_cache_page(vma, addr, PFN_PHYS(pfn));
arch/powerpc/include/asm/book3s/32/pgtable.h:452: static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
arch/powerpc/include/asm/book3s/32/pgtable.h:454: return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
arch/powerpc/include/asm/book3s/64/hash-4k.h:74: #define remap_4k_pfn(vma, addr, pfn, prot) \
arch/powerpc/include/asm/book3s/64/hash-4k.h:75: remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
arch/powerpc/include/asm/book3s/64/hash-64k.h:174: unsigned long pfn, unsigned long size, pgprot_t);
arch/powerpc/include/asm/book3s/64/hash-64k.h:176: unsigned long pfn, pgprot_t prot)
arch/powerpc/include/asm/book3s/64/hash-64k.h:178: if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
arch/powerpc/include/asm/book3s/64/hash-64k.h:182: return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
arch/powerpc/include/asm/book3s/64/pgtable-64k.h:11: unsigned long pfn, pgprot_t prot)
arch/powerpc/include/asm/book3s/64/pgtable-64k.h:15: return hash__remap_4k_pfn(vma, addr, pfn, prot);
arch/powerpc/include/asm/book3s/64/pgtable.h:1105: extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
arch/powerpc/include/asm/book3s/64/pgtable.h:1106: extern pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot);
arch/powerpc/include/asm/book3s/64/pgtable.h:564: static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
arch/powerpc/include/asm/book3s/64/pgtable.h:566: VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
arch/powerpc/include/asm/book3s/64/pgtable.h:567: VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
arch/powerpc/include/asm/book3s/64/pgtable.h:569: return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
arch/powerpc/include/asm/kvm_book3s.h:56: u64 pfn;
arch/powerpc/include/asm/kvm_host.h:176: unsigned long pfn;
arch/powerpc/include/asm/kvm_ppc.h:930: static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
arch/powerpc/include/asm/kvm_ppc.h:937: if (!pfn_valid(pfn))
arch/powerpc/include/asm/kvm_ppc.h:941: folio = page_folio(pfn_to_page(pfn));
arch/powerpc/include/asm/machdep.h:114: pgprot_t (*phys_mem_access_prot)(unsigned long pfn,
arch/powerpc/include/asm/nohash/32/mmu-8xx.h:218: static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
arch/powerpc/include/asm/nohash/32/mmu-8xx.h:230: if (!IS_ALIGNED(PFN_PHYS(pfn), size))
arch/powerpc/include/asm/nohash/32/mmu-8xx.h:237: u64 pfn, unsigned int max_page_shift)
arch/powerpc/include/asm/nohash/32/mmu-8xx.h:239: if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
arch/powerpc/include/asm/nohash/32/mmu-8xx.h:243: if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
arch/powerpc/include/asm/nohash/64/pgtable-4k.h:90: #define remap_4k_pfn(vma, addr, pfn, prot) \
arch/powerpc/include/asm/nohash/64/pgtable-4k.h:91: remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
arch/powerpc/include/asm/nohash/pgtable.h:263: static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
arch/powerpc/include/asm/nohash/pgtable.h:264: return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
arch/powerpc/include/asm/page.h:225: static inline const void *pfn_to_kaddr(unsigned long pfn)
arch/powerpc/include/asm/page.h:227: return __va(pfn << PAGE_SHIFT);
arch/powerpc/include/asm/page.h:277: extern int devmem_is_allowed(unsigned long pfn);
arch/powerpc/include/asm/pci.h:108: extern pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
arch/powerpc/include/asm/pgtable.h:122: pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
arch/powerpc/include/asm/pgtable.h:126: static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/powerpc/include/asm/pgtable.h:129: return __phys_mem_access_prot(pfn, size, vma_prot);
arch/powerpc/include/asm/rtas.h:554: static inline int page_is_rtas_user_buf(unsigned long pfn)
arch/powerpc/include/asm/rtas.h:556: unsigned long paddr = (pfn << PAGE_SHIFT);
arch/powerpc/include/asm/rtas.h:567: static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
arch/powerpc/include/asm/ultravisor.h:34: static inline int uv_share_page(u64 pfn, u64 npages)
arch/powerpc/include/asm/ultravisor.h:36: return ucall_norets(UV_SHARE_PAGE, pfn, npages);
arch/powerpc/include/asm/ultravisor.h:39: static inline int uv_unshare_page(u64 pfn, u64 npages)
arch/powerpc/include/asm/ultravisor.h:41: return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
arch/powerpc/kernel/crash_dump.c:72: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/powerpc/kernel/crash_dump.c:82: paddr = pfn << PAGE_SHIFT;
arch/powerpc/kernel/fadump.c:1182: unsigned long pfn;
arch/powerpc/kernel/fadump.c:1188: for (pfn = start_pfn; pfn < end_pfn; pfn++) {
arch/powerpc/kernel/fadump.c:1189: free_reserved_page(pfn_to_page(pfn));
arch/powerpc/kernel/mce.c:309: unsigned long pfn;
arch/powerpc/kernel/mce.c:311: pfn = evt->u.ue_error.physical_address >>
arch/powerpc/kernel/mce.c:313: memory_failure(pfn, 0);
arch/powerpc/kernel/mce_power.c:33: unsigned long pfn, flags;
arch/powerpc/kernel/mce_power.c:44: pfn = ULONG_MAX;
arch/powerpc/kernel/mce_power.c:459: unsigned long pfn, instr_addr;
arch/powerpc/kernel/mce_power.c:463: pfn = addr_to_pfn(regs, regs->nip);
arch/powerpc/kernel/mce_power.c:464: if (pfn != ULONG_MAX) {
arch/powerpc/kernel/mce_power.c:465: instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
arch/powerpc/kernel/mce_power.c:468: pfn = addr_to_pfn(regs, op.ea);
arch/powerpc/kernel/mce_power.c:470: *phys_addr = (pfn << PAGE_SHIFT);
arch/powerpc/kernel/mce_power.c:50: pfn = ULONG_MAX;
arch/powerpc/kernel/mce_power.c:55: pfn = pte_pfn(pte);
arch/powerpc/kernel/mce_power.c:550: unsigned long pfn;
arch/powerpc/kernel/mce_power.c:553: pfn = addr_to_pfn(regs, regs->nip);
arch/powerpc/kernel/mce_power.c:554: if (pfn != ULONG_MAX) {
arch/powerpc/kernel/mce_power.c:556: (pfn << PAGE_SHIFT);
arch/powerpc/kernel/mce_power.c:58: pfn = pte_pfn(__pte(pte_val(pte) | (addr & rpnmask)));
arch/powerpc/kernel/mce_power.c:62: return pfn;
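addr_to_pfn() in mce_power.c signals a failed translation with the ULONG_MAX sentinel rather than a separate flag (lines 44, 50, 464, 554), so callers reconstruct the physical address with one compare. A sketch of the caller-side pattern; stub_addr_to_pfn() is an invented stand-in for the real page-table walk:

```c
#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Stub for the walk done by addr_to_pfn(); identity-maps here,
 * returns ULONG_MAX on failure in the real code. */
static unsigned long stub_addr_to_pfn(unsigned long addr)
{
    return addr >> PAGE_SHIFT;
}

/* Caller-side shape from mce_power.c:463-465. */
static unsigned long addr_to_phys(unsigned long addr)
{
    unsigned long pfn = stub_addr_to_pfn(addr);

    if (pfn == ULONG_MAX)
        return ULONG_MAX;
    return (pfn << PAGE_SHIFT) + (addr & ~PAGE_MASK);
}

int main(void)
{
    printf("%#lx\n", addr_to_phys(0x12345678UL)); /* 0x12345678 */
    return 0;
}
```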
arch/powerpc/kernel/pci-common.c:524: pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
arch/powerpc/kernel/pci-common.c:530: resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
arch/powerpc/kernel/pci-common.c:533: if (page_is_ram(pfn))
arch/powerpc/kernel/suspend.c:18: int pfn_is_nosave(unsigned long pfn)
arch/powerpc/kernel/suspend.c:22: return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
arch/powerpc/kvm/book3s.c
437
kvm_pfn_t pfn;
arch/powerpc/kvm/book3s.c
439
pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
arch/powerpc/kvm/book3s.c
440
*page = pfn_to_page(pfn);
arch/powerpc/kvm/book3s.c
444
return pfn;
arch/powerpc/kvm/book3s_32_mmu_host.c
248
pte->pfn = hpaddr >> PAGE_SHIFT;
arch/powerpc/kvm/book3s_64_mmu_host.c
105
hpaddr = pfn << PAGE_SHIFT;
arch/powerpc/kvm/book3s_64_mmu_host.c
133
kvmppc_mmu_flush_icache(pfn);
arch/powerpc/kvm/book3s_64_mmu_host.c
195
cpte->pfn = pfn;
arch/powerpc/kvm/book3s_64_mmu_host.c
90
unsigned long pfn;
arch/powerpc/kvm/book3s_64_mmu_host.c
98
pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
arch/powerpc/kvm/book3s_64_mmu_host.c
99
if (is_error_noslot_pfn(pfn)) {
arch/powerpc/kvm/book3s_64_mmu_hv.c
515
unsigned long gpa, gfn, hva, pfn, hpa;
arch/powerpc/kvm/book3s_64_mmu_hv.c
606
pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
arch/powerpc/kvm/book3s_64_mmu_hv.c
608
if (is_error_noslot_pfn(pfn))
arch/powerpc/kvm/book3s_64_mmu_radix.c
836
kvm_pfn_t pfn;
arch/powerpc/kvm/book3s_64_mmu_radix.c
843
pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
arch/powerpc/kvm/book3s_64_mmu_radix.c
845
if (is_error_noslot_pfn(pfn))
arch/powerpc/kvm/book3s_hv_uvmem.c
1020
unsigned long pfn = page_to_pfn(page) -
arch/powerpc/kvm/book3s_hv_uvmem.c
1025
bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
arch/powerpc/kvm/book3s_hv_uvmem.c
522
unsigned long pfn;
arch/powerpc/kvm/book3s_hv_uvmem.c
558
pfn = page_to_pfn(dpage);
arch/powerpc/kvm/book3s_hv_uvmem.c
568
ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
arch/powerpc/kvm/book3s_hv_uvmem.c
572
*mig.dst = migrate_pfn(pfn);
arch/powerpc/kvm/book3s_hv_uvmem.c
749
unsigned long pfn;
arch/powerpc/kvm/book3s_hv_uvmem.c
777
pfn = *mig.src >> MIGRATE_PFN_SHIFT;
arch/powerpc/kvm/book3s_hv_uvmem.c
780
ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
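book3s_hv_uvmem.c packs a pfn and migration status into one word: migrate_pfn() shifts the pfn up and sets flag bits below it, and the consumer recovers the pfn with a plain right shift (*mig.src >> MIGRATE_PFN_SHIFT above). A hedged sketch of that encoding; the shift value and flag bit here are illustrative, not the kernel's actual MIGRATE_PFN_* constants:

#include <stdio.h>

#define MIG_PFN_SHIFT  6             /* illustrative: pfn sits above the flags */
#define MIG_PFN_VALID  (1UL << 0)    /* illustrative flag bit */

static unsigned long mig_encode(unsigned long pfn)
{
    return (pfn << MIG_PFN_SHIFT) | MIG_PFN_VALID;
}

static unsigned long mig_decode(unsigned long v)
{
    return v >> MIG_PFN_SHIFT;       /* mirrors *mig.src >> MIGRATE_PFN_SHIFT */
}

int main(void)
{
    unsigned long pfn = 0xabcde;
    unsigned long v   = mig_encode(pfn);

    printf("encoded=%#lx decoded pfn=%#lx valid=%lu\n",
           v, mig_decode(v), v & MIG_PFN_VALID);
    return 0;
}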
arch/powerpc/kvm/e500.h
43
kvm_pfn_t pfn; /* valid only for TLB0, except briefly */
arch/powerpc/kvm/e500_mmu_host.c
167
kvm_pfn_t pfn;
arch/powerpc/kvm/e500_mmu_host.c
169
pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
arch/powerpc/kvm/e500_mmu_host.c
170
get_page(pfn_to_page(pfn));
arch/powerpc/kvm/e500_mmu_host.c
178
magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
arch/powerpc/kvm/e500_mmu_host.c
250
kvm_pfn_t pfn, unsigned int wimg,
arch/powerpc/kvm/e500_mmu_host.c
253
tlbe->pfn = pfn;
arch/powerpc/kvm/e500_mmu_host.c
266
trace_kvm_booke206_ref_release(tlbe->pfn, tlbe->flags);
arch/powerpc/kvm/e500_mmu_host.c
307
kvm_pfn_t pfn = tlbe->pfn;
arch/powerpc/kvm/e500_mmu_host.c
316
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
arch/powerpc/kvm/e500_mmu_host.c
327
unsigned long pfn;
arch/powerpc/kvm/e500_mmu_host.c
355
pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
arch/powerpc/kvm/e500_mmu_host.c
356
if (is_error_noslot_pfn(pfn)) {
arch/powerpc/kvm/e500_mmu_host.c
388
__func__, (long)gfn, pfn);
arch/powerpc/kvm/e500_mmu_host.c
401
start = pfn & ~(psize_pages - 1);
arch/powerpc/kvm/e500_mmu_host.c
404
slot_start = pfn - (gfn - slot->base_gfn);
arch/powerpc/kvm/e500_mmu_host.c
441
if (gfn_start + pfn - gfn < start)
arch/powerpc/kvm/e500_mmu_host.c
443
if (gfn_end + pfn - gfn > end)
arch/powerpc/kvm/e500_mmu_host.c
446
(pfn & (tsize_pages - 1)))
arch/powerpc/kvm/e500_mmu_host.c
450
pfn &= ~(tsize_pages - 1);
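e500_mmu_host.c first rejects a misaligned candidate with (pfn & (tsize_pages - 1)) and then rounds down with pfn &= ~(tsize_pages - 1); both only work because tsize_pages is a power of two. A minimal demonstration of the mask arithmetic (tsize_pages = 512, i.e. a 2 MiB block of 4 KiB pages, chosen purely for illustration):

#include <stdio.h>

int main(void)
{
    unsigned long tsize_pages = 512;   /* must be a power of two */
    unsigned long pfn = 0x12345;

    unsigned long aligned = pfn & ~(tsize_pages - 1);  /* block base */
    unsigned long offset  = pfn &  (tsize_pages - 1);  /* nonzero => misaligned */

    printf("pfn=%#lx -> base=%#lx + %#lx\n", pfn, aligned, offset);
    return 0;
}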
arch/powerpc/kvm/e500_mmu_host.c
455
kvmppc_e500_tlbe_setup(tlbe, gtlbe, pfn, wimg, writable);
arch/powerpc/kvm/e500_mmu_host.c
461
kvmppc_mmu_flush_icache(pfn);
arch/powerpc/kvm/e500_mmu_host.c
589
hfn_t pfn;
arch/powerpc/kvm/e500_mmu_host.c
656
pfn = addr >> PAGE_SHIFT;
arch/powerpc/kvm/e500_mmu_host.c
659
if (unlikely(!page_is_ram(pfn))) {
arch/powerpc/kvm/e500_mmu_host.c
666
page = pfn_to_page(pfn);
arch/powerpc/kvm/trace_booke.h
121
TP_PROTO(__u64 pfn, __u32 flags),
arch/powerpc/kvm/trace_booke.h
122
TP_ARGS(pfn, flags),
arch/powerpc/kvm/trace_booke.h
125
__field( __u64, pfn )
arch/powerpc/kvm/trace_booke.h
130
__entry->pfn = pfn;
arch/powerpc/kvm/trace_booke.h
135
__entry->pfn, __entry->flags)
arch/powerpc/kvm/trace_pr.h
107
__entry->pfn = pte->pfn;
arch/powerpc/kvm/trace_pr.h
117
__entry->host_vpn, __entry->pfn, __entry->eaddr,
arch/powerpc/kvm/trace_pr.h
69
__field( u64, pfn )
arch/powerpc/kvm/trace_pr.h
78
__entry->pfn = pte->pfn;
arch/powerpc/kvm/trace_pr.h
88
__entry->host_vpn, __entry->pfn, __entry->eaddr,
arch/powerpc/kvm/trace_pr.h
98
__field( u64, pfn )
arch/powerpc/lib/code-patching.c
241
unsigned long pfn = get_patch_pfn(addr);
arch/powerpc/lib/code-patching.c
243
return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
arch/powerpc/lib/code-patching.c
287
unsigned long pfn = get_patch_pfn(addr);
arch/powerpc/lib/code-patching.c
300
__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
arch/powerpc/lib/code-patching.c
333
unsigned long pfn = get_patch_pfn(addr);
arch/powerpc/lib/code-patching.c
339
__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
arch/powerpc/lib/code-patching.c
471
unsigned long pfn = get_patch_pfn(addr);
arch/powerpc/lib/code-patching.c
486
__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
arch/powerpc/lib/code-patching.c
521
unsigned long pfn = get_patch_pfn(addr);
arch/powerpc/lib/code-patching.c
531
__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
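The code-patching.c entries repeat one pattern: look up the pfn behind the target text address with get_patch_pfn(), install a temporary writable PTE for that pfn at a scratch address, and write through the alias. The kernel mechanics cannot run standalone, but the aliasing idea can be shown in userspace by mapping one memfd page at two addresses and writing through the second. This is a loose analogy only, not the kernel mechanism (error checks omitted for brevity):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fd = memfd_create("page", 0);
    ftruncate(fd, 4096);

    /* a read-only "text" view and a second writable "poke" view of one page */
    char *text = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
    char *poke = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    strcpy(poke, "patched");   /* write through the alias...            */
    printf("%s\n", text);      /* ...and it is visible via the original */

    munmap(poke, 4096);
    munmap(text, 4096);
    close(fd);
    return 0;
}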
arch/powerpc/mm/book3s64/pgtable.c
237
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
arch/powerpc/mm/book3s64/pgtable.c
241
pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
arch/powerpc/mm/book3s64/pgtable.c
246
pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
arch/powerpc/mm/book3s64/pgtable.c
250
pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
arch/powerpc/mm/book3s64/radix_pgtable.c
115
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
arch/powerpc/mm/book3s64/radix_pgtable.c
130
unsigned long pfn = pa >> PAGE_SHIFT;
arch/powerpc/mm/book3s64/radix_pgtable.c
175
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
arch/powerpc/mm/book3s64/radix_pgtable.c
78
unsigned long pfn = pa >> PAGE_SHIFT;
arch/powerpc/mm/cacheflush.c
170
unsigned long pfn = folio_pfn(folio);
arch/powerpc/mm/cacheflush.c
172
flush_dcache_icache_phys((pfn + i) * PAGE_SIZE);
arch/powerpc/mm/mem.c
361
int devmem_is_allowed(unsigned long pfn)
arch/powerpc/mm/mem.c
363
if (page_is_rtas_user_buf(pfn))
arch/powerpc/mm/mem.c
365
if (iomem_is_exclusive(PFN_PHYS(pfn)))
arch/powerpc/mm/mem.c
367
if (!page_is_ram(pfn))
arch/powerpc/mm/mem.c
41
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
arch/powerpc/mm/mem.c
45
return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);
arch/powerpc/mm/mem.c
47
if (!page_is_ram(pfn))
arch/powerpc/mm/pgtable.c
422
unsigned long pfn = vmalloc_to_pfn(va);
arch/powerpc/mm/pgtable.c
424
BUG_ON(!pfn);
arch/powerpc/mm/pgtable.c
425
return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
arch/powerpc/mm/pgtable.c
64
unsigned long pfn = pte_pfn(pte);
arch/powerpc/mm/pgtable.c
67
if (unlikely(!pfn_valid(pfn)))
arch/powerpc/mm/pgtable.c
69
page = pfn_to_page(pfn);
arch/powerpc/platforms/book3s/vas-api.c
509
unsigned long pfn;
arch/powerpc/platforms/book3s/vas-api.c
565
pfn = paste_addr >> PAGE_SHIFT;
arch/powerpc/platforms/book3s/vas-api.c
573
rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
arch/powerpc/platforms/cell/spufs/file.c
234
unsigned long pfn, offset;
arch/powerpc/platforms/cell/spufs/file.c
249
pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
arch/powerpc/platforms/cell/spufs/file.c
252
pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
arch/powerpc/platforms/cell/spufs/file.c
254
ret = vmf_insert_pfn(vma, vmf->address, pfn);
arch/powerpc/platforms/powernv/memtrace.c
123
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
arch/powerpc/platforms/powernv/memtrace.c
124
__SetPageOffline(pfn_to_page(pfn));
arch/powerpc/platforms/powernv/memtrace.c
202
unsigned long pfn;
arch/powerpc/platforms/powernv/memtrace.c
209
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
arch/powerpc/platforms/powernv/memtrace.c
210
__ClearPageOffline(pfn_to_page(pfn));
arch/powerpc/platforms/powernv/memtrace.c
98
unsigned long pfn, start_pfn;
arch/powerpc/platforms/pseries/ras.c
647
unsigned long pfn;
arch/powerpc/platforms/pseries/ras.c
649
pfn = addr_to_pfn(regs, eaddr);
arch/powerpc/platforms/pseries/ras.c
650
if (pfn != ULONG_MAX)
arch/powerpc/platforms/pseries/ras.c
651
paddr = pfn << PAGE_SHIFT;
arch/powerpc/platforms/pseries/svm.c
86
unsigned long pfn = PHYS_PFN(__pa(addr));
arch/powerpc/platforms/pseries/svm.c
87
struct page *page = pfn_to_page(pfn);
arch/powerpc/platforms/pseries/svm.c
93
uv_share_page(pfn, 1);
arch/riscv/include/asm/page.h
183
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
arch/riscv/include/asm/page.h
186
#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
arch/riscv/include/asm/page.h
195
static __always_inline void *pfn_to_kaddr(unsigned long pfn)
arch/riscv/include/asm/page.h
197
return __va(pfn << PAGE_SHIFT);
arch/riscv/include/asm/pgalloc.h
21
unsigned long pfn = virt_to_pfn(pte);
arch/riscv/include/asm/pgalloc.h
23
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
29
unsigned long pfn = virt_to_pfn(page_address(pte));
arch/riscv/include/asm/pgalloc.h
31
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
37
unsigned long pfn = virt_to_pfn(pmd);
arch/riscv/include/asm/pgalloc.h
39
set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
45
unsigned long pfn = virt_to_pfn(pud);
arch/riscv/include/asm/pgalloc.h
47
set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
55
unsigned long pfn = virt_to_pfn(pud);
arch/riscv/include/asm/pgalloc.h
58
__p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
65
unsigned long pfn = virt_to_pfn(p4d);
arch/riscv/include/asm/pgalloc.h
67
set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
75
unsigned long pfn = virt_to_pfn(p4d);
arch/riscv/include/asm/pgalloc.h
78
__pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
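Note that the riscv pgalloc.h and pgtable.h lines shift the pfn by _PAGE_PFN_SHIFT rather than PAGE_SHIFT: in the Sv39/Sv48 PTE format the physical page number field starts at bit 10, with the valid/permission bits packed below it. A sketch of packing and unpacking that field; the flag value here is illustrative, not the kernel's _PAGE_TABLE definition:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PFN_SHIFT 10     /* PPN field starts at bit 10 */
#define _PAGE_TABLE     0x01   /* illustrative: valid bit set, RWX clear */

int main(void)
{
    uint64_t pfn = 0x80123;
    uint64_t pte = (pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE;

    printf("pte=%#llx pfn=%#llx flags=%#llx\n",
           (unsigned long long)pte,
           (unsigned long long)(pte >> _PAGE_PFN_SHIFT),
           (unsigned long long)(pte & ((1ULL << _PAGE_PFN_SHIFT) - 1)));
    return 0;
}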
arch/riscv/include/asm/pgtable-64.h
213
static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
arch/riscv/include/asm/pgtable-64.h
215
return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
arch/riscv/include/asm/pgtable-64.h
253
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
arch/riscv/include/asm/pgtable-64.h
259
return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
arch/riscv/include/asm/pgtable-64.h
314
static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
arch/riscv/include/asm/pgtable-64.h
316
return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
arch/riscv/include/asm/pgtable.h
255
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
arch/riscv/include/asm/pgtable.h
261
return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
arch/riscv/include/asm/pgtable.h
335
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
arch/riscv/include/asm/pgtable.h
341
return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
arch/riscv/include/asm/pgtable.h
347
unsigned long pfn = pte_pfn(pte);
arch/riscv/include/asm/pgtable.h
349
return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
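The riscv pte_pgprot() line above relies on an XOR identity: pfn_pte(pfn, __pgprot(0)) contributes exactly the pfn field bits and nothing else, so XOR-ing it back out of the full PTE value leaves only the protection bits. Demonstrated standalone (the shift value is illustrative; the identity needs the prot bits and the pfn field not to overlap):

#include <stdint.h>
#include <stdio.h>

#define PFN_SHIFT 10

int main(void)
{
    uint64_t pfn = 0x4567, prot = 0xcf;        /* arbitrary demo values */
    uint64_t pte = (pfn << PFN_SHIFT) | prot;

    /* pte ^ pfn_pte(pfn, 0): the pfn bits cancel, the prot bits remain */
    uint64_t recovered = pte ^ (pfn << PFN_SHIFT);

    printf("prot=%#llx recovered=%#llx\n",
           (unsigned long long)prot, (unsigned long long)recovered);
    return 0;
}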
arch/riscv/kernel/crash_dump.c
12
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/riscv/kernel/crash_dump.c
20
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
arch/riscv/kernel/hibernate.c
73
int pfn_is_nosave(unsigned long pfn)
arch/riscv/kernel/hibernate.c
78
return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn));
arch/riscv/kernel/vdso.c
48
unsigned long pfn;
arch/riscv/kernel/vdso.c
63
pfn = sym_to_pfn(vdso_info->vdso_code_start);
arch/riscv/kernel/vdso.c
66
vdso_pagelist[i] = pfn_to_page(pfn + i);
arch/riscv/kvm/mmu.c
43
unsigned long pfn;
arch/riscv/kvm/mmu.c
58
pfn = __phys_to_pfn(hpa);
arch/riscv/kvm/mmu.c
63
map.pte = pfn_pte(pfn, prot);
arch/riscv/kvm/mmu.c
80
pfn++;
arch/riscv/mm/fault.c
176
unsigned long pfn;
arch/riscv/mm/fault.c
191
pfn = csr_read(CSR_SATP) & SATP_PPN;
arch/riscv/mm/fault.c
192
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
arch/riscv/mm/pageattr.c
113
unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
arch/riscv/mm/pageattr.c
124
set_pte(ptep_new, pfn_pte(pfn + i, prot));
arch/riscv/mm/pageattr.c
153
unsigned long pfn = _pud_pfn(pudp_get(pudp));
arch/riscv/mm/pageattr.c
165
pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
arch/riscv/mm/pageattr.c
202
unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
arch/riscv/mm/pageattr.c
218
pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
arch/s390/include/asm/page.h
177
static inline int devmem_is_allowed(unsigned long pfn)
arch/s390/include/asm/page.h
258
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
arch/s390/include/asm/page.h
263
static inline void *pfn_to_virt(unsigned long pfn)
arch/s390/include/asm/page.h
265
return __va(pfn_to_phys(pfn));
arch/s390/include/asm/page.h
273
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
arch/s390/include/asm/pgtable.h
1468
#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
arch/s390/include/asm/pgtable.h
1772
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
arch/s390/kernel/crash_dump.c
165
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/s390/kernel/crash_dump.c
170
src = pfn_to_phys(pfn) + offset;
arch/s390/kernel/crash_dump.c
181
unsigned long from, unsigned long pfn,
arch/s390/kernel/crash_dump.c
187
if (pfn < oldmem_data.size >> PAGE_SHIFT) {
arch/s390/kernel/crash_dump.c
188
size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
arch/s390/kernel/crash_dump.c
190
pfn + (oldmem_data.start >> PAGE_SHIFT),
arch/s390/kernel/crash_dump.c
196
pfn += size_old >> PAGE_SHIFT;
arch/s390/kernel/crash_dump.c
198
return remap_pfn_range(vma, from, pfn, size, prot);
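The s390 crash_dump.c lines around 187-198 split one mapping request at the end of the saved old memory: the portion below the limit is mapped from the relocated copy, and the remainder falls through to a normal remap_pfn_range(). The split itself is plain arithmetic; a sketch with made-up sizes, assuming PAGE_SHIFT = 12:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long oldmem_size = 0x100000;  /* 1 MiB of saved old memory */
    unsigned long pfn  = 0xf0;             /* start of the request */
    unsigned long size = 0x40000;          /* 256 KiB requested */

    if (pfn < (oldmem_size >> PAGE_SHIFT)) {
        /* same min() split as the crash_dump.c line 188 entry above */
        unsigned long size_old = size;
        if (oldmem_size - (pfn << PAGE_SHIFT) < size_old)
            size_old = oldmem_size - (pfn << PAGE_SHIFT);

        printf("map %#lx bytes from old memory, %#lx bytes normally\n",
               size_old, size - size_old);
        pfn += size_old >> PAGE_SHIFT;     /* advance past the first part */
    }
    printf("remainder starts at pfn %#lx\n", pfn);
    return 0;
}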
arch/s390/kernel/crash_dump.c
209
unsigned long pfn,
arch/s390/kernel/crash_dump.c
215
if (pfn < hsa_end >> PAGE_SHIFT) {
arch/s390/kernel/crash_dump.c
216
size_hsa = min(size, hsa_end - (pfn << PAGE_SHIFT));
arch/s390/kernel/crash_dump.c
221
pfn += size_hsa >> PAGE_SHIFT;
arch/s390/kernel/crash_dump.c
223
return remap_pfn_range(vma, from, pfn, size, prot);
arch/s390/kernel/crash_dump.c
230
unsigned long pfn, unsigned long size, pgprot_t prot)
arch/s390/kernel/crash_dump.c
233
return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
arch/s390/kernel/crash_dump.c
235
return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
arch/s390/kvm/dat.h
393
static inline union pte _pte(kvm_pfn_t pfn, bool writable, bool dirty, bool special)
arch/s390/kvm/dat.h
395
union pte res = { .val = PFN_PHYS(pfn) };
arch/s390/kvm/dat.h
407
static inline union crste _crste_fc0(kvm_pfn_t pfn, int tt)
arch/s390/kvm/dat.h
409
union crste res = { .val = PFN_PHYS(pfn) };
arch/s390/kvm/dat.h
431
static inline union crste _crste_fc1(kvm_pfn_t pfn, int tt, bool writable, bool dirty)
arch/s390/kvm/dat.h
433
union crste res = { .val = PFN_PHYS(pfn) & _SEGMENT_MASK };
arch/s390/kvm/dat.h
490
kvm_pfn_t pfn; /* Host PFN */
arch/s390/kvm/faultin.c
136
f->pfn = __kvm_faultin_pfn(slot, gfn, foll, &f->writable, &f->page);
arch/s390/kvm/faultin.c
137
if (is_noslot_pfn(f->pfn))
arch/s390/kvm/faultin.c
139
if (is_sigpending_pfn(f->pfn))
arch/s390/kvm/faultin.c
141
if (f->pfn == KVM_PFN_ERR_NEEDS_IO)
arch/s390/kvm/faultin.c
143
if (is_error_pfn(f->pfn))
arch/s390/kvm/faultin.c
66
f->pfn = __kvm_faultin_pfn(slot, f->gfn, foll, &f->writable, &f->page);
arch/s390/kvm/faultin.c
69
if (f->pfn == KVM_PFN_ERR_NEEDS_IO) {
arch/s390/kvm/faultin.c
80
f->pfn = __kvm_faultin_pfn(slot, f->gfn, foll, &f->writable, &f->page);
arch/s390/kvm/faultin.c
84
if (is_noslot_pfn(f->pfn))
arch/s390/kvm/faultin.c
87
if (f->pfn == KVM_PFN_ERR_SIGPENDING)
arch/s390/kvm/faultin.c
90
if (f->pfn == KVM_PFN_ERR_RO_FAULT)
arch/s390/kvm/faultin.c
93
if (is_error_pfn(f->pfn))
arch/s390/kvm/faultin.h
36
*val = *(unsigned long *)phys_to_virt(pfn_to_phys(f->pfn) | offset_in_page(gaddr));
arch/s390/kvm/gaccess.c
1108
context->exception = __cmpxchg_with_key(__va(PFN_PHYS(f->pfn) | context->offset),
arch/s390/kvm/gaccess.c
1439
newpte = _pte(f->pfn, f->writable, !p, ptep_h->s.s);
arch/s390/kvm/gaccess.c
1455
newpte = _pte(f->pfn, 0, !p, 0);
arch/s390/kvm/gaccess.c
1485
newcrste = _crste_fc1(f->pfn, oldcrste.h.tt, f->writable, !p);
arch/s390/kvm/gaccess.c
1498
newcrste = _crste_fc1(f->pfn, oldcrste.h.tt, 0, !p);
arch/s390/kvm/gaccess.c
1545
entries[i].pfn, i + 1, entries[i].writable);
arch/s390/kvm/gaccess.c
872
ptr = __va(PFN_PHYS(f->pfn) | context->offset);
arch/s390/kvm/gmap.c
1007
kvm_pfn_t pfn, int level, bool wr)
arch/s390/kvm/gmap.c
1032
pte = ptep->s.pr ? *ptep : _pte(pfn, wr, false, false);
arch/s390/kvm/gmap.c
1183
rc = gmap_protect_rmap(mc, sg, context->f[i].gfn, 0, context->f[i].pfn,
arch/s390/kvm/gmap.c
531
f->pfn = PHYS_PFN(large_crste_to_phys(oldcrste, f->gfn));
arch/s390/kvm/gmap.c
560
f->pfn = oldpte.h.pfra;
arch/s390/kvm/gmap.c
653
newpte = _pte(f->pfn, f->writable, f->write_attempt | oldpte.s.d, !f->page);
arch/s390/kvm/gmap.c
656
if (oldpte.val == _PTE_EMPTY.val || oldpte.h.pfra == f->pfn) {
arch/s390/kvm/gmap.c
667
newval = _crste_fc1(f->pfn, oldval.h.tt, f->writable,
arch/s390/kvm/gmap.h
104
kvm_pfn_t pfn, int level, bool wr);
arch/s390/kvm/pv.c
107
folio = pfn_folio(f->pfn);
arch/s390/pci/pci_mmio.c
191
io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
arch/s390/pci/pci_mmio.c
343
io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
arch/sh/include/asm/cacheflush.h
41
unsigned long addr, unsigned long pfn);
arch/sh/include/asm/io.h
289
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
arch/sh/include/asm/mmzone.h
13
if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
arch/sh/include/asm/mmzone.h
19
static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
arch/sh/include/asm/mmzone.h
21
return NODE_DATA(pfn_to_nid(pfn));
arch/sh/include/asm/mmzone.h
8
static inline int pfn_to_nid(unsigned long pfn)
arch/sh/include/asm/page.h
147
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/sh/include/asm/pgtable_32.h
317
#define pfn_pte(pfn, prot) \
arch/sh/include/asm/pgtable_32.h
318
__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/sh/include/asm/pgtable_32.h
319
#define pfn_pmd(pfn, prot) \
arch/sh/include/asm/pgtable_32.h
320
__pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/sh/kernel/crash_dump.c
14
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/sh/kernel/crash_dump.c
22
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/sh/kernel/swsusp.c
19
int pfn_is_nosave(unsigned long pfn)
arch/sh/kernel/swsusp.c
24
return (pfn >= begin_pfn) && (pfn < end_pfn);
arch/sh/mm/cache-sh4.c
121
unsigned long pfn = folio_pfn(folio);
arch/sh/mm/cache-sh4.c
128
pfn * PAGE_SIZE);
arch/sh/mm/cache-sh4.c
130
pfn++;
arch/sh/mm/cache-sh4.c
219
unsigned long address, pfn, phys;
arch/sh/mm/cache-sh4.c
227
pfn = data->addr2;
arch/sh/mm/cache-sh4.c
228
phys = pfn << PAGE_SHIFT;
arch/sh/mm/cache-sh4.c
229
page = pfn_to_page(pfn);
arch/sh/mm/cache-sh7705.c
143
unsigned long pfn = folio_pfn(folio);
arch/sh/mm/cache-sh7705.c
147
__flush_dcache_page((pfn + i) * PAGE_SIZE);
arch/sh/mm/cache-sh7705.c
171
unsigned long pfn = data->addr2;
arch/sh/mm/cache-sh7705.c
173
__flush_dcache_page(pfn << PAGE_SHIFT);
arch/sh/mm/cache.c
144
unsigned long pfn = pte_pfn(pte);
arch/sh/mm/cache.c
149
if (pfn_valid(pfn)) {
arch/sh/mm/cache.c
150
struct folio *folio = page_folio(pfn_to_page(pfn));
arch/sh/mm/cache.c
201
unsigned long pfn)
arch/sh/mm/cache.c
207
data.addr2 = pfn;
arch/sh/mm/mmap.c
181
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
arch/sparc/include/asm/cacheflush_32.h
16
#define flush_cache_page(vma,addr,pfn) \
arch/sparc/include/asm/cacheflush_64.h
26
#define flush_cache_page(vma, page, pfn) \
arch/sparc/include/asm/leon.h
254
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
arch/sparc/include/asm/page_64.h
148
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
arch/sparc/include/asm/pgtable_32.h
251
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
arch/sparc/include/asm/pgtable_32.h
253
return __pte((pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot));
arch/sparc/include/asm/pgtable_32.h
386
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
arch/sparc/include/asm/pgtable_32.h
387
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
arch/sparc/include/asm/pgtable_32.h
388
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
arch/sparc/include/asm/pgtable_32.h
390
static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
arch/sparc/include/asm/pgtable_32.h
395
offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
arch/sparc/include/asm/pgtable_32.h
396
space = GET_IOSPACE(pfn);
arch/sparc/include/asm/pgtable_64.h
1044
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
arch/sparc/include/asm/pgtable_64.h
1045
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
arch/sparc/include/asm/pgtable_64.h
1046
#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
arch/sparc/include/asm/pgtable_64.h
1081
static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
arch/sparc/include/asm/pgtable_64.h
1084
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
arch/sparc/include/asm/pgtable_64.h
1085
int space = GET_IOSPACE(pfn);
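The sparc MK_IOSPACE_PFN/GET_IOSPACE/GET_PFN macros multiplex an I/O space number into the top four bits of a pfn, so one unsigned long carries both values. A standalone pack/unpack demo of the 64-bit layout shown above (argument parentheses added for safety in the demo):

#include <stdio.h>

#define BITS_PER_LONG 64
#define MK_IOSPACE_PFN(space, pfn) \
    ((pfn) | ((unsigned long)(space) << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(v) ((v) >> (BITS_PER_LONG - 4))
#define GET_PFN(v)     ((v) & 0x0fffffffffffffffUL)

int main(void)
{
    unsigned long v = MK_IOSPACE_PFN(0x9, 0x12345);

    printf("space=%#lx pfn=%#lx\n", GET_IOSPACE(v), GET_PFN(v));
    return 0;
}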
arch/sparc/include/asm/pgtable_64.h
218
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
arch/sparc/include/asm/pgtable_64.h
220
unsigned long paddr = pfn << PAGE_SHIFT;
arch/sparc/include/asm/pgtable_64.h
836
unsigned long pfn;
arch/sparc/include/asm/pgtable_64.h
838
pfn = pte_pfn(pte);
arch/sparc/include/asm/pgtable_64.h
840
return ((unsigned long) __va(pfn << PAGE_SHIFT));
arch/sparc/include/asm/pgtable_64.h
846
unsigned long pfn;
arch/sparc/include/asm/pgtable_64.h
848
pfn = pte_pfn(pte);
arch/sparc/include/asm/pgtable_64.h
850
return ((pmd_t *) __va(pfn << PAGE_SHIFT));
arch/sparc/kernel/smp_64.c
933
unsigned long pfn = folio_pfn(folio)
arch/sparc/kernel/smp_64.c
935
__flush_icache_page((pfn + i) * PAGE_SIZE);
arch/sparc/mm/init_64.c
213
__flush_icache_page((pfn + i) * PAGE_SIZE);
arch/sparc/mm/init_64.c
283
static void flush_dcache(unsigned long pfn)
arch/sparc/mm/init_64.c
287
page = pfn_to_page(pfn);
arch/sparc/mm/init_64.c
398
unsigned long pfn = pte_pfn(pte);
arch/sparc/mm/init_64.c
400
if (pfn_valid(pfn))
arch/sparc/mm/init_64.c
401
flush_dcache(pfn);
arch/sparc/mm/init_64.c
456
unsigned long pfn = folio_pfn(folio);
arch/sparc/mm/init_64.c
467
if (is_zero_pfn(pfn))
arch/sparc/mm/iommu.c
190
unsigned long pfn = __phys_to_pfn(paddr);
arch/sparc/mm/iommu.c
216
ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
arch/sparc/mm/iommu.c
225
iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
arch/sparc/mm/iommu.c
229
pfn++;
arch/sparc/mm/iommu.c
53
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
arch/sparc/mm/tlb.c
127
unsigned long paddr, pfn = pte_pfn(orig);
arch/sparc/mm/tlb.c
132
if (!pfn_valid(pfn))
arch/sparc/mm/tlb.c
135
page = pfn_to_page(pfn);
arch/sparc/power/hibernate.c
23
int pfn_is_nosave(unsigned long pfn)
arch/sparc/power/hibernate.c
28
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
arch/um/include/asm/page.h
89
#define pfn_to_phys(pfn) PFN_PHYS(pfn)
arch/um/include/asm/pgtable-2level.h
40
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
arch/um/include/asm/pgtable.h
261
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
arch/um/include/asm/pgtable.h
265
pte_set_val(pte, pfn_to_phys(pfn), pgprot);
arch/x86/boot/startup/sev-shared.c
621
pc->entry[0].pfn = paddr >> PAGE_SHIFT;
arch/x86/coco/sev/core.c
195
u64 pfn;
arch/x86/coco/sev/core.c
201
pfn = e->gfn;
arch/x86/coco/sev/core.c
202
vaddr = (unsigned long)pfn_to_kaddr(pfn);
arch/x86/coco/sev/core.c
213
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
arch/x86/coco/sev/core.c
216
__pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
arch/x86/coco/sev/core.c
219
__pval_terminate(pfn, validate, size, rc, 0);
arch/x86/coco/sev/core.c
327
unsigned long pfn;
arch/x86/coco/sev/core.c
341
pfn = vmalloc_to_pfn((void *)vaddr);
arch/x86/coco/sev/core.c
344
pfn = __pa(vaddr) >> PAGE_SHIFT;
arch/x86/coco/sev/core.c
348
e->gfn = pfn;
arch/x86/coco/sev/core.c
545
set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
arch/x86/coco/sev/core.c
948
u64 pfn;
arch/x86/coco/sev/core.c
960
pfn = address >> PAGE_SHIFT;
arch/x86/coco/sev/core.c
962
if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
arch/x86/coco/sev/core.c
970
pfn = address >> PAGE_SHIFT;
arch/x86/coco/sev/core.c
971
if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags_enc))
arch/x86/coco/sev/internal.h
110
static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
arch/x86/coco/sev/internal.h
114
pfn, action, page_size, ret, svsm_ret);
arch/x86/coco/sev/svsm.c
108
while (pfn < pfn_end) {
arch/x86/coco/sev/svsm.c
113
pe->pfn = pfn;
arch/x86/coco/sev/svsm.c
116
pfn++;
arch/x86/coco/sev/svsm.c
123
return pfn;
arch/x86/coco/sev/svsm.c
144
pe->pfn = e->gfn;
arch/x86/coco/sev/svsm.c
162
u64 pfn;
arch/x86/coco/sev/svsm.c
164
pfn = pc->entry[pc->cur_index].pfn;
arch/x86/coco/sev/svsm.c
168
__pval_terminate(pfn, action, page_size, ret, svsm_ret);
arch/x86/coco/sev/svsm.c
238
u64 pfn, pfn_end;
arch/x86/coco/sev/svsm.c
241
pfn = pv_4k[i].pfn;
arch/x86/coco/sev/svsm.c
242
pfn_end = pfn + 512;
arch/x86/coco/sev/svsm.c
244
while (pfn < pfn_end) {
arch/x86/coco/sev/svsm.c
245
pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc);
arch/x86/coco/sev/svsm.c
97
static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
arch/x86/coco/sev/vc-shared.c
613
unsigned long pfn = paddr >> PAGE_SHIFT;
arch/x86/coco/sev/vc-shared.c
616
sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
arch/x86/coco/sev/vc-shared.c
623
(GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
arch/x86/entry/vdso/vma.c
104
unsigned long pfn = hv_get_tsc_pfn();
arch/x86/entry/vdso/vma.c
105
if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
arch/x86/entry/vdso/vma.c
106
return vmf_insert_pfn(vma, vmf->address, pfn);
arch/x86/hyperv/hv_init.c
138
*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
arch/x86/hyperv/hv_init.c
165
msr.pfn = vmalloc_to_pfn(*hvp);
arch/x86/hyperv/ivm.c
487
u64 pfn;
arch/x86/hyperv/ivm.c
498
u64 pfn;
arch/x86/hyperv/ivm.c
502
pfn = pfn_list[i];
arch/x86/hyperv/ivm.c
507
if ((ent->pfn <= pfn) && (ent->pfn + ent->count - 1 >= pfn))
arch/x86/hyperv/ivm.c
520
if (ent->pfn + ent->count == pfn) {
arch/x86/hyperv/ivm.c
524
} else if (pfn + 1 == ent->pfn) {
arch/x86/hyperv/ivm.c
526
ent->pfn--;
arch/x86/hyperv/ivm.c
538
ent->pfn = pfn;
arch/x86/hyperv/ivm.c
556
u64 pfn;
arch/x86/hyperv/ivm.c
560
pfn = pfn_list[i];
arch/x86/hyperv/ivm.c
564
if (pfn == ent->pfn + ent->count - 1) {
arch/x86/hyperv/ivm.c
572
} else if (pfn == ent->pfn) {
arch/x86/hyperv/ivm.c
575
ent->pfn++;
arch/x86/hyperv/ivm.c
581
} else if (pfn > ent->pfn && pfn < ent->pfn + ent->count - 1) {
arch/x86/hyperv/ivm.c
587
new_region.pfn = pfn + 1;
arch/x86/hyperv/ivm.c
588
new_region.count = ent->count - (pfn - ent->pfn + 1);
arch/x86/hyperv/ivm.c
589
ent->count = pfn - ent->pfn;
arch/x86/hyperv/ivm.c
605
ent->pfn = new_region.pfn;
arch/x86/hyperv/ivm.c
648
input->gpa_page_list[cur] = ent->pfn + i;
arch/x86/hyperv/ivm.c
677
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
arch/x86/hyperv/ivm.c
696
ret = hv_list_enc_remove(pfn, count);
arch/x86/hyperv/ivm.c
698
ret = hv_list_enc_add(pfn, count);
arch/x86/hyperv/ivm.c
714
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
arch/x86/hyperv/ivm.c
724
ret = hv_list_enc_add(pfn, count);
arch/x86/hyperv/ivm.c
726
ret = hv_list_enc_remove(pfn, count);
arch/x86/hyperv/ivm.c
775
int i, pfn, err;
arch/x86/hyperv/ivm.c
785
for (i = 0, pfn = 0; i < pagecount; i++) {
arch/x86/hyperv/ivm.c
794
pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
arch/x86/hyperv/ivm.c
795
pfn++;
arch/x86/hyperv/ivm.c
797
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
arch/x86/hyperv/ivm.c
798
ret = hv_mark_gpa_visibility(pfn, pfn_array,
arch/x86/hyperv/ivm.c
802
pfn = 0;
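The ivm.c loop in the lines 775-802 entries fills a fixed-size pfn array and flushes it whenever it reaches HV_MAX_MODIFY_GPA_REP_COUNT or the input is exhausted. A sketch of that batching skeleton; flush_batch() is a hypothetical stand-in for hv_mark_gpa_visibility(), and the batch size is illustrative:

#include <stdio.h>

#define BATCH_MAX 64   /* stands in for HV_MAX_MODIFY_GPA_REP_COUNT */

static void flush_batch(int count, const unsigned long pfns[])
{
    /* stands in for hv_mark_gpa_visibility(count, pfn_array, ...) */
    printf("flush %d pfns starting at %#lx\n", count, pfns[0]);
}

int main(void)
{
    unsigned long pfn_array[BATCH_MAX];
    int pagecount = 150, i, n = 0;

    for (i = 0; i < pagecount; i++) {
        pfn_array[n++] = 0x1000 + i;
        /* flush when the batch is full or on the last page */
        if (n == BATCH_MAX || i == pagecount - 1) {
            flush_batch(n, pfn_array);
            n = 0;
        }
    }
    return 0;
}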
arch/x86/include/asm/io.h
114
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
arch/x86/include/asm/iomap.h
16
void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
arch/x86/include/asm/kvm_host.h
1962
int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
arch/x86/include/asm/kvm_host.h
1964
int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
arch/x86/include/asm/memtype.h
24
extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
arch/x86/include/asm/page.h
66
static __always_inline void *pfn_to_kaddr(unsigned long pfn)
arch/x86/include/asm/page.h
68
return __va(pfn << PAGE_SHIFT);
arch/x86/include/asm/paravirt.h
286
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
arch/x86/include/asm/paravirt.h
288
PVOP_VCALL2(pv_ops, mmu.alloc_pte, mm, pfn);
arch/x86/include/asm/paravirt.h
290
static inline void paravirt_release_pte(unsigned long pfn)
arch/x86/include/asm/paravirt.h
292
PVOP_VCALL1(pv_ops, mmu.release_pte, pfn);
arch/x86/include/asm/paravirt.h
295
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
arch/x86/include/asm/paravirt.h
297
PVOP_VCALL2(pv_ops, mmu.alloc_pmd, mm, pfn);
arch/x86/include/asm/paravirt.h
300
static inline void paravirt_release_pmd(unsigned long pfn)
arch/x86/include/asm/paravirt.h
302
PVOP_VCALL1(pv_ops, mmu.release_pmd, pfn);
arch/x86/include/asm/paravirt.h
305
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
arch/x86/include/asm/paravirt.h
307
PVOP_VCALL2(pv_ops, mmu.alloc_pud, mm, pfn);
arch/x86/include/asm/paravirt.h
309
static inline void paravirt_release_pud(unsigned long pfn)
arch/x86/include/asm/paravirt.h
311
PVOP_VCALL1(pv_ops, mmu.release_pud, pfn);
arch/x86/include/asm/paravirt.h
314
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
arch/x86/include/asm/paravirt.h
316
PVOP_VCALL2(pv_ops, mmu.alloc_p4d, mm, pfn);
arch/x86/include/asm/paravirt.h
319
static inline void paravirt_release_p4d(unsigned long pfn)
arch/x86/include/asm/paravirt.h
321
PVOP_VCALL1(pv_ops, mmu.release_p4d, pfn);
arch/x86/include/asm/paravirt.h
54
static inline void notify_page_enc_status_changed(unsigned long pfn,
arch/x86/include/asm/paravirt.h
57
PVOP_VCALL3(pv_ops, mmu.notify_page_enc_status_changed, pfn, npages, enc);
arch/x86/include/asm/paravirt_types.h
115
void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
arch/x86/include/asm/paravirt_types.h
135
void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
136
void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
137
void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
138
void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
139
void (*release_pte)(unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
140
void (*release_pmd)(unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
141
void (*release_pud)(unsigned long pfn);
arch/x86/include/asm/paravirt_types.h
142
void (*release_p4d)(unsigned long pfn);
arch/x86/include/asm/pgalloc.h
22
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
23
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
24
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
arch/x86/include/asm/pgalloc.h
26
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
27
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
28
static inline void paravirt_release_pte(unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
29
static inline void paravirt_release_pmd(unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
30
static inline void paravirt_release_pud(unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
31
static inline void paravirt_release_p4d(unsigned long pfn) {}
arch/x86/include/asm/pgalloc.h
79
unsigned long pfn = page_to_pfn(pte);
arch/x86/include/asm/pgalloc.h
81
paravirt_alloc_pte(mm, pfn);
arch/x86/include/asm/pgalloc.h
82
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
arch/x86/include/asm/pgtable.h
1650
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
arch/x86/include/asm/pgtable.h
1692
int arch_memory_failure(unsigned long pfn, int flags);
arch/x86/include/asm/pgtable.h
258
phys_addr_t pfn = pte_val(pte);
arch/x86/include/asm/pgtable.h
259
pfn ^= protnone_mask(pfn);
arch/x86/include/asm/pgtable.h
260
return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
265
phys_addr_t pfn = pmd_val(pmd);
arch/x86/include/asm/pgtable.h
266
pfn ^= protnone_mask(pfn);
arch/x86/include/asm/pgtable.h
267
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
273
phys_addr_t pfn = pud_val(pud);
arch/x86/include/asm/pgtable.h
274
pfn ^= protnone_mask(pfn);
arch/x86/include/asm/pgtable.h
275
return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
732
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
736
pfn ^= protnone_mask(pgprot_val(pgprot));
arch/x86/include/asm/pgtable.h
737
pfn &= PTE_PFN_MASK;
arch/x86/include/asm/pgtable.h
738
return __pte(pfn | check_pgprot(pgprot));
arch/x86/include/asm/pgtable.h
743
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
744
pfn ^= protnone_mask(pgprot_val(pgprot));
arch/x86/include/asm/pgtable.h
745
pfn &= PHYSICAL_PMD_PAGE_MASK;
arch/x86/include/asm/pgtable.h
746
return __pmd(pfn | check_pgprot(pgprot));
arch/x86/include/asm/pgtable.h
751
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
752
pfn ^= protnone_mask(pgprot_val(pgprot));
arch/x86/include/asm/pgtable.h
753
pfn &= PHYSICAL_PUD_PAGE_MASK;
arch/x86/include/asm/pgtable.h
754
return __pud(pfn | check_pgprot(pgprot));
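The x86 pte_pfn()/pfn_pte() pairs above XOR the value with protnone_mask() before masking out the pfn field: for entries in PROT_NONE state (L1TF hardening) the pfn bits are stored inverted, and the same conditional XOR undoes the inversion on read. A sketch of the symmetric transform; the mask value and the boolean protnone parameter are illustrative simplifications of the real helper:

#include <stdint.h>
#include <stdio.h>

#define PTE_PFN_MASK 0x000ffffffffff000ULL
#define PAGE_SHIFT   12

/* all-ones over the pfn field when the entry is "protnone", 0 otherwise */
static uint64_t protnone_mask(int protnone)
{
    return protnone ? PTE_PFN_MASK : 0;
}

int main(void)
{
    uint64_t pfn = 0x1234;
    int protnone = 1;

    /* store: XOR flips the pfn bits for protnone entries */
    uint64_t pte = ((pfn << PAGE_SHIFT) ^ protnone_mask(protnone)) & PTE_PFN_MASK;
    /* load: the same XOR restores them */
    uint64_t out = ((pte ^ protnone_mask(protnone)) & PTE_PFN_MASK) >> PAGE_SHIFT;

    printf("stored=%#llx recovered pfn=%#llx\n",
           (unsigned long long)pte, (unsigned long long)out);
    return 0;
}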
arch/x86/include/asm/pgtable_types.h
532
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/x86/include/asm/pgtable_types.h
573
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
arch/x86/include/asm/sev.h
354
pfn : 52;
arch/x86/include/asm/sev.h
397
unsigned long pfn;
arch/x86/include/asm/sev.h
521
void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
arch/x86/include/asm/sev.h
630
static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
arch/x86/include/asm/sev.h
652
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
arch/x86/include/asm/sev.h
654
int psmash(u64 pfn);
arch/x86/include/asm/sev.h
655
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
arch/x86/include/asm/sev.h
656
int rmp_make_shared(u64 pfn, enum pg_level level);
arch/x86/include/asm/sev.h
657
void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp);
arch/x86/include/asm/sev.h
660
static inline void snp_leak_pages(u64 pfn, unsigned int pages)
arch/x86/include/asm/sev.h
662
__snp_leak_pages(pfn, pages, true);
arch/x86/include/asm/sev.h
669
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
arch/x86/include/asm/sev.h
671
static inline int psmash(u64 pfn) { return -ENODEV; }
arch/x86/include/asm/sev.h
672
static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid,
arch/x86/include/asm/sev.h
677
static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
arch/x86/include/asm/sev.h
678
static inline void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp) {}
arch/x86/include/asm/sev.h
679
static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
arch/x86/include/asm/xen/interface_32.h
100
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
arch/x86/include/asm/xen/interface_64.h
133
#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
arch/x86/include/asm/xen/page.h
133
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
arch/x86/include/asm/xen/page.h
137
if (pfn < xen_p2m_size)
arch/x86/include/asm/xen/page.h
138
mfn = xen_p2m_addr[pfn];
arch/x86/include/asm/xen/page.h
139
else if (unlikely(pfn < xen_max_p2m_pfn))
arch/x86/include/asm/xen/page.h
140
return get_phys_to_machine(pfn);
arch/x86/include/asm/xen/page.h
142
return IDENTITY_FRAME(pfn);
arch/x86/include/asm/xen/page.h
145
return get_phys_to_machine(pfn);
arch/x86/include/asm/xen/page.h
150
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
arch/x86/include/asm/xen/page.h
152
return pfn;
arch/x86/include/asm/xen/page.h
156
static inline unsigned long pfn_to_mfn(unsigned long pfn)
arch/x86/include/asm/xen/page.h
166
return pfn;
arch/x86/include/asm/xen/page.h
168
mfn = __pfn_to_mfn(pfn);
arch/x86/include/asm/xen/page.h
176
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
arch/x86/include/asm/xen/page.h
181
return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
arch/x86/include/asm/xen/page.h
186
unsigned long pfn;
arch/x86/include/asm/xen/page.h
197
ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
arch/x86/include/asm/xen/page.h
201
return pfn;
arch/x86/include/asm/xen/page.h
206
unsigned long pfn;
arch/x86/include/asm/xen/page.h
216
pfn = mfn_to_pfn_no_overrides(mfn);
arch/x86/include/asm/xen/page.h
217
if (__pfn_to_mfn(pfn) != mfn)
arch/x86/include/asm/xen/page.h
218
pfn = ~0;
arch/x86/include/asm/xen/page.h
224
if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
arch/x86/include/asm/xen/page.h
225
pfn = mfn;
arch/x86/include/asm/xen/page.h
227
return pfn;
arch/x86/include/asm/xen/page.h
243
static inline unsigned long pfn_to_gfn(unsigned long pfn)
arch/x86/include/asm/xen/page.h
246
return pfn;
arch/x86/include/asm/xen/page.h
248
return pfn_to_mfn(pfn);
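The Xen mfn_to_pfn() entries above cross-check their own answer: after translating machine frame to pseudo-physical frame, the code translates back through __pfn_to_mfn() and rejects the result (pfn = ~0) on mismatch, since the reverse table can hold stale entries. A toy version of that round-trip check with two small arrays standing in for the p2m/m2p tables:

#include <stdio.h>

#define NFRAMES 8
#define INVALID (~0UL)

static unsigned long p2m[NFRAMES] = { 5, 2, 7, 0, 1, 6, 4, 3 };
static unsigned long m2p[NFRAMES];

static unsigned long mfn_to_pfn(unsigned long mfn)
{
    unsigned long pfn = m2p[mfn];

    if (pfn >= NFRAMES || p2m[pfn] != mfn)   /* round-trip check */
        pfn = INVALID;
    return pfn;
}

int main(void)
{
    unsigned long pfn;

    for (pfn = 0; pfn < NFRAMES; pfn++)
        m2p[p2m[pfn]] = pfn;
    m2p[7] = 1;                              /* inject a stale reverse entry */

    printf("mfn 5 -> pfn %ld\n", (long)mfn_to_pfn(5));
    printf("mfn 7 -> pfn %ld (stale -> -1)\n", (long)mfn_to_pfn(7));
    return 0;
}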
arch/x86/include/asm/xen/page.h
260
#define pfn_to_bfn(pfn) pfn_to_gfn(pfn)
arch/x86/include/asm/xen/page.h
285
unsigned long pfn;
arch/x86/include/asm/xen/page.h
290
pfn = mfn_to_pfn(mfn);
arch/x86/include/asm/xen/page.h
291
if (__pfn_to_mfn(pfn) != mfn)
arch/x86/include/asm/xen/page.h
293
return pfn;
arch/x86/include/asm/xen/page.h
56
extern int xen_alloc_p2m_entry(unsigned long pfn);
arch/x86/include/asm/xen/page.h
58
extern unsigned long get_phys_to_machine(unsigned long pfn);
arch/x86/include/asm/xen/page.h
59
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
arch/x86/include/asm/xen/page.h
60
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
arch/x86/kernel/aperture_64.c
70
static int gart_mem_pfn_is_ram(unsigned long pfn)
arch/x86/kernel/aperture_64.c
72
return likely((pfn < aperture_pfn_start) ||
arch/x86/kernel/aperture_64.c
73
(pfn >= aperture_pfn_start + aperture_page_count));
arch/x86/kernel/aperture_64.c
77
static bool gart_oldmem_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
arch/x86/kernel/aperture_64.c
79
return !!gart_mem_pfn_is_ram(pfn);
arch/x86/kernel/cpu/mce/core.c
1446
unsigned long pfn;
arch/x86/kernel/cpu/mce/core.c
1455
pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
arch/x86/kernel/cpu/mce/core.c
1456
ret = memory_failure(pfn, flags);
arch/x86/kernel/cpu/mce/core.c
1458
set_mce_nospec(pfn);
arch/x86/kernel/cpu/mce/core.c
1480
unsigned long pfn;
arch/x86/kernel/cpu/mce/core.c
1484
pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
arch/x86/kernel/cpu/mce/core.c
1485
if (!memory_failure(pfn, 0))
arch/x86/kernel/cpu/mce/core.c
1486
set_mce_nospec(pfn);
arch/x86/kernel/cpu/mce/core.c
1744
int memory_failure(unsigned long pfn, int flags)
arch/x86/kernel/cpu/mce/core.c
1750
pfn);
arch/x86/kernel/cpu/mce/core.c
637
unsigned long pfn;
arch/x86/kernel/cpu/mce/core.c
646
pfn = (mce->addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
arch/x86/kernel/cpu/mce/core.c
647
if (!memory_failure(pfn, 0)) {
arch/x86/kernel/cpu/mce/core.c
648
set_mce_nospec(pfn);
arch/x86/kernel/cpu/sgx/main.c
681
int arch_memory_failure(unsigned long pfn, int flags)
arch/x86/kernel/cpu/sgx/main.c
683
struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT);
arch/x86/kernel/cpu/sgx/virt.c
38
unsigned long index, pfn;
arch/x86/kernel/cpu/sgx/virt.c
58
pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));
arch/x86/kernel/cpu/sgx/virt.c
60
ret = vmf_insert_pfn(vma, addr, pfn);
arch/x86/kernel/crash_dump_32.c
15
static inline bool is_crashed_pfn_valid(unsigned long pfn)
arch/x86/kernel/crash_dump_32.c
25
return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
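is_crashed_pfn_valid() uses a pfn_pte()/pte_pfn() round trip purely as a width check: a pfn too wide for the PTE's pfn field comes back changed, so equality proves the oldmem pfn is representable. A standalone version, assuming a 32-bit non-PAE PTE whose pfn field is 20 bits:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* 32-bit non-PAE PTE: shifting into 32 bits truncates pfns above 20 bits */
static uint32_t pfn_pte(unsigned long pfn) { return (uint32_t)(pfn << PAGE_SHIFT); }
static unsigned long pte_pfn(uint32_t pte) { return pte >> PAGE_SHIFT; }

static int is_pfn_representable(unsigned long pfn)
{
    return pte_pfn(pfn_pte(pfn)) == pfn;   /* survives the round trip? */
}

int main(void)
{
    printf("%#lx -> %d\n", 0xfffffUL,  is_pfn_representable(0xfffffUL));
    printf("%#lx -> %d\n", 0x100000UL, is_pfn_representable(0x100000UL));
    return 0;
}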
arch/x86/kernel/crash_dump_32.c
31
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/x86/kernel/crash_dump_32.c
39
if (!is_crashed_pfn_valid(pfn))
arch/x86/kernel/crash_dump_32.c
42
vaddr = kmap_local_pfn(pfn);
arch/x86/kernel/crash_dump_64.c
15
static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/x86/kernel/crash_dump_64.c
25
vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/crash_dump_64.c
27
vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
arch/x86/kernel/crash_dump_64.c
38
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/x86/kernel/crash_dump_64.c
41
return __copy_oldmem_page(iter, pfn, csize, offset, false);
arch/x86/kernel/crash_dump_64.c
49
ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
arch/x86/kernel/crash_dump_64.c
52
return __copy_oldmem_page(iter, pfn, csize, offset, true);
arch/x86/kernel/kvm.c
942
static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
arch/x86/kernel/kvm.c
944
kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
arch/x86/kernel/ldt.c
314
unsigned long pfn;
arch/x86/kernel/ldt.c
319
pfn = is_vmalloc ? vmalloc_to_pfn(src) :
arch/x86/kernel/ldt.c
337
pte = pfn_pte(pfn, pte_prot);
arch/x86/kernel/tboot.c
113
static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
arch/x86/kernel/tboot.c
135
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
arch/x86/kvm/mmu.h
258
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);
arch/x86/kvm/mmu/mmu.c
3032
kvm_pfn_t pfn, struct kvm_page_fault *fault)
arch/x86/kvm/mmu/mmu.c
3049
pfn == spte_to_pfn(*sptep))
arch/x86/kvm/mmu/mmu.c
3063
} else if (pfn != spte_to_pfn(*sptep)) {
arch/x86/kvm/mmu/mmu.c
3071
if (unlikely(is_noslot_pfn(pfn))) {
arch/x86/kvm/mmu/mmu.c
3079
wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
arch/x86/kvm/mmu/mmu.c
3317
kvm_pfn_t pfn;
arch/x86/kvm/mmu/mmu.c
3321
pfn = fault->pfn;
arch/x86/kvm/mmu/mmu.c
3326
pfn = KVM_PFN_ERR_FAULT;
arch/x86/kvm/mmu/mmu.c
3338
coco_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn, is_private);
arch/x86/kvm/mmu/mmu.c
3390
if (is_error_noslot_pfn(fault->pfn))
arch/x86/kvm/mmu/mmu.c
3411
VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
arch/x86/kvm/mmu/mmu.c
3412
fault->pfn &= ~mask;
arch/x86/kvm/mmu/mmu.c
3431
fault->pfn |= fault->gfn & page_mask;
arch/x86/kvm/mmu/mmu.c
3472
base_gfn, fault->pfn, fault);
arch/x86/kvm/mmu/mmu.c
3489
if (is_sigpending_pfn(fault->pfn)) {
arch/x86/kvm/mmu/mmu.c
3499
if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
arch/x86/kvm/mmu/mmu.c
3502
if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
arch/x86/kvm/mmu/mmu.c
3525
fault->pfn = KVM_PFN_NOSLOT;
arch/x86/kvm/mmu/mmu.c
4584
r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
arch/x86/kvm/mmu/mmu.c
4606
fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
arch/x86/kvm/mmu/mmu.c
4615
if (fault->pfn != KVM_PFN_ERR_NEEDS_IO)
arch/x86/kvm/mmu/mmu.c
4636
fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
arch/x86/kvm/mmu/mmu.c
4740
if (unlikely(is_error_pfn(fault->pfn)))
arch/x86/kvm/mmu/mmu.c
4743
if (WARN_ON_ONCE(!fault->slot || is_noslot_pfn(fault->pfn)))
arch/x86/kvm/mmu/mmu.c
5036
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
arch/x86/kvm/mmu/mmu.c
5052
.pfn = pfn,
arch/x86/kvm/mmu/mmu_internal.h
281
kvm_pfn_t pfn;
arch/x86/kvm/mmu/mmu_internal.h
362
.pfn = KVM_PFN_ERR_FAULT,
arch/x86/kvm/mmu/mmutrace.h
380
__field(u64, pfn)
arch/x86/kvm/mmu/mmutrace.h
386
__entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
arch/x86/kvm/mmu/mmutrace.h
391
__entry->gfn, __entry->pfn, __entry->level
arch/x86/kvm/mmu/paging_tmpl.h
743
base_gfn, fault->pfn, fault);
arch/x86/kvm/mmu/spte.c
107
static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
arch/x86/kvm/mmu/spte.c
109
if (pfn_valid(pfn))
arch/x86/kvm/mmu/spte.c
110
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
arch/x86/kvm/mmu/spte.c
121
(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
arch/x86/kvm/mmu/spte.c
123
return !e820__mapped_raw_any(pfn_to_hpa(pfn),
arch/x86/kvm/mmu/spte.c
124
pfn_to_hpa(pfn + 1) - 1,
arch/x86/kvm/mmu/spte.c
128
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
arch/x86/kvm/mmu/spte.c
136
*is_host_mmio = __kvm_is_mmio_pfn(pfn);
arch/x86/kvm/mmu/spte.c
188
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
arch/x86/kvm/mmu/spte.c
244
kvm_is_mmio_pfn(pfn, &is_host_mmio));
arch/x86/kvm/mmu/spte.c
250
if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
arch/x86/kvm/mmu/spte.c
253
spte |= (u64)pfn << PAGE_SHIFT;
arch/x86/kvm/mmu/spte.c
297
kvm_is_mmio_pfn(pfn, &is_host_mmio))
arch/x86/kvm/mmu/spte.h
545
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
arch/x86/kvm/mmu/tdp_mmu.c
1183
WARN_ON_ONCE(fault->pfn != spte_to_pfn(iter->old_spte));
arch/x86/kvm/mmu/tdp_mmu.c
1191
fault->pfn, iter->old_spte, fault->prefetch,
arch/x86/kvm/svm/sev.c
2280
static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
arch/x86/kvm/svm/sev.c
2293
ret = snp_lookup_rmpentry((u64)pfn, &assigned, &level);
arch/x86/kvm/svm/sev.c
2303
void *dst_vaddr = kmap_local_pfn(pfn);
arch/x86/kvm/svm/sev.c
2311
ret = rmp_make_private(pfn, gfn << PAGE_SHIFT, PG_LEVEL_4K,
arch/x86/kvm/svm/sev.c
2317
fw_args.address = __sme_set(pfn_to_hpa(pfn));
arch/x86/kvm/svm/sev.c
2335
if (ret && !snp_page_reclaim(kvm, pfn) &&
arch/x86/kvm/svm/sev.c
2339
void *dst_vaddr = kmap_local_pfn(pfn);
arch/x86/kvm/svm/sev.c
2455
u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
arch/x86/kvm/svm/sev.c
2462
ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true);
arch/x86/kvm/svm/sev.c
2471
snp_page_reclaim(kvm, pfn);
arch/x86/kvm/svm/sev.c
310
static int kvm_rmp_make_shared(struct kvm *kvm, u64 pfn, enum pg_level level)
arch/x86/kvm/svm/sev.c
312
if (KVM_BUG_ON(rmp_make_shared(pfn, level), kvm)) {
arch/x86/kvm/svm/sev.c
313
snp_leak_pages(pfn, page_level_size(level) >> PAGE_SHIFT);
arch/x86/kvm/svm/sev.c
3269
u64 pfn = __pa(svm->sev_es.vmsa) >> PAGE_SHIFT;
arch/x86/kvm/svm/sev.c
3271
if (kvm_rmp_make_shared(vcpu->kvm, pfn, PG_LEVEL_4K))
arch/x86/kvm/svm/sev.c
334
static int snp_page_reclaim(struct kvm *kvm, u64 pfn)
arch/x86/kvm/svm/sev.c
339
data.paddr = __sme_set(pfn << PAGE_SHIFT);
arch/x86/kvm/svm/sev.c
341
if (KVM_BUG(rc, kvm, "Failed to reclaim PFN %llx, rc %d fw_err %d", pfn, rc, fw_err)) {
arch/x86/kvm/svm/sev.c
342
snp_leak_pages(pfn, 1);
arch/x86/kvm/svm/sev.c
346
if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K))
arch/x86/kvm/svm/sev.c
3702
static int snp_rmptable_psmash(kvm_pfn_t pfn)
arch/x86/kvm/svm/sev.c
3706
pfn = pfn & ~(KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) - 1);
arch/x86/kvm/svm/sev.c
3713
ret = psmash(pfn);
arch/x86/kvm/svm/sev.c
3967
kvm_pfn_t pfn;
arch/x86/kvm/svm/sev.c
4004
if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
arch/x86/kvm/svm/sev.c
4019
svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
arch/x86/kvm/svm/sev.c
4813
unsigned long pfn;
arch/x86/kvm/svm/sev.c
4834
pfn = page_to_pfn(p);
arch/x86/kvm/svm/sev.c
4835
if (IS_ALIGNED(pfn, PTRS_PER_PMD))
arch/x86/kvm/svm/sev.c
4850
kvm_pfn_t pfn;
arch/x86/kvm/svm/sev.c
4875
ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &page, &order);
arch/x86/kvm/svm/sev.c
4882
ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
arch/x86/kvm/svm/sev.c
4885
gpa, pfn, ret);
arch/x86/kvm/svm/sev.c
4914
ret = snp_rmptable_psmash(pfn);
arch/x86/kvm/svm/sev.c
4921
if (!snp_lookup_rmpentry(pfn, &assigned, &rmp_level) &&
arch/x86/kvm/svm/sev.c
4926
gpa, pfn, ret);
arch/x86/kvm/svm/sev.c
4931
trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
arch/x86/kvm/svm/sev.c
4938
kvm_pfn_t pfn = start;
arch/x86/kvm/svm/sev.c
4940
while (pfn < end) {
arch/x86/kvm/svm/sev.c
4944
ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
arch/x86/kvm/svm/sev.c
4947
pfn, start, end, rmp_level, ret);
arch/x86/kvm/svm/sev.c
4953
__func__, pfn, start, end, rmp_level);
arch/x86/kvm/svm/sev.c
4957
pfn++;
arch/x86/kvm/svm/sev.c
4971
static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
arch/x86/kvm/svm/sev.c
4973
kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
arch/x86/kvm/svm/sev.c
4987
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
arch/x86/kvm/svm/sev.c
4998
rc = snp_lookup_rmpentry(pfn, &assigned, &level);
arch/x86/kvm/svm/sev.c
5001
gfn, pfn, rc);
arch/x86/kvm/svm/sev.c
5007
__func__, gfn, pfn, max_order, level);
arch/x86/kvm/svm/sev.c
5011
if (is_large_rmp_possible(kvm, pfn, max_order)) {
arch/x86/kvm/svm/sev.c
5013
pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
arch/x86/kvm/svm/sev.c
5017
pfn_aligned = pfn;
arch/x86/kvm/svm/sev.c
5024
gfn, pfn, level, rc);
arch/x86/kvm/svm/sev.c
5029
__func__, gfn, pfn, pfn_aligned, max_order, level);
arch/x86/kvm/svm/sev.c
5036
kvm_pfn_t pfn;
arch/x86/kvm/svm/sev.c
5043
for (pfn = start; pfn < end;) {
arch/x86/kvm/svm/sev.c
5048
rc = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
arch/x86/kvm/svm/sev.c
5052
use_2m_update = IS_ALIGNED(pfn, PTRS_PER_PMD) &&
arch/x86/kvm/svm/sev.c
5053
end >= (pfn + PTRS_PER_PMD) &&
arch/x86/kvm/svm/sev.c
5067
rc = snp_rmptable_psmash(pfn);
arch/x86/kvm/svm/sev.c
5069
pfn, rc);
arch/x86/kvm/svm/sev.c
5072
rc = rmp_make_shared(pfn, use_2m_update ? PG_LEVEL_2M : PG_LEVEL_4K);
arch/x86/kvm/svm/sev.c
5074
pfn, rc))
arch/x86/kvm/svm/sev.c
5089
clflush_cache_range(__va(pfn_to_hpa(pfn)),
arch/x86/kvm/svm/sev.c
5092
pfn += use_2m_update ? PTRS_PER_PMD : 1;
arch/x86/kvm/svm/sev.c
5097
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
arch/x86/kvm/svm/sev.c
5105
rc = snp_lookup_rmpentry(pfn, &assigned, &level);
arch/x86/kvm/svm/svm.h
897
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
arch/x86/kvm/svm/svm.h
899
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
arch/x86/kvm/svm/svm.h
923
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
arch/x86/kvm/svm/svm.h
928
static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
arch/x86/kvm/trace.h
1941
TP_PROTO(struct kvm_vcpu *vcpu, u64 gpa, u64 pfn, u64 error_code,
arch/x86/kvm/trace.h
1943
TP_ARGS(vcpu, gpa, pfn, error_code, rmp_level, psmash_ret),
arch/x86/kvm/trace.h
1948
__field(u64, pfn)
arch/x86/kvm/trace.h
1957
__entry->pfn = pfn;
arch/x86/kvm/trace.h
1964
__entry->vcpu_id, __entry->gpa, __entry->pfn,
arch/x86/kvm/vmx/main.c
842
static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
arch/x86/kvm/vmx/main.c
846
return tdx_gmem_max_mapping_level(kvm, pfn, is_private);
arch/x86/kvm/vmx/nested.c
3441
vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));
arch/x86/kvm/vmx/nested.c
3457
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
arch/x86/kvm/vmx/nested.c
3487
pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
arch/x86/kvm/vmx/tdx.c
1625
kvm_pfn_t pfn)
arch/x86/kvm/vmx/tdx.c
1637
err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
arch/x86/kvm/vmx/tdx.c
1649
enum pg_level level, kvm_pfn_t pfn)
arch/x86/kvm/vmx/tdx.c
1653
struct page *page = pfn_to_page(pfn);
arch/x86/kvm/vmx/tdx.c
1672
kvm_pfn_t pfn = spte_to_pfn(mirror_spte);
arch/x86/kvm/vmx/tdx.c
1694
return tdx_mem_page_add(kvm, gfn, level, pfn);
arch/x86/kvm/vmx/tdx.c
1696
return tdx_mem_page_aug(kvm, gfn, level, pfn);
arch/x86/kvm/vmx/tdx.c
3119
static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
arch/x86/kvm/vmx/tdx.c
3135
ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
arch/x86/kvm/vmx/tdx.c
3285
int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private)
arch/x86/kvm/vmx/vmx.c
7037
kvm_pfn_t pfn;
arch/x86/kvm/vmx/vmx.c
7071
pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &refcounted_page);
arch/x86/kvm/vmx/vmx.c
7072
if (is_error_noslot_pfn(pfn))
arch/x86/kvm/vmx/vmx.c
7079
vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
arch/x86/kvm/vmx/x86_ops.h
156
int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
arch/x86/kvm/x86.c
14070
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
arch/x86/kvm/x86.c
14072
return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
arch/x86/mm/fault.c
289
static bool low_pfn(unsigned long pfn)
arch/x86/mm/fault.c
291
return pfn < max_low_pfn;
arch/x86/mm/init.c
128
unsigned long pfn;
arch/x86/mm/init.c
153
pfn = ret >> PAGE_SHIFT;
arch/x86/mm/init.c
155
pfn = pgt_buf_end;
arch/x86/mm/init.c
162
adr = __va((pfn + i) << PAGE_SHIFT);
arch/x86/mm/init.c
166
return __va(pfn << PAGE_SHIFT);
arch/x86/mm/init.c
406
unsigned long pfn;
arch/x86/mm/init.c
412
pfn = start_pfn = PFN_DOWN(start);
arch/x86/mm/init.c
420
if (pfn == 0)
arch/x86/mm/init.c
423
end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c
425
end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c
431
pfn = end_pfn;
arch/x86/mm/init.c
435
start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c
439
end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
arch/x86/mm/init.c
447
pfn = end_pfn;
arch/x86/mm/init.c
452
start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
arch/x86/mm/init.c
458
pfn = end_pfn;
arch/x86/mm/init.c
462
start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c
467
pfn = end_pfn;
arch/x86/mm/init.c
472
start_pfn = pfn;
arch/x86/mm/init_32.c
259
unsigned long pfn;
arch/x86/mm/init_32.c
290
pfn = start_pfn;
arch/x86/mm/init_32.c
291
pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
arch/x86/mm/init_32.c
296
if (pfn >= end_pfn)
arch/x86/mm/init_32.c
299
pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
arch/x86/mm/init_32.c
304
for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
arch/x86/mm/init_32.c
306
unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
arch/x86/mm/init_32.c
323
pfn &= PMD_MASK >> PAGE_SHIFT;
arch/x86/mm/init_32.c
324
addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
arch/x86/mm/init_32.c
333
set_pmd(pmd, pfn_pmd(pfn, init_prot));
arch/x86/mm/init_32.c
335
set_pmd(pmd, pfn_pmd(pfn, prot));
arch/x86/mm/init_32.c
337
pfn += PTRS_PER_PTE;
arch/x86/mm/init_32.c
342
pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
arch/x86/mm/init_32.c
344
for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
arch/x86/mm/init_32.c
345
pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
arch/x86/mm/init_32.c
358
set_pte(pte, pfn_pte(pfn, init_prot));
arch/x86/mm/init_32.c
359
last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
arch/x86/mm/init_32.c
361
set_pte(pte, pfn_pte(pfn, prot));
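The init_32.c loop in the entries above maps a pfn range with the largest stride available: where an aligned PMD-sized block fits, it installs one pfn_pmd() and advances pfn by PTRS_PER_PTE; otherwise it falls back to per-page pfn_pte() steps. A condensed sketch of just the stride logic, with constants for classic 32-bit non-PAE paging and demo bounds:

#include <stdio.h>

#define PTRS_PER_PTE 1024   /* pages per PMD-sized block (4 MiB / 4 KiB) */

int main(void)
{
    unsigned long pfn = 3, end_pfn = 2100;

    while (pfn < end_pfn) {
        /* big step only if aligned and the whole block fits */
        if (!(pfn & (PTRS_PER_PTE - 1)) && pfn + PTRS_PER_PTE <= end_pfn) {
            printf("PMD map pfn %#lx..%#lx\n", pfn, pfn + PTRS_PER_PTE - 1);
            pfn += PTRS_PER_PTE;
        } else {
            pfn++;   /* per-page PTE mapping; output elided to stay short */
        }
    }
    printf("done at pfn %#lx\n", pfn);
    return 0;
}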
arch/x86/mm/init_32.c
420
unsigned long pfn, va;
arch/x86/mm/init_32.c
436
for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
arch/x86/mm/init_32.c
437
va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
arch/x86/mm/init_32.c
451
pfn, pmd, __pa(pmd));
arch/x86/mm/init_32.c
460
pfn, pmd, __pa(pmd), pte, __pa(pte));
arch/x86/mm/iomap_32.c
47
void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
arch/x86/mm/iomap_32.c
63
return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
arch/x86/mm/ioremap.c
74
unsigned long pfn;
arch/x86/mm/ioremap.c
82
for_each_valid_pfn(pfn, start_pfn, stop_pfn)
arch/x86/mm/ioremap.c
83
if (!PageReserved(pfn_to_page(pfn)))
arch/x86/mm/mem_encrypt_amd.c
219
unsigned long pfn = 0;
arch/x86/mm/mem_encrypt_amd.c
224
pfn = pte_pfn(*kpte);
arch/x86/mm/mem_encrypt_amd.c
228
pfn = pmd_pfn(*(pmd_t *)kpte);
arch/x86/mm/mem_encrypt_amd.c
232
pfn = pud_pfn(*(pud_t *)kpte);
arch/x86/mm/mem_encrypt_amd.c
243
return pfn;
arch/x86/mm/mem_encrypt_amd.c
263
unsigned long pfn;
arch/x86/mm/mem_encrypt_amd.c
272
pfn = pg_level_to_pfn(level, kpte, NULL);
arch/x86/mm/mem_encrypt_amd.c
273
if (!pfn)
arch/x86/mm/mem_encrypt_amd.c
279
notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
arch/x86/mm/mem_encrypt_amd.c
318
d->pfn = pg_level_to_pfn(d->pte_level, d->kpte, &old_prot);
arch/x86/mm/mem_encrypt_amd.c
319
if (!d->pfn)
arch/x86/mm/mem_encrypt_amd.c
332
d->pa = d->pfn << PAGE_SHIFT;
arch/x86/mm/mem_encrypt_amd.c
349
void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot)
arch/x86/mm/mem_encrypt_amd.c
354
new_pte = pfn_pte(pfn, new_prot);
arch/x86/mm/mem_encrypt_amd.c
382
set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
arch/x86/mm/mmap.c
212
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
arch/x86/mm/mmap.c
214
phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
arch/x86/mm/mmap.c
226
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
arch/x86/mm/mmap.c
233
if (pfn_valid(pfn))
arch/x86/mm/mmap.c
235
if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
arch/x86/mm/pat/cpa-test.c
139
unsigned long pfn = get_random_u32_below(max_pfn_mapped);
arch/x86/mm/pat/cpa-test.c
141
addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
arch/x86/mm/pat/cpa-test.c
143
len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
arch/x86/mm/pat/cpa-test.c
167
if (test_bit(pfn + k, bm)) {
arch/x86/mm/pat/cpa-test.c
171
__set_bit(pfn + k, bm);
arch/x86/mm/pat/cpa-test.c
173
pages[k] = pfn_to_page(pfn + k);
arch/x86/mm/pat/memtype.c
441
u64 pfn;
arch/x86/mm/pat/memtype.c
455
for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
arch/x86/mm/pat/memtype.c
458
page = pfn_to_page(pfn);
arch/x86/mm/pat/memtype.c
473
for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
arch/x86/mm/pat/memtype.c
474
page = pfn_to_page(pfn);
arch/x86/mm/pat/memtype.c
483
u64 pfn;
arch/x86/mm/pat/memtype.c
485
for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
arch/x86/mm/pat/memtype.c
486
page = pfn_to_page(pfn);
arch/x86/mm/pat/memtype.c
693
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
arch/x86/mm/pat/memtype.c
695
enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));
arch/x86/mm/pat/memtype.c
769
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
arch/x86/mm/pat/memtype.c
772
if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
arch/x86/mm/pat/memtype.c
784
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
arch/x86/mm/pat/memtype.c
792
if (!range_is_allowed(pfn, size))
arch/x86/mm/pat/memtype.c
908
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot)
arch/x86/mm/pat/memtype.c
910
resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
arch/x86/mm/pat/memtype.c
930
int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot)
arch/x86/mm/pat/memtype.c
932
const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
arch/x86/mm/pat/memtype.c
937
void pfnmap_untrack(unsigned long pfn, unsigned long size)
arch/x86/mm/pat/memtype.c
939
const resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
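Note the `(resource_size_t)pfn << PAGE_SHIFT` pattern in the memtype.c entries above: the pfn is widened before the shift. Shifting a 32-bit pfn first would truncate physical addresses above 4 GiB, which matters on 32-bit PAE builds. A minimal sketch of the failure mode, with invented values:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x123456;	/* a page up around ~4.5 GiB */

	uint32_t wrong = pfn << PAGE_SHIFT;		/* truncated to 32 bits */
	uint64_t right = (uint64_t)pfn << PAGE_SHIFT;	/* widened first */

	printf("wrong=%#x right=%#llx\n", wrong, (unsigned long long)right);
	return 0;
}
```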
arch/x86/mm/pat/set_memory.c
1084
static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
arch/x86/mm/pat/set_memory.c
1099
prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
arch/x86/mm/pat/set_memory.c
1117
set_pte(pte, pfn_pte(pfn, ref_prot));
arch/x86/mm/pat/set_memory.c
1124
unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
arch/x86/mm/pat/set_memory.c
1183
pfn = ref_pfn;
arch/x86/mm/pat/set_memory.c
1184
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
arch/x86/mm/pat/set_memory.c
1185
split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
arch/x86/mm/pat/set_memory.c
1188
unsigned long pfn = PFN_DOWN(__pa(address));
arch/x86/mm/pat/set_memory.c
1190
if (pfn_range_is_mapped(pfn, pfn + 1))
arch/x86/mm/pat/set_memory.c
1251
unsigned long pfn;
arch/x86/mm/pat/set_memory.c
1261
pfn = pte_pfn(first);
arch/x86/mm/pat/set_memory.c
1264
if (PFN_PHYS(pfn) & ~PMD_MASK)
arch/x86/mm/pat/set_memory.c
1288
_pmd = pfn_pmd(pfn, pgprot);
arch/x86/mm/pat/set_memory.c
1310
if (virt_addr_valid(addr) && pfn_range_is_mapped(pfn, pfn + 1))
arch/x86/mm/pat/set_memory.c
1319
unsigned long pfn;
arch/x86/mm/pat/set_memory.c
1334
pfn = pmd_pfn(first);
arch/x86/mm/pat/set_memory.c
1335
if (!pmd_leaf(first) || (PFN_PHYS(pfn) & ~PUD_MASK))
arch/x86/mm/pat/set_memory.c
1355
set_pud(pud, pfn_pud(pfn, pmd_pgprot(first)));
arch/x86/mm/pat/set_memory.c
1357
if (virt_addr_valid(addr) && pfn_range_is_mapped(pfn, pfn + 1))
arch/x86/mm/pat/set_memory.c
1576
set_pte(pte, pfn_pte(cpa->pfn, pgprot));
arch/x86/mm/pat/set_memory.c
1579
cpa->pfn++;
arch/x86/mm/pat/set_memory.c
1635
set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
arch/x86/mm/pat/set_memory.c
1639
cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c
1708
set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
arch/x86/mm/pat/set_memory.c
1712
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c
1827
cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c
1830
} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
arch/x86/mm/pat/set_memory.c
1864
unsigned long pfn = pte_pfn(old_pte);
arch/x86/mm/pat/set_memory.c
1871
new_prot = static_protections(new_prot, address, pfn, 1, 0,
arch/x86/mm/pat/set_memory.c
1874
new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
arch/x86/mm/pat/set_memory.c
1884
new_pte = pfn_pte(pfn, new_prot);
arch/x86/mm/pat/set_memory.c
1885
cpa->pfn = pfn;
arch/x86/mm/pat/set_memory.c
1928
unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
arch/x86/mm/pat/set_memory.c
1932
if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
arch/x86/mm/pat/set_memory.c
1968
__cpa_pfn_in_highmap(cpa->pfn)) {
arch/x86/mm/pat/set_memory.c
1969
unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
arch/x86/mm/pat/set_memory.c
2260
int set_mce_nospec(unsigned long pfn)
arch/x86/mm/pat/set_memory.c
2266
if (arch_is_platform_page(pfn << PAGE_SHIFT))
arch/x86/mm/pat/set_memory.c
2280
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
arch/x86/mm/pat/set_memory.c
2284
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
arch/x86/mm/pat/set_memory.c
2290
int clear_mce_nospec(unsigned long pfn)
arch/x86/mm/pat/set_memory.c
2292
unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
arch/x86/mm/pat/set_memory.c
264
static bool __cpa_pfn_in_highmap(unsigned long pfn)
arch/x86/mm/pat/set_memory.c
270
return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
arch/x86/mm/pat/set_memory.c
2705
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
arch/x86/mm/pat/set_memory.c
2712
.pfn = pfn,
arch/x86/mm/pat/set_memory.c
275
static bool __cpa_pfn_in_highmap(unsigned long pfn)
arch/x86/mm/pat/set_memory.c
2755
.pfn = 0,
arch/x86/mm/pat/set_memory.c
48
unsigned long pfn;
arch/x86/mm/pat/set_memory.c
594
unsigned long pfn, const char *txt)
arch/x86/mm/pat/set_memory.c
606
lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
arch/x86/mm/pat/set_memory.c
617
unsigned long pfn, unsigned long npg,
arch/x86/mm/pat/set_memory.c
634
check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
arch/x86/mm/pat/set_memory.c
645
check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
arch/x86/mm/pat/set_memory.c
650
res = protect_pci_bios(pfn, pfn + npg - 1);
arch/x86/mm/pat/set_memory.c
651
check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
arch/x86/mm/pat/set_memory.c
654
res = protect_rodata(pfn, pfn + npg - 1);
arch/x86/mm/pat/set_memory.c
655
check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
arch/x86/mm/pat/set_memory.c
665
unsigned long pfn, unsigned long npg,
arch/x86/mm/pat/set_memory.c
696
start, end, pfn);
arch/x86/mm/pat/set_memory.c
925
unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
arch/x86/mm/pat/set_memory.c
992
pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
arch/x86/mm/pat/set_memory.c
993
cpa->pfn = pfn;
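The last set_memory.c line above computes which 4 KiB pfn backs a given address inside a large mapping: the offset into the mapping, converted to pages, is added to the mapping's head pfn. A worked sketch with invented values (psize would be PMD_SIZE or PUD_SIZE in the real code):

```c
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long psize   = 1UL << 21;	/* 2 MiB mapping */
	unsigned long old_pfn = 0x40000;	/* pfn at the mapping's start */
	unsigned long address = 0xc0155000UL;	/* a virtual address inside it */

	unsigned long pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);

	printf("address %#lx is backed by pfn %#lx\n", address, pfn);
	return 0;
}
```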
arch/x86/platform/efi/efi_64.c
182
unsigned long pfn, text, pf, rodata, tramp;
arch/x86/platform/efi/efi_64.c
193
pfn = pa_memmap >> PAGE_SHIFT;
arch/x86/platform/efi/efi_64.c
195
if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
arch/x86/platform/efi/efi_64.c
252
pfn = rodata >> PAGE_SHIFT;
arch/x86/platform/efi/efi_64.c
255
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
arch/x86/platform/efi/efi_64.c
261
pfn = tramp >> PAGE_SHIFT;
arch/x86/platform/efi/efi_64.c
264
if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
arch/x86/platform/efi/efi_64.c
275
unsigned long pfn;
arch/x86/platform/efi/efi_64.c
301
pfn = md->phys_addr >> PAGE_SHIFT;
arch/x86/platform/efi/efi_64.c
302
if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
arch/x86/platform/efi/efi_64.c
373
unsigned long pfn;
arch/x86/platform/efi/efi_64.c
378
pfn = md->phys_addr >> PAGE_SHIFT;
arch/x86/platform/efi/efi_64.c
379
err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
arch/x86/platform/efi/efi_64.c
385
err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
arch/x86/power/hibernate.c
47
int pfn_is_nosave(unsigned long pfn)
arch/x86/power/hibernate.c
55
return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
arch/x86/power/hibernate_32.c
102
if (pfn >= max_low_pfn)
arch/x86/power/hibernate_32.c
110
set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
arch/x86/power/hibernate_32.c
111
pfn += PTRS_PER_PTE;
arch/x86/power/hibernate_32.c
120
for (; pte < max_pte; pte++, pfn++) {
arch/x86/power/hibernate_32.c
121
if (pfn >= max_low_pfn)
arch/x86/power/hibernate_32.c
124
set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
arch/x86/power/hibernate_32.c
83
unsigned long pfn;
arch/x86/power/hibernate_32.c
91
pfn = 0;
arch/x86/power/hibernate_32.c
98
if (pfn >= max_low_pfn)
arch/x86/virt/svm/sev.c
1010
pfn, level, ret);
arch/x86/virt/svm/sev.c
1011
dump_rmpentry(pfn);
arch/x86/virt/svm/sev.c
1020
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable)
arch/x86/virt/svm/sev.c
1031
return rmpupdate(pfn, &state);
arch/x86/virt/svm/sev.c
1036
int rmp_make_shared(u64 pfn, enum pg_level level)
arch/x86/virt/svm/sev.c
1043
return rmpupdate(pfn, &state);
arch/x86/virt/svm/sev.c
1047
void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp)
arch/x86/virt/svm/sev.c
1049
struct page *page = pfn_to_page(pfn);
arch/x86/virt/svm/sev.c
1051
pr_warn("Leaking PFN range 0x%llx-0x%llx\n", pfn, pfn + npages);
arch/x86/virt/svm/sev.c
1071
dump_rmpentry(pfn);
arch/x86/virt/svm/sev.c
1073
pfn++;
arch/x86/virt/svm/sev.c
687
static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)
arch/x86/virt/svm/sev.c
695
paddr = pfn << PAGE_SHIFT;
arch/x86/virt/svm/sev.c
716
static int get_rmpentry(u64 pfn, struct rmpentry *e)
arch/x86/virt/svm/sev.c
726
: "a" (pfn << PAGE_SHIFT), "c" (e)
arch/x86/virt/svm/sev.c
732
e_raw = get_raw_rmpentry(pfn);
arch/x86/virt/svm/sev.c
752
static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *e, int *level)
arch/x86/virt/svm/sev.c
760
ret = get_rmpentry(pfn, e);
arch/x86/virt/svm/sev.c
769
ret = get_rmpentry(pfn & PFN_PMD_MASK, &e_large);
arch/x86/virt/svm/sev.c
778
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
arch/x86/virt/svm/sev.c
783
ret = __snp_lookup_rmpentry(pfn, &e, level);
arch/x86/virt/svm/sev.c
798
static void dump_rmpentry(u64 pfn)
arch/x86/virt/svm/sev.c
805
ret = __snp_lookup_rmpentry(pfn, &e, &level);
arch/x86/virt/svm/sev.c
808
pfn, ret);
arch/x86/virt/svm/sev.c
813
e_raw = get_raw_rmpentry(pfn);
arch/x86/virt/svm/sev.c
816
pfn, PTR_ERR(e_raw));
arch/x86/virt/svm/sev.c
821
pfn, e_raw->lo, e_raw->hi);
arch/x86/virt/svm/sev.c
833
pfn_i = ALIGN_DOWN(pfn, PTRS_PER_PMD);
arch/x86/virt/svm/sev.c
837
pfn, pfn_i, pfn_end);
arch/x86/virt/svm/sev.c
878
int psmash(u64 pfn)
arch/x86/virt/svm/sev.c
880
unsigned long paddr = pfn << PAGE_SHIFT;
arch/x86/virt/svm/sev.c
886
if (!pfn_valid(pfn))
arch/x86/virt/svm/sev.c
925
static int adjust_direct_map(u64 pfn, int rmp_level)
arch/x86/virt/svm/sev.c
936
vaddr = (unsigned long)pfn_to_kaddr(pfn);
arch/x86/virt/svm/sev.c
942
if (!pfn_valid(pfn))
arch/x86/virt/svm/sev.c
946
(!IS_ALIGNED(pfn, PTRS_PER_PMD) || !pfn_valid(pfn + PTRS_PER_PMD - 1)))
arch/x86/virt/svm/sev.c
969
pfn, ret);
arch/x86/virt/svm/sev.c
987
static int rmpupdate(u64 pfn, struct rmp_state *state)
arch/x86/virt/svm/sev.c
989
unsigned long paddr = pfn << PAGE_SHIFT;
arch/x86/virt/svm/sev.c
997
if (adjust_direct_map(pfn, level))
arch/x86/xen/enlighten_pv.c
459
unsigned long pfn;
arch/x86/xen/enlighten_pv.c
466
pfn = pte_pfn(*ptep);
arch/x86/xen/enlighten_pv.c
467
pte = pfn_pte(pfn, prot);
arch/x86/xen/enlighten_pv.c
496
va = __va(PFN_PHYS(pfn));
arch/x86/xen/enlighten_pv.c
554
unsigned long pfn, mfn;
arch/x86/xen/enlighten_pv.c
573
pfn = pte_pfn(*ptep);
arch/x86/xen/enlighten_pv.c
574
mfn = pfn_to_mfn(pfn);
arch/x86/xen/enlighten_pv.c
575
virt = __va(PFN_PHYS(pfn));
arch/x86/xen/enlighten_pv.c
591
unsigned long pfn, mfn;
arch/x86/xen/enlighten_pv.c
598
pfn = virt_to_pfn((void *)va);
arch/x86/xen/enlighten_pv.c
599
mfn = pfn_to_mfn(pfn);
arch/x86/xen/enlighten_pv.c
601
pte = pfn_pte(pfn, PAGE_KERNEL_RO);
arch/x86/xen/grant-table.c
162
return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
arch/x86/xen/mmu_hvm.c
18
static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
arch/x86/xen/mmu_hvm.c
22
.pfn = pfn,
arch/x86/xen/mmu_pv.c
1057
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1062
op.arg1.mfn = pfn_to_mfn(pfn);
arch/x86/xen/mmu_pv.c
1516
unsigned long pfn;
arch/x86/xen/mmu_pv.c
1524
pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
arch/x86/xen/mmu_pv.c
1526
pfn >= xen_start_info->first_p2m_pfn &&
arch/x86/xen/mmu_pv.c
1527
pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
arch/x86/xen/mmu_pv.c
1537
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1542
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
arch/x86/xen/mmu_pv.c
1543
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
arch/x86/xen/mmu_pv.c
1547
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1552
make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
arch/x86/xen/mmu_pv.c
1557
static void __init xen_release_pte_init(unsigned long pfn)
arch/x86/xen/mmu_pv.c
1559
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
arch/x86/xen/mmu_pv.c
1560
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
arch/x86/xen/mmu_pv.c
1563
static void __init xen_release_pmd_init(unsigned long pfn)
arch/x86/xen/mmu_pv.c
1565
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
arch/x86/xen/mmu_pv.c
1568
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1576
op->arg1.mfn = pfn_to_mfn(pfn);
arch/x86/xen/mmu_pv.c
1581
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
arch/x86/xen/mmu_pv.c
1584
unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
arch/x86/xen/mmu_pv.c
1588
pfn_pte(pfn, prot), 0);
arch/x86/xen/mmu_pv.c
1593
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
arch/x86/xen/mmu_pv.c
1598
trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
arch/x86/xen/mmu_pv.c
1601
struct page *page = pfn_to_page(pfn);
arch/x86/xen/mmu_pv.c
1611
__set_pfn_prot(pfn, PAGE_KERNEL_RO);
arch/x86/xen/mmu_pv.c
1615
__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
arch/x86/xen/mmu_pv.c
1621
static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1623
xen_alloc_ptpage(mm, pfn, PT_PTE);
arch/x86/xen/mmu_pv.c
1626
static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1628
xen_alloc_ptpage(mm, pfn, PT_PMD);
arch/x86/xen/mmu_pv.c
1632
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
arch/x86/xen/mmu_pv.c
1634
struct page *page = pfn_to_page(pfn);
arch/x86/xen/mmu_pv.c
1637
trace_xen_mmu_release_ptpage(pfn, level, pinned);
arch/x86/xen/mmu_pv.c
1643
__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
arch/x86/xen/mmu_pv.c
1645
__set_pfn_prot(pfn, PAGE_KERNEL);
arch/x86/xen/mmu_pv.c
1653
static void xen_release_pte(unsigned long pfn)
arch/x86/xen/mmu_pv.c
1655
xen_release_ptpage(pfn, PT_PTE);
arch/x86/xen/mmu_pv.c
1658
static void xen_release_pmd(unsigned long pfn)
arch/x86/xen/mmu_pv.c
1660
xen_release_ptpage(pfn, PT_PMD);
arch/x86/xen/mmu_pv.c
1663
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
arch/x86/xen/mmu_pv.c
1665
xen_alloc_ptpage(mm, pfn, PT_PUD);
arch/x86/xen/mmu_pv.c
1668
static void xen_release_pud(unsigned long pfn)
arch/x86/xen/mmu_pv.c
1670
xen_release_ptpage(pfn, PT_PUD);
arch/x86/xen/mmu_pv.c
1703
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
arch/x86/xen/mmu_pv.c
1704
pte_t pte = pfn_pte(pfn, prot);
arch/x86/xen/mmu_pv.c
1956
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
arch/x86/xen/mmu_pv.c
2044
pfn = xen_start_info->first_p2m_pfn;
arch/x86/xen/mmu_pv.c
2049
pfn = p2m_pfn;
arch/x86/xen/mmu_pv.c
2053
memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
arch/x86/xen/mmu_pv.c
2054
while (pfn < pfn_end) {
arch/x86/xen/mmu_pv.c
2055
if (pfn == p2m_pfn) {
arch/x86/xen/mmu_pv.c
2056
pfn = p2m_pfn_end;
arch/x86/xen/mmu_pv.c
2059
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
arch/x86/xen/mmu_pv.c
2060
pfn++;
arch/x86/xen/mmu_pv.c
2449
xen_pfn_t *pfn;
arch/x86/xen/mmu_pv.c
2459
pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
arch/x86/xen/mmu_pv.c
2466
(*rmd->pfn)++;
arch/x86/xen/mmu_pv.c
2468
rmd->pfn++;
arch/x86/xen/mmu_pv.c
2481
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
arch/x86/xen/mmu_pv.c
2492
rmd.pfn = pfn;
arch/x86/xen/mmu_pv.c
383
unsigned long pfn = mfn_to_pfn(mfn);
arch/x86/xen/mmu_pv.c
386
if (unlikely(pfn == ~0))
arch/x86/xen/mmu_pv.c
389
val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
arch/x86/xen/mmu_pv.c
398
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
arch/x86/xen/mmu_pv.c
402
mfn = __pfn_to_mfn(pfn);
arch/x86/xen/mmu_pv.c
731
static void xen_do_pin(unsigned level, unsigned long pfn)
arch/x86/xen/mmu_pv.c
736
op.arg1.mfn = pfn_to_mfn(pfn);
arch/x86/xen/mmu_pv.c
748
unsigned long pfn = page_to_pfn(page);
arch/x86/xen/mmu_pv.c
777
pfn_pte(pfn, PAGE_KERNEL_RO),
arch/x86/xen/mmu_pv.c
781
xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
arch/x86/xen/mmu_pv.c
878
unsigned long pfn = page_to_pfn(page);
arch/x86/xen/mmu_pv.c
893
xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
arch/x86/xen/mmu_pv.c
899
pfn_pte(pfn, PAGE_KERNEL),
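The mmu_pv.c entries above convert ptes between guest and machine views by swapping the frame-number field (pfn vs. mfn) while preserving the flag bits, using PTE_PFN_MASK. A userspace sketch of that field surgery; the mask matches the x86 bits-12..51 layout, and the mfn-to-pfn translation itself is faked:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12
#define PTE_PFN_MASK 0x000ffffffffff000ULL	/* frame number: bits 12..51 */

/* Stand-in for the real mfn_to_pfn() table lookup. */
static uint64_t fake_mfn_to_pfn(uint64_t mfn) { return mfn - 0x1000; }

int main(void)
{
	uint64_t pte  = 0x0000000234567063ULL;	/* mfn 0x234567, flags 0x63 */
	uint64_t mfn  = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
	uint64_t pfn  = fake_mfn_to_pfn(mfn);
	uint64_t gpte = (pfn << PAGE_SHIFT) | (pte & ~PTE_PFN_MASK);

	printf("mfn %#llx -> pfn %#llx, pte %#llx -> %#llx\n",
	       (unsigned long long)mfn, (unsigned long long)pfn,
	       (unsigned long long)pte, (unsigned long long)gpte);
	return 0;
}
```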
arch/x86/xen/p2m.c
127
static inline unsigned p2m_top_index(unsigned long pfn)
arch/x86/xen/p2m.c
129
BUG_ON(pfn >= MAX_P2M_PFN);
arch/x86/xen/p2m.c
130
return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
arch/x86/xen/p2m.c
133
static inline unsigned p2m_mid_index(unsigned long pfn)
arch/x86/xen/p2m.c
135
return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
arch/x86/xen/p2m.c
170
static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
arch/x86/xen/p2m.c
175
p2m[i] = IDENTITY_FRAME(pfn + i);
arch/x86/xen/p2m.c
209
unsigned long pfn, mfn;
arch/x86/xen/p2m.c
232
for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
arch/x86/xen/p2m.c
233
pfn += P2M_PER_PAGE) {
arch/x86/xen/p2m.c
234
topidx = p2m_top_index(pfn);
arch/x86/xen/p2m.c
235
mididx = p2m_mid_index(pfn);
arch/x86/xen/p2m.c
238
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
arch/x86/xen/p2m.c
252
pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
arch/x86/xen/p2m.c
287
unsigned long pfn;
arch/x86/xen/p2m.c
292
for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
arch/x86/xen/p2m.c
293
xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;
arch/x86/xen/p2m.c
303
static int xen_p2m_elem_type(unsigned long pfn)
arch/x86/xen/p2m.c
307
if (pfn >= xen_p2m_size)
arch/x86/xen/p2m.c
310
mfn = xen_p2m_addr[pfn];
arch/x86/xen/p2m.c
324
unsigned long pfn;
arch/x86/xen/p2m.c
346
for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
arch/x86/xen/p2m.c
357
chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
arch/x86/xen/p2m.c
360
type = xen_p2m_elem_type(pfn);
arch/x86/xen/p2m.c
364
if (xen_p2m_elem_type(pfn + i) != type)
arch/x86/xen/p2m.c
373
copy_page(mfns, xen_p2m_addr + pfn);
arch/x86/xen/p2m.c
374
ptep = populate_extra_pte((unsigned long)(p2m + pfn));
arch/x86/xen/p2m.c
384
ptep = populate_extra_pte((unsigned long)(p2m + pfn));
arch/x86/xen/p2m.c
395
(unsigned long)(p2m + pfn) + i * PMD_SIZE);
arch/x86/xen/p2m.c
425
unsigned long get_phys_to_machine(unsigned long pfn)
arch/x86/xen/p2m.c
430
if (unlikely(pfn >= xen_p2m_size)) {
arch/x86/xen/p2m.c
431
if (pfn < xen_max_p2m_pfn)
arch/x86/xen/p2m.c
432
return xen_chk_extra_mem(pfn);
arch/x86/xen/p2m.c
434
return IDENTITY_FRAME(pfn);
arch/x86/xen/p2m.c
437
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
arch/x86/xen/p2m.c
446
return IDENTITY_FRAME(pfn);
arch/x86/xen/p2m.c
448
return xen_p2m_addr[pfn];
arch/x86/xen/p2m.c
521
int xen_alloc_p2m_entry(unsigned long pfn)
arch/x86/xen/p2m.c
528
unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
arch/x86/xen/p2m.c
542
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
arch/x86/xen/p2m.c
543
topidx = p2m_top_index(pfn);
arch/x86/xen/p2m.c
587
p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
arch/x86/xen/p2m.c
599
mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
arch/x86/xen/p2m.c
610
if (pfn >= xen_p2m_last_pfn) {
arch/x86/xen/p2m.c
611
xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
arch/x86/xen/p2m.c
622
unsigned long pfn;
arch/x86/xen/p2m.c
633
for (pfn = pfn_s; pfn < pfn_e; pfn++)
arch/x86/xen/p2m.c
634
xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
arch/x86/xen/p2m.c
636
return pfn - pfn_s;
arch/x86/xen/p2m.c
639
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
arch/x86/xen/p2m.c
645
if (unlikely(pfn >= xen_p2m_size))
arch/x86/xen/p2m.c
652
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
arch/x86/xen/p2m.c
655
ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
arch/x86/xen/p2m.c
662
return mfn == IDENTITY_FRAME(pfn);
arch/x86/xen/p2m.c
667
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
arch/x86/xen/p2m.c
669
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
arch/x86/xen/p2m.c
672
ret = xen_alloc_p2m_entry(pfn);
arch/x86/xen/p2m.c
676
return __set_phys_to_machine(pfn, mfn);
arch/x86/xen/p2m.c
700
unsigned long mfn, pfn;
arch/x86/xen/p2m.c
716
pfn = page_to_pfn(pages[i]);
arch/x86/xen/p2m.c
718
WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
arch/x86/xen/p2m.c
720
if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
arch/x86/xen/p2m.c
777
unsigned long pfn = page_to_pfn(pages[i]);
arch/x86/xen/p2m.c
780
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
arch/x86/xen/p2m.c
810
unsigned long pfn, mfn, end_pfn;
arch/x86/xen/p2m.c
814
pfn = PFN_DOWN(remap->paddr);
arch/x86/xen/p2m.c
816
while (pfn < end_pfn) {
arch/x86/xen/p2m.c
817
if (!set_phys_to_machine(pfn, mfn))
arch/x86/xen/p2m.c
819
pfn, mfn);
arch/x86/xen/p2m.c
821
pfn++;
arch/x86/xen/p2m.c
896
unsigned long pfn, first_pfn;
arch/x86/xen/p2m.c
902
for (pfn = 0; pfn < xen_p2m_size; pfn++) {
arch/x86/xen/p2m.c
903
type = xen_p2m_elem_type(pfn);
arch/x86/xen/p2m.c
905
seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
arch/x86/xen/p2m.c
908
first_pfn = pfn;
arch/x86/xen/p2m.c
911
seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
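The p2m.c references above index a three-level pfn-to-mfn table: p2m_top_index() and p2m_mid_index() are plain divisions by the per-page fan-out. A sketch with 64-bit x86 sizes assumed (512 entries per page), showing that one top entry spans 1 GiB worth of pfns:

```c
#include <stdio.h>

#define P2M_PER_PAGE      512	/* PAGE_SIZE / sizeof(unsigned long) */
#define P2M_MID_PER_PAGE  512	/* PAGE_SIZE / sizeof(unsigned long *) */

int main(void)
{
	unsigned long pfn = 0x123456;

	unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx    = pfn % P2M_PER_PAGE;

	/* One top entry spans 512 * 512 = 262144 pfns, i.e. 1 GiB. */
	printf("pfn %#lx -> top %lu, mid %lu, entry %lu\n",
	       pfn, topidx, mididx, idx);
	return 0;
}
```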
arch/x86/xen/pmu.c
494
unsigned long pfn;
arch/x86/xen/pmu.c
507
pfn = virt_to_pfn(xenpmu_data);
arch/x86/xen/pmu.c
509
xp.val = pfn_to_mfn(pfn);
arch/x86/xen/setup.c
124
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
arch/x86/xen/setup.c
129
if (pfn >= xen_extra_mem[i].start_pfn &&
arch/x86/xen/setup.c
130
pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
arch/x86/xen/setup.c
134
return IDENTITY_FRAME(pfn);
arch/x86/xen/setup.c
142
unsigned long pfn, pfn_s, pfn_e;
arch/x86/xen/setup.c
150
for (pfn = pfn_s; pfn < pfn_e; pfn++)
arch/x86/xen/setup.c
151
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
arch/x86/xen/setup.c
217
unsigned long pfn, end;
arch/x86/xen/setup.c
224
for (pfn = start_pfn; pfn < end; pfn++) {
arch/x86/xen/setup.c
225
unsigned long mfn = pfn_to_mfn(pfn);
arch/x86/xen/setup.c
228
if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
arch/x86/xen/setup.c
232
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
arch/x86/xen/setup.c
236
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
arch/x86/xen/setup.c
248
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
arch/x86/xen/setup.c
252
.val = pfn
arch/x86/xen/setup.c
256
if (!set_phys_to_machine(pfn, mfn)) {
arch/x86/xen/setup.c
258
pfn, mfn);
arch/x86/xen/setup.c
265
mfn, pfn);
arch/x86/xen/setup.c
269
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
arch/x86/xen/setup.c
272
mfn, pfn);
arch/x86/xen/setup.c
346
unsigned long pfn;
arch/x86/xen/setup.c
390
for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
arch/x86/xen/setup.c
392
(unsigned long)__va(pfn << PAGE_SHIFT),
arch/x86/xen/setup.c
456
unsigned long mfn_save, pfn;
arch/x86/xen/setup.c
470
pfn = xen_remap_buf.target_pfn;
arch/x86/xen/setup.c
472
xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
arch/x86/xen/setup.c
474
pfn++;
arch/x86/xen/setup.c
476
if (pfn_s == ~0UL || pfn == pfn_s) {
arch/x86/xen/xen-ops.h
197
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
arch/x86/xen/xen-ops.h
198
void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
arch/x86/xen/xen-ops.h
56
unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
arch/xtensa/include/asm/cacheflush.h
139
unsigned long address, unsigned long pfn);
arch/xtensa/include/asm/highmem.h
63
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
arch/xtensa/include/asm/pgtable.h
271
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
arch/xtensa/kernel/hibernate.c
12
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
arch/xtensa/kernel/hibernate.c
7
int pfn_is_nosave(unsigned long pfn)
arch/xtensa/kernel/pci-dma.c
28
unsigned long pfn = PFN_DOWN(paddr);
arch/xtensa/kernel/pci-dma.c
29
struct page *page = pfn_to_page(pfn);
arch/xtensa/kernel/smp.c
562
unsigned long address, unsigned long pfn)
arch/xtensa/kernel/smp.c
567
.addr2 = pfn,
arch/xtensa/mm/cache.c
200
unsigned long pfn)
arch/xtensa/mm/cache.c
204
unsigned long phys = page_to_phys(pfn_to_page(pfn));
arch/xtensa/mm/cache.c
219
unsigned long pfn = pte_pfn(*ptep);
arch/xtensa/mm/cache.c
223
if (!pfn_valid(pfn))
arch/xtensa/mm/cache.c
226
folio = page_folio(pfn_to_page(pfn));
arch/xtensa/mm/highmem.c
38
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
arch/xtensa/mm/highmem.c
40
return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
arch/xtensa/mm/ioremap.c
16
unsigned long pfn = __phys_to_pfn((phys_addr));
arch/xtensa/mm/ioremap.c
17
WARN_ON(pfn_valid(pfn));
drivers/accel/amdxdna/amdxdna_ubuf.c
76
unsigned long pfn;
drivers/accel/amdxdna/amdxdna_ubuf.c
82
pfn = page_to_pfn(ubuf->pages[pgoff]);
drivers/accel/amdxdna/amdxdna_ubuf.c
83
return vmf_insert_pfn(vma, vmf->address, pfn);
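The amdxdna fault handler above is the first of many instances of the same pattern in the GPU drivers below (armada, etnaviv, gma500, msm, xe): translate the faulting page offset to a pfn, then let vmf_insert_pfn() install the pte into a VM_PFNMAP vma. A condensed kernel-style sketch; struct my_obj and its fields are invented:

```c
#include <linux/mm.h>

/* Invented object type; stands in for the driver's buffer bookkeeping. */
struct my_obj {
	struct page **pages;
	unsigned long nr_pages;
};

static vm_fault_t my_obj_fault(struct vm_fault *vmf)
{
	struct my_obj *obj = vmf->vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;	/* fault offset into the mapping, in pages */

	if (pgoff >= obj->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Requires a VM_PFNMAP vma, set up at mmap time. */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      page_to_pfn(obj->pages[pgoff]));
}

static const struct vm_operations_struct my_obj_vm_ops = {
	.fault = my_obj_fault,
};
```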
drivers/acpi/apei/ghes.c
179
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
drivers/acpi/apei/ghes.c
184
paddr = PFN_PHYS(pfn);
drivers/acpi/apei/ghes.c
485
u64 pfn;
drivers/acpi/apei/ghes.c
494
ret = memory_failure(twcb->pfn, twcb->flags);
drivers/acpi/apei/ghes.c
501
twcb->pfn, current->comm, task_pid_nr(current));
drivers/acpi/apei/ghes.c
508
unsigned long pfn;
drivers/acpi/apei/ghes.c
513
pfn = PHYS_PFN(physical_addr);
drivers/acpi/apei/ghes.c
520
twcb->pfn = pfn;
drivers/acpi/apei/ghes.c
527
memory_failure_queue(pfn, flags);
drivers/acpi/osl.c
287
#define should_use_kmap(pfn) 0
drivers/acpi/osl.c
289
#define should_use_kmap(pfn) page_is_ram(pfn)
drivers/acpi/osl.c
294
unsigned long pfn;
drivers/acpi/osl.c
296
pfn = pg_off >> PAGE_SHIFT;
drivers/acpi/osl.c
297
if (should_use_kmap(pfn)) {
drivers/acpi/osl.c
300
return (void __iomem __force *)kmap(pfn_to_page(pfn));
drivers/acpi/osl.c
307
unsigned long pfn;
drivers/acpi/osl.c
309
pfn = pg_off >> PAGE_SHIFT;
drivers/acpi/osl.c
310
if (should_use_kmap(pfn))
drivers/acpi/osl.c
311
kunmap(pfn_to_page(pfn));
drivers/base/memory.c
1228
void memblk_nr_poison_inc(unsigned long pfn)
drivers/base/memory.c
1230
const unsigned long block_id = pfn_to_block_id(pfn);
drivers/base/memory.c
1237
void memblk_nr_poison_sub(unsigned long pfn, long i)
drivers/base/memory.c
1239
const unsigned long block_id = pfn_to_block_id(pfn);
drivers/base/memory.c
609
u64 pfn;
drivers/base/memory.c
612
if (kstrtoull(buf, 0, &pfn) < 0)
drivers/base/memory.c
614
pfn >>= PAGE_SHIFT;
drivers/base/memory.c
615
ret = soft_offline_page(pfn, 0);
drivers/base/memory.c
625
u64 pfn;
drivers/base/memory.c
628
if (kstrtoull(buf, 0, &pfn) < 0)
drivers/base/memory.c
630
pfn >>= PAGE_SHIFT;
drivers/base/memory.c
631
ret = memory_failure(pfn, MF_SW_SIMULATED);
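The two sysfs handlers above take a physical address, not a pfn; the `pfn >>= PAGE_SHIFT` happens kernel-side. A userspace usage sketch, assuming root and a kernel built with CONFIG_MEMORY_FAILURE, using the conventional sysfs path:

```c
#include <stdio.h>

int main(void)
{
	/* Soft-offline the page containing physical address 0x40000000. */
	FILE *f = fopen("/sys/devices/system/memory/soft_offline_page", "w");

	if (!f) {
		perror("open");
		return 1;
	}
	fprintf(f, "0x40000000\n");
	return fclose(f) ? 1 : 0;
}
```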
drivers/block/ublk_drv.c
2616
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
drivers/block/ublk_drv.c
2644
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
drivers/block/ublk_drv.c
2645
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
drivers/block/ublk_drv.c
5299
unsigned long pfn = page_to_pfn(pages[i]);
drivers/block/ublk_drv.c
5305
page_to_pfn(pages[i + 1]) == pfn + (i - start) + 1)
drivers/block/ublk_drv.c
5317
ret = mtree_insert_range(&ub->buf_tree, pfn,
drivers/block/ublk_drv.c
5318
pfn + (i - start),
drivers/block/ublk_drv.c
5514
unsigned long pfn = page_to_pfn(bv.bv_page);
drivers/block/ublk_drv.c
5515
unsigned long end_pfn = pfn +
drivers/block/ublk_drv.c
5519
MA_STATE(mas, &ub->buf_tree, pfn, pfn);
drivers/block/ublk_drv.c
5530
(pfn - mas.index) * PAGE_SIZE + bv.bv_offset;
drivers/char/mem.c
243
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
drivers/char/mem.c
269
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
drivers/char/mem.c
273
phys_addr_t offset = pfn << PAGE_SHIFT;
drivers/char/mem.c
53
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
drivers/char/mem.c
60
static inline int page_is_allowed(unsigned long pfn)
drivers/char/mem.c
62
return devmem_is_allowed(pfn);
drivers/char/mem.c
65
static inline int page_is_allowed(unsigned long pfn)
drivers/clocksource/hyperv_timer.c
471
tsc_msr.pfn = tsc_pfn;
drivers/clocksource/hyperv_timer.c
597
tsc_pfn = tsc_msr.pfn;
drivers/clocksource/hyperv_timer.c
601
tsc_msr.pfn = tsc_pfn;
drivers/comedi/comedi_fops.c
2572
unsigned long pfn;
drivers/comedi/comedi_fops.c
2575
pfn = page_to_pfn(virt_to_page(buf->virt_addr));
drivers/comedi/comedi_fops.c
2576
retval = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
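The comedi mmap path above is the classic driver idiom: derive a pfn from a kernel buffer and map the range with remap_pfn_range(). A condensed kernel-style sketch under the assumption that the buffer is page-aligned and physically contiguous (e.g. from __get_free_pages()); names are invented:

```c
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* page-aligned, physically contiguous */
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = virt_to_phys(buf) >> PAGE_SHIFT;

	/* One call covers the whole vma; no fault handler needed afterwards. */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
```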
drivers/crypto/ccp/sev-dev-tio.c
26
#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
drivers/crypto/ccp/sev-dev-tio.c
33
u64 pfn = sla.pfn;
drivers/crypto/ccp/sev-dev-tio.c
34
u64 pa = pfn << PAGE_SHIFT;
drivers/crypto/ccp/sev-dev-tio.c
53
.pfn = pa >> PAGE_SHIFT,
drivers/crypto/ccp/sev-dev-tio.h
17
pfn :40,
drivers/crypto/ccp/sev-dev.c
422
unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
drivers/crypto/ccp/sev-dev.c
425
for (i = 0; i < npages; i++, pfn++) {
drivers/crypto/ccp/sev-dev.c
426
rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true);
drivers/cxl/core/mce.c
19
unsigned long pfn;
drivers/cxl/core/mce.c
29
pfn = spa >> PAGE_SHIFT;
drivers/cxl/core/mce.c
30
if (!pfn_valid(pfn))
drivers/cxl/core/mce.c
37
pfn = spa_alias >> PAGE_SHIFT;
drivers/cxl/core/mce.c
45
if (!memory_failure(pfn, 0))
drivers/cxl/core/mce.c
46
set_mce_nospec(pfn);
drivers/dax/device.c
114
unsigned long pfn;
drivers/dax/device.c
135
pfn = PHYS_PFN(phys);
drivers/dax/device.c
137
dax_set_mapping(vmf, pfn, fault_size);
drivers/dax/device.c
139
return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
drivers/dax/device.c
150
unsigned long pfn;
drivers/dax/device.c
179
pfn = PHYS_PFN(phys);
drivers/dax/device.c
181
dax_set_mapping(vmf, pfn, fault_size);
drivers/dax/device.c
183
return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
drivers/dax/device.c
195
unsigned long pfn;
drivers/dax/device.c
225
pfn = PHYS_PFN(phys);
drivers/dax/device.c
227
dax_set_mapping(vmf, pfn, fault_size);
drivers/dax/device.c
229
return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
drivers/dax/device.c
83
static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
drivers/dax/device.c
99
struct folio *folio = pfn_folio(pfn + i);
drivers/dax/super.c
150
enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
drivers/dax/super.c
164
mode, kaddr, pfn);
drivers/dma-buf/udmabuf.c
52
unsigned long addr, pfn;
drivers/dma-buf/udmabuf.c
58
pfn = folio_pfn(ubuf->folios[pgoff]);
drivers/dma-buf/udmabuf.c
59
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
drivers/dma-buf/udmabuf.c
61
ret = vmf_insert_pfn(vma, vmf->address, pfn);
drivers/dma-buf/udmabuf.c
76
pfn = folio_pfn(ubuf->folios[pgoff]);
drivers/dma-buf/udmabuf.c
77
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
drivers/dma-buf/udmabuf.c
85
if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
drivers/dma/idxd/cdev.c
392
unsigned long pfn;
drivers/dma/idxd/cdev.c
416
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
drivers/dma/idxd/cdev.c
421
return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
drivers/edac/cpc925_edac.c
436
unsigned long *pfn, unsigned long *offset, int *csrow)
drivers/edac/cpc925_edac.c
499
*pfn = pa >> PAGE_SHIFT;
drivers/edac/cpc925_edac.c
525
unsigned long pfn = 0, offset = 0;
drivers/edac/cpc925_edac.c
539
cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
drivers/edac/cpc925_edac.c
545
pfn, offset, syndrome,
drivers/edac/cpc925_edac.c
553
pfn, offset, 0,
drivers/edac/fsl_ddr_edac.c
294
u32 pfn;
drivers/edac/fsl_ddr_edac.c
327
pfn = err_addr >> PAGE_SHIFT;
drivers/edac/fsl_ddr_edac.c
331
if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
drivers/edac/fsl_ddr_edac.c
368
fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
drivers/edac/fsl_ddr_edac.c
376
pfn, err_addr & ~PAGE_MASK, syndrome,
drivers/edac/fsl_ddr_edac.c
382
pfn, err_addr & ~PAGE_MASK, syndrome,
drivers/edac/i3000_edac.c
235
unsigned long pfn, offset;
drivers/edac/i3000_edac.c
252
pfn = deap_pfn(info->edeap, info->deap);
drivers/edac/i3000_edac.c
256
row = edac_mc_find_csrow_by_page(mci, pfn);
drivers/edac/i3000_edac.c
260
pfn, offset, 0,
drivers/edac/i3000_edac.c
265
pfn, offset, info->derrsyn,
drivers/edac/versalnet_edac.c
435
phys_addr_t pfn;
drivers/edac/versalnet_edac.c
464
pfn = PHYS_PFN(pa);
drivers/edac/versalnet_edac.c
467
err = memory_failure(pfn, MF_ACTION_REQUIRED);
drivers/firmware/efi/unaccepted_memory.c
220
unsigned long pfn)
drivers/firmware/efi/unaccepted_memory.c
222
return !pfn_is_unaccepted_memory(pfn);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3577
kfree(ecc_err->err_pages.pfn);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5637
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
5641
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
1026
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
479
uint64_t *pfn;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2846
unsigned long pfn;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2858
pfn = addr >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2859
if (!pfn_valid(pfn))
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2862
p = pfn_to_page(pfn);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2901
unsigned long pfn;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2909
pfn = addr >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2910
if (!pfn_valid(pfn))
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2913
p = pfn_to_page(pfn);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1198
uint64_t pfn = cursor.start >> PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1201
contiguous = pages_addr[pfn + 1] ==
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1202
pages_addr[pfn] + PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1207
uint64_t idx = pfn + count;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
162
cursor->pfn = start;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
191
idx = (cursor->pfn >> shift) & mask;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
226
cursor->pfn += 1ULL << shift;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
227
cursor->pfn &= ~((1ULL << shift) - 1);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
271
cursor->pfn = ~0ll;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
35
uint64_t pfn;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
814
while (cursor.pfn < end) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
880
pe_start = ((cursor.pfn >> shift) & mask) * 8;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
890
entry_end += cursor.pfn & ~(entry_end - 1);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
933
while (cursor.pfn < frag_start) {
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1066
unsigned long pfn;
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1084
pfn = __pa(page->kernel_address);
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1085
pfn >>= PAGE_SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1093
pr_debug(" pfn == 0x%016lX\n", pfn);
drivers/gpu/drm/amd/amdkfd/kfd_events.c
1101
ret = remap_pfn_range(vma, vma->vm_start, pfn,
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
219
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
223
page = pfn_to_page(pfn);
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
405
uint64_t address, uint64_t *pfn, uint32_t max_pfn_sz)
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
412
if (!pfn || !max_pfn_sz)
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
426
pfn[i] = rsp.retired_addr[i] >> AMDGPU_GPU_PAGE_SHIFT;
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.h
64
uint64_t address, uint64_t *pfn, uint32_t max_pfn_sz);
drivers/gpu/drm/amd/ras/rascore/ras.h
45
#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT)
drivers/gpu/drm/armada/armada_gem.c
25
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
drivers/gpu/drm/armada/armada_gem.c
27
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
drivers/gpu/drm/armada/armada_gem.c
28
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
drivers/gpu/drm/drm_gem_shmem_helper.c
573
unsigned long pfn)
drivers/gpu/drm/drm_gem_shmem_helper.c
576
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
drivers/gpu/drm/drm_gem_shmem_helper.c
579
unsigned long paddr = pfn << PAGE_SHIFT;
drivers/gpu/drm/drm_gem_shmem_helper.c
583
folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) {
drivers/gpu/drm/drm_gem_shmem_helper.c
586
pfn &= PMD_MASK >> PAGE_SHIFT;
drivers/gpu/drm/drm_gem_shmem_helper.c
595
ret = vmf_insert_pfn_pmd(vmf, pfn,
drivers/gpu/drm/drm_gem_shmem_helper.c
619
unsigned long pfn;
drivers/gpu/drm/drm_gem_shmem_helper.c
635
pfn = page_to_pfn(page);
drivers/gpu/drm/drm_gem_shmem_helper.c
637
ret = try_insert_pfn(vmf, order, pfn);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
169
unsigned long pfn;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
193
pfn = page_to_pfn(pages[pgoff]);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
196
pfn, pfn << PAGE_SHIFT);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
198
return vmf_insert_pfn(vma, vmf->address, pfn);
drivers/gpu/drm/gma500/fbdev.c
27
unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
drivers/gpu/drm/gma500/fbdev.c
35
err = vmf_insert_mixed(vma, address, pfn);
drivers/gpu/drm/gma500/fbdev.c
39
++pfn;
drivers/gpu/drm/gma500/gem.c
262
unsigned long pfn;
drivers/gpu/drm/gma500/gem.c
295
pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
drivers/gpu/drm/gma500/gem.c
297
pfn = page_to_pfn(pobj->pages[page_offset]);
drivers/gpu/drm/gma500/gem.c
298
ret = vmf_insert_pfn(vma, vmf->address, pfn);
drivers/gpu/drm/gma500/gtt.c
52
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
drivers/gpu/drm/gma500/gtt.c
58
BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
drivers/gpu/drm/gma500/gtt.c
67
return (pfn << PAGE_SHIFT) | mask;
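psb_gtt_mask_pte() above packs a pfn into a 32-bit hardware GTT entry, so the pfn must fit in 32 - PAGE_SHIFT = 20 bits, i.e. the page must lie below 4 GiB; hence the BUG_ON() mask. A sketch of the same width check, with invented flag bits:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t mask = 0x7;		/* invented type/valid bits */
	unsigned long pfn = 0xabcde;	/* below 4 GiB: fits */

	if (pfn & ~(0xFFFFFFFFUL >> PAGE_SHIFT)) {
		fprintf(stderr, "pfn %#lx does not fit a 32-bit entry\n", pfn);
		return 1;
	}
	printf("gtt entry: %#x\n", (uint32_t)(pfn << PAGE_SHIFT) | mask);
	return 0;
}
```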
drivers/gpu/drm/gma500/gtt.h
36
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type);
drivers/gpu/drm/gma500/mmu.c
144
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
drivers/gpu/drm/gma500/mmu.c
155
return (pfn << PAGE_SHIFT) | mask;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
303
unsigned long *pfn)
drivers/gpu/drm/i915/gem/i915_gem_mman.c
333
*pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
334
*pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
335
*pfn += obj_offset - vma->gtt_view.partial.offset;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
355
unsigned long pfn;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
460
&start, &end, &pfn);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
463
ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
drivers/gpu/drm/i915/gt/shmem_utils.c
101
for (pfn = off >> PAGE_SHIFT; len; pfn++) {
drivers/gpu/drm/i915/gt/shmem_utils.c
107
page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
drivers/gpu/drm/i915/gt/shmem_utils.c
134
unsigned long pfn;
drivers/gpu/drm/i915/gt/shmem_utils.c
136
for (pfn = off >> PAGE_SHIFT; len; pfn++) {
drivers/gpu/drm/i915/gt/shmem_utils.c
142
page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
drivers/gpu/drm/i915/gt/shmem_utils.c
99
unsigned long pfn;
drivers/gpu/drm/i915/gvt/gtt.c
2138
unsigned long pfn;
drivers/gpu/drm/i915/gvt/gtt.c
2140
pfn = pte_ops->get_pfn(entry);
drivers/gpu/drm/i915/gvt/gtt.c
2141
if (pfn != vgpu->gvt->gtt.scratch_mfn)
drivers/gpu/drm/i915/gvt/gtt.c
2142
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
304
unsigned long pfn;
drivers/gpu/drm/i915/gvt/gtt.c
307
pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gvt/gtt.c
309
pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gvt/gtt.c
311
pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gvt/gtt.c
313
pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
drivers/gpu/drm/i915/gvt/gtt.c
314
return pfn;
drivers/gpu/drm/i915/gvt/gtt.c
317
static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
drivers/gpu/drm/i915/gvt/gtt.c
321
pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
324
pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
327
pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
330
pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
333
e->val64 |= (pfn << PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.c
925
unsigned long pfn;
drivers/gpu/drm/i915/gvt/gtt.c
928
pfn = ops->get_pfn(entry);
drivers/gpu/drm/i915/gvt/gtt.c
932
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
drivers/gpu/drm/i915/gvt/gtt.c
935
intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
drivers/gpu/drm/i915/gvt/gtt.h
80
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
drivers/gpu/drm/i915/i915_mm.c
105
r.pfn = pfn;
drivers/gpu/drm/i915/i915_mm.c
111
zap_special_vma_range(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
drivers/gpu/drm/i915/i915_mm.c
159
zap_special_vma_range(vma, addr, r.pfn << PAGE_SHIFT);
drivers/gpu/drm/i915/i915_mm.c
34
unsigned long pfn;
drivers/gpu/drm/i915/i915_mm.c
48
return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
drivers/gpu/drm/i915/i915_mm.c
61
r->pfn++; /* track insertions in case we need to unwind later */
drivers/gpu/drm/i915/i915_mm.c
78
set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
drivers/gpu/drm/i915/i915_mm.c
79
r->pfn++;
drivers/gpu/drm/i915/i915_mm.c
95
unsigned long addr, unsigned long pfn, unsigned long size,
drivers/gpu/drm/i915/i915_mm.h
18
unsigned long addr, unsigned long pfn, unsigned long size,
drivers/gpu/drm/i915/i915_mm.h
23
unsigned long addr, unsigned long pfn, unsigned long size,
drivers/gpu/drm/i915/i915_scatterlist.h
113
((__pp) = (__iter).pfn == 0 ? NULL : \
drivers/gpu/drm/i915/i915_scatterlist.h
114
pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
drivers/gpu/drm/i915/i915_scatterlist.h
26
unsigned long pfn;
drivers/gpu/drm/i915/i915_scatterlist.h
42
s.pfn = page_to_pfn(sg_page(s.sgp));
drivers/gpu/drm/i915/selftests/scatterlist.c
101
pfn++;
drivers/gpu/drm/i915/selftests/scatterlist.c
103
if (pfn != pt->end) {
drivers/gpu/drm/i915/selftests/scatterlist.c
105
__func__, who, pt->end, pfn);
drivers/gpu/drm/i915/selftests/scatterlist.c
118
unsigned long pfn;
drivers/gpu/drm/i915/selftests/scatterlist.c
120
pfn = pt->start;
drivers/gpu/drm/i915/selftests/scatterlist.c
122
if (page != pfn_to_page(pfn)) {
drivers/gpu/drm/i915/selftests/scatterlist.c
124
__func__, who, pfn, page_to_pfn(page));
drivers/gpu/drm/i915/selftests/scatterlist.c
131
pfn++;
drivers/gpu/drm/i915/selftests/scatterlist.c
133
if (pfn != pt->end) {
drivers/gpu/drm/i915/selftests/scatterlist.c
135
__func__, who, pt->end, pfn);
drivers/gpu/drm/i915/selftests/scatterlist.c
221
unsigned long n, pfn;
drivers/gpu/drm/i915/selftests/scatterlist.c
239
pfn = pt->start;
drivers/gpu/drm/i915/selftests/scatterlist.c
245
if (!page_contiguous(pfn_to_page(pfn),
drivers/gpu/drm/i915/selftests/scatterlist.c
246
pfn_to_page(pfn + npages),
drivers/gpu/drm/i915/selftests/scatterlist.c
254
sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
drivers/gpu/drm/i915/selftests/scatterlist.c
256
GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
drivers/gpu/drm/i915/selftests/scatterlist.c
260
pfn += npages;
drivers/gpu/drm/i915/selftests/scatterlist.c
264
pt->end = pfn;
drivers/gpu/drm/i915/selftests/scatterlist.c
48
unsigned long pfn, n;
drivers/gpu/drm/i915/selftests/scatterlist.c
50
pfn = pt->start;
drivers/gpu/drm/i915/selftests/scatterlist.c
55
if (page_to_pfn(page) != pfn) {
drivers/gpu/drm/i915/selftests/scatterlist.c
57
__func__, who, pfn, page_to_pfn(page));
drivers/gpu/drm/i915/selftests/scatterlist.c
70
pfn += npages;
drivers/gpu/drm/i915/selftests/scatterlist.c
72
if (pfn != pt->end) {
drivers/gpu/drm/i915/selftests/scatterlist.c
74
__func__, who, pt->end, pfn);
drivers/gpu/drm/i915/selftests/scatterlist.c
86
unsigned long pfn;
drivers/gpu/drm/i915/selftests/scatterlist.c
88
pfn = pt->start;
drivers/gpu/drm/i915/selftests/scatterlist.c
92
if (page != pfn_to_page(pfn)) {
drivers/gpu/drm/i915/selftests/scatterlist.c
94
__func__, who, pfn, page_to_pfn(page));
drivers/gpu/drm/imagination/pvr_vm_mips.c
157
s32 pfn;
drivers/gpu/drm/imagination/pvr_vm_mips.c
181
for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
drivers/gpu/drm/imagination/pvr_vm_mips.c
186
(pfn - start_pfn) <<
drivers/gpu/drm/imagination/pvr_vm_mips.c
196
WRITE_ONCE(mips_data->pt[pfn], pte);
drivers/gpu/drm/imagination/pvr_vm_mips.c
204
while (--pfn >= start_pfn)
drivers/gpu/drm/imagination/pvr_vm_mips.c
205
WRITE_ONCE(mips_data->pt[pfn], 0);
drivers/gpu/drm/imagination/pvr_vm_mips.c
232
for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)
drivers/gpu/drm/imagination/pvr_vm_mips.c
233
WRITE_ONCE(mips_data->pt[pfn], 0);
drivers/gpu/drm/msm/msm_gem.c
335
unsigned long pfn;
drivers/gpu/drm/msm/msm_gem.c
365
pfn = page_to_pfn(pages[pgoff]);
drivers/gpu/drm/msm/msm_gem.c
368
pfn, pfn << PAGE_SHIFT);
drivers/gpu/drm/msm/msm_gem.c
370
ret = vmf_insert_pfn(vma, vmf->address, pfn);
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
97
u64 *pfn;
drivers/gpu/drm/nouveau/nouveau_dmem.c
303
unsigned long i, pfn_first, pfn;
drivers/gpu/drm/nouveau/nouveau_dmem.c
347
pfn = pfn_first;
drivers/gpu/drm/nouveau/nouveau_dmem.c
352
for (j = 0; j < DMEM_CHUNK_NPAGES - 1; j++, pfn++) {
drivers/gpu/drm/nouveau/nouveau_dmem.c
353
page = pfn_to_page(pfn);
drivers/gpu/drm/nouveau/nouveau_dmem.c
358
page = pfn_to_page(pfn);
drivers/gpu/drm/nouveau/nouveau_dmem.c
361
pfn += DMEM_CHUNK_NPAGES;
drivers/gpu/drm/nouveau/nouveau_dmem.c
729
struct nouveau_dmem_dma_info *dma_info, u64 *pfn)
drivers/gpu/drm/nouveau/nouveau_dmem.c
765
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
drivers/gpu/drm/nouveau/nouveau_dmem.c
768
*pfn |= NVIF_VMM_PFNMAP_V0_W;
drivers/gpu/drm/nouveau/nouveau_dmem.c
779
*pfn = NVIF_VMM_PFNMAP_V0_NONE;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1294
nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1307
page->desc->func->pfn == NULL))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1322
bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1332
if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1377
args.pfn = &pfn[pi];
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1382
desc->func->pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1385
page->desc->func->pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1405
pfn[pi++] = NVKM_VMM_PFN_NONE;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1434
nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1439
nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1442
nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
1935
nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
261
nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
268
if (pfn) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
378
nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
418
nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
426
return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
430
nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
433
return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
526
u64 addr, u64 size, const char *name, bool ref, bool pfn,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
527
bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
587
if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
705
u64 addr, u64 size, bool sparse, bool pfn)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
710
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
766
u64 addr, u64 size, bool sparse, bool pfn)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
771
false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
778
u64 addr, u64 size, bool sparse, bool pfn)
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
781
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
784
__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
184
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
221
int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
79
nvkm_vmm_pte_func pfn;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
103
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
193
.pfn = gp100_vmm_pgt_pfn,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
337
for (; ptes; ptes--, map->pfn++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
340
if (!(*map->pfn & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
343
if (!(*map->pfn & NVKM_VMM_PFN_W))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
346
if (!(*map->pfn & NVKM_VMM_PFN_A))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
349
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
350
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
360
data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
375
.pfn = gp100_vmm_pd0_pfn,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
80
for (; ptes; ptes--, map->pfn++) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
83
if (!(*map->pfn & NVKM_VMM_PFN_V))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
86
if (!(*map->pfn & NVKM_VMM_PFN_W))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
89
if (!(*map->pfn & NVKM_VMM_PFN_A))
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
92
if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
93
addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
drivers/gpu/drm/omapdrm/omap_gem.c
358
unsigned long pfn;
drivers/gpu/drm/omapdrm/omap_gem.c
366
pfn = page_to_pfn(omap_obj->pages[pgoff]);
drivers/gpu/drm/omapdrm/omap_gem.c
369
pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
drivers/gpu/drm/omapdrm/omap_gem.c
373
pfn, pfn << PAGE_SHIFT);
drivers/gpu/drm/omapdrm/omap_gem.c
375
return vmf_insert_mixed(vma, vmf->address, pfn);
drivers/gpu/drm/omapdrm/omap_gem.c
387
unsigned long pfn;
drivers/gpu/drm/omapdrm/omap_gem.c
464
pfn = entry->dma_addr >> PAGE_SHIFT;
drivers/gpu/drm/omapdrm/omap_gem.c
467
pfn, pfn << PAGE_SHIFT);
drivers/gpu/drm/omapdrm/omap_gem.c
470
ret = vmf_insert_mixed(vma, vaddr, pfn);
drivers/gpu/drm/omapdrm/omap_gem.c
473
pfn += priv->usergart[fmt].stride_pfn;
drivers/gpu/drm/panthor/panthor_device.c
414
unsigned long pfn;
drivers/gpu/drm/panthor/panthor_device.c
429
pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
drivers/gpu/drm/panthor/panthor_device.c
431
pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
drivers/gpu/drm/panthor/panthor_device.c
443
ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);
drivers/gpu/drm/ttm/ttm_bo_vm.c
191
unsigned long pfn;
drivers/gpu/drm/ttm/ttm_bo_vm.c
247
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
drivers/gpu/drm/ttm/ttm_bo_vm.c
255
pfn = page_to_pfn(page);
drivers/gpu/drm/ttm/ttm_bo_vm.c
266
ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
drivers/gpu/drm/ttm/ttm_bo_vm.c
297
unsigned long pfn;
drivers/gpu/drm/ttm/ttm_bo_vm.c
310
pfn = page_to_pfn(page);
drivers/gpu/drm/ttm/ttm_bo_vm.c
315
ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
drivers/gpu/drm/v3d/v3d_mmu.c
100
u32 page_address = page_prot | pfn;
drivers/gpu/drm/v3d/v3d_mmu.c
103
BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24));
drivers/gpu/drm/v3d/v3d_mmu.c
119
pfn++;
drivers/gpu/drm/v3d/v3d_mmu.c
95
u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT;
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
631
static inline void hypervisor_ppn_add(PPN64 pfn)
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
633
vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
642
static inline void hypervisor_ppn_remove(PPN64 pfn)
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
644
vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
drivers/gpu/drm/xe/xe_device.c
285
unsigned long pfn;
drivers/gpu/drm/xe/xe_device.c
288
pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
drivers/gpu/drm/xe/xe_device.c
290
ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
drivers/gpu/drm/xe/xe_mmio_gem.c
171
unsigned long pfn;
drivers/gpu/drm/xe/xe_mmio_gem.c
181
pfn = page_to_pfn(page);
drivers/gpu/drm/xe/xe_mmio_gem.c
187
ret = vmf_insert_pfn(vma, addr, pfn);
drivers/gpu/drm/xe/xe_svm.c
459
u64 pfn = page_to_pfn(page);
drivers/gpu/drm/xe/xe_svm.c
464
xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= hpa_base);
drivers/gpu/drm/xe/xe_svm.c
466
offset = (pfn << PAGE_SHIFT) - hpa_base;
drivers/gpu/drm/xe/xe_svm.c
784
unsigned long npages, unsigned long *pfn)
drivers/gpu/drm/xe/xe_svm.c
800
pfn[j++] = block_pfn + i;
drivers/hv/channel.c
401
gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
drivers/hv/hv_balloon.c
583
unsigned long pfn)
drivers/hv/hv_balloon.c
588
if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
drivers/hv/hv_balloon.c
593
if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
drivers/hv/hv_balloon.c
603
unsigned long pfn = start_pfn, count = 0;
drivers/hv/hv_balloon.c
607
while (pfn < start_pfn + nr_pages) {
drivers/hv/hv_balloon.c
614
while ((pfn >= has->start_pfn) &&
drivers/hv/hv_balloon.c
615
(pfn < has->end_pfn) &&
drivers/hv/hv_balloon.c
616
(pfn < start_pfn + nr_pages)) {
drivers/hv/hv_balloon.c
618
if (has_pfn_is_backed(has, pfn))
drivers/hv/hv_balloon.c
620
pfn++;
drivers/hv/hv_balloon.c
630
pfn++;
drivers/hv/hv_balloon.c
767
unsigned long pfn = page_to_pfn(pg);
drivers/hv/hv_balloon.c
772
if (pfn < has->start_pfn ||
drivers/hv/hv_balloon.c
773
(pfn + (1UL << order) > has->end_pfn))
drivers/hv/hv_balloon.c
776
hv_bring_pgs_online(has, pfn, 1UL << order);
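The hv_balloon checks above treat a pfn as backed only when it lies inside a hot-added region's covered range and outside every recorded gap. A sketch with simplified stand-ins for the hv_balloon structures:

```c
#include <stdbool.h>
#include <stdio.h>

struct gap { unsigned long start_pfn, end_pfn; };

static bool pfn_is_backed(unsigned long pfn,
			  unsigned long covered_start, unsigned long covered_end,
			  const struct gap *gaps, int ngaps)
{
	int i;

	if (pfn < covered_start || pfn >= covered_end)
		return false;
	for (i = 0; i < ngaps; i++)
		if (pfn >= gaps[i].start_pfn && pfn < gaps[i].end_pfn)
			return false;
	return true;
}

int main(void)
{
	struct gap gaps[] = { { 0x110, 0x120 } };

	printf("%d %d %d\n",
	       pfn_is_backed(0x105, 0x100, 0x200, gaps, 1),	/* 1 */
	       pfn_is_backed(0x115, 0x100, 0x200, gaps, 1),	/* 0: in gap */
	       pfn_is_backed(0x250, 0x100, 0x200, gaps, 1));	/* 0: outside */
	return 0;
}
```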
drivers/hv/mshv_root_hv_call.c
895
u64 status, pfn;
drivers/hv/mshv_root_hv_call.c
908
pfn = output->map_location;
drivers/hv/mshv_root_hv_call.c
931
*addr = page_address(pfn_to_page(pfn));
drivers/hv/mshv_vtl_main.c
1215
static bool can_fault(struct vm_fault *vmf, unsigned long size, unsigned long *pfn)
drivers/hv/mshv_vtl_main.c
1227
*pfn = vmf->pgoff & ~(mask >> PAGE_SHIFT);
drivers/hv/mshv_vtl_main.c
1234
unsigned long pfn = vmf->pgoff;
drivers/hv/mshv_vtl_main.c
1239
return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
drivers/hv/mshv_vtl_main.c
1242
if (can_fault(vmf, PMD_SIZE, &pfn))
drivers/hv/mshv_vtl_main.c
1243
ret = vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
drivers/hv/mshv_vtl_main.c
1247
if (can_fault(vmf, PUD_SIZE, &pfn))
drivers/hv/mshv_vtl_main.c
1248
ret = vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
drivers/hv/mshv_vtl_main.c
219
overlay.pfn = page_to_hvpfn(reg_page);
drivers/hv/mshv_vtl_main.c
94
u64 pfn: 52;
drivers/infiniband/core/ib_core_uverbs.c
72
unsigned long pfn, unsigned long size, pgprot_t prot,
drivers/infiniband/core/ib_core_uverbs.c
95
if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
drivers/infiniband/core/umem_odp.c
445
unsigned long pfn = umem_odp->map.pfn_list[idx];
drivers/infiniband/core/umem_odp.c
450
if (pfn & HMM_PFN_WRITE) {
drivers/infiniband/core/umem_odp.c
451
struct page *page = hmm_pfn_to_page(pfn);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4592
u64 pfn;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4603
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4604
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4609
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4610
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4618
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
4619
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/efa/efa_verbs.c
2031
u64 pfn;
drivers/infiniband/hw/efa/efa_verbs.c
2048
pfn = entry->address >> PAGE_SHIFT;
drivers/infiniband/hw/efa/efa_verbs.c
2051
err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
drivers/infiniband/hw/efa/efa_verbs.c
2057
err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
drivers/infiniband/hw/efa/efa_verbs.c
2064
va += PAGE_SIZE, pfn++) {
drivers/infiniband/hw/efa/efa_verbs.c
2065
err = vm_insert_page(vma, va, pfn_to_page(pfn));
drivers/infiniband/hw/hfi1/user_exp_rcv.c
565
unsigned long pfn, this_pfn;
drivers/infiniband/hw/hfi1/user_exp_rcv.c
577
pfn = page_to_pfn(pages[0]);
drivers/infiniband/hw/hfi1/user_exp_rcv.c
585
if (this_pfn != ++pfn) {
drivers/infiniband/hw/hfi1/user_exp_rcv.c
619
pfn = this_pfn;
drivers/infiniband/hw/hns/hns_roce_device.h
205
u64 pfn;
drivers/infiniband/hw/hns/hns_roce_main.c
412
address = context->uar.pfn << PAGE_SHIFT;
drivers/infiniband/hw/hns/hns_roce_main.c
525
phys_addr_t pfn;
drivers/infiniband/hw/hns/hns_roce_main.c
541
pfn = entry->address >> PAGE_SHIFT;
drivers/infiniband/hw/hns/hns_roce_main.c
553
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
drivers/infiniband/hw/hns/hns_roce_pd.c
104
uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
drivers/infiniband/hw/ionic/ionic_controlpath.c
341
unsigned long pfn, u8 mmap_flags, u64 *offset)
drivers/infiniband/hw/ionic/ionic_controlpath.c
351
entry->pfn = pfn;
drivers/infiniband/hw/ionic/ionic_controlpath.c
466
vma->vm_start, ionic_entry->pfn, ionic_entry->size);
drivers/infiniband/hw/ionic/ionic_controlpath.c
467
rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
drivers/infiniband/hw/ionic/ionic_ibdev.h
80
unsigned long pfn;
drivers/infiniband/hw/irdma/verbs.c
136
u64 pfn;
drivers/infiniband/hw/irdma/verbs.c
142
pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
drivers/infiniband/hw/irdma/verbs.c
145
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/irdma/verbs.c
190
u64 pfn;
drivers/infiniband/hw/irdma/verbs.c
212
pfn = (entry->bar_offset +
drivers/infiniband/hw/irdma/verbs.c
217
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/irdma/verbs.c
222
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/mana/main.c
521
phys_addr_t pfn;
drivers/infiniband/hw/mana/main.c
534
pfn = (gc->phys_db_page_base +
drivers/infiniband/hw/mana/main.c
539
ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
drivers/infiniband/hw/mana/main.c
545
pfn, PAGE_SIZE, ret);
drivers/infiniband/hw/mlx4/main.c
1159
to_mucontext(context)->uar.pfn,
drivers/infiniband/hw/mlx4/main.c
1169
to_mucontext(context)->uar.pfn +
drivers/infiniband/hw/mlx4/main.c
2654
ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
drivers/infiniband/hw/mlx5/main.c
2558
phys_addr_t pfn;
drivers/infiniband/hw/mlx5/main.c
2631
pfn = uar_index2pfn(dev, uar_index);
drivers/infiniband/hw/mlx5/main.c
2632
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
drivers/infiniband/hw/mlx5/main.c
2634
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
drivers/infiniband/hw/mlx5/main.c
2678
phys_addr_t pfn;
drivers/infiniband/hw/mlx5/main.c
2687
pfn = (mentry->address >> PAGE_SHIFT);
drivers/infiniband/hw/mlx5/main.c
2693
ret = rdma_user_mmap_io(ucontext, vma, pfn,
drivers/infiniband/hw/mlx5/main.c
2715
phys_addr_t pfn;
drivers/infiniband/hw/mlx5/main.c
2743
pfn = (dev->mdev->bar_addr +
drivers/infiniband/hw/mlx5/main.c
2746
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
drivers/infiniband/hw/mlx5/odp.c
171
unsigned long pfn = odp->map.pfn_list[start + i];
drivers/infiniband/hw/mlx5/odp.c
174
pfn = odp->map.pfn_list[start + i];
drivers/infiniband/hw/mlx5/odp.c
175
if (!(pfn & HMM_PFN_VALID))
drivers/infiniband/hw/mlx5/odp.c
185
if ((pfn & HMM_PFN_WRITE) && !downgrade)
drivers/infiniband/hw/mthca/mthca_main.c
714
dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/infiniband/hw/mthca/mthca_provider.c
341
to_mucontext(context)->uar.pfn,
drivers/infiniband/hw/mthca/mthca_provider.h
58
unsigned long pfn;
drivers/infiniband/hw/mthca/mthca_uar.c
44
uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
drivers/infiniband/hw/qedr/verbs.c
395
u64 pfn;
drivers/infiniband/hw/qedr/verbs.c
414
pfn = entry->io_address >> PAGE_SHIFT;
drivers/infiniband/hw/qedr/verbs.c
415
rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
110
unsigned long pfn;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
419
u32 pfn; /* UAR page frame number */
drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
109
uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
847
dev->driver_uar.pfn =
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
851
ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
879
dev->dsr->uar_pfn = dev->driver_uar.pfn;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
881
dev->dsr->uar_pfn64 = dev->driver_uar.pfn;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
307
cmd->pfn = context->uar.pfn;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
309
cmd->pfn64 = context->uar.pfn;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
385
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
drivers/iommu/amd/init.c
3978
unsigned long paddr, pfn;
drivers/iommu/amd/init.c
3982
pfn = __sme_clr(paddr) >> PAGE_SHIFT;
drivers/iommu/amd/init.c
3984
if (!(pfn % PTRS_PER_PMD)) {
drivers/iommu/amd/init.c
3988
ret = snp_lookup_rmpentry(pfn, &assigned, &level);
drivers/iommu/amd/init.c
3990
pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
drivers/iommu/amd/init.c
3995
pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
drivers/iommu/amd/init.c
4000
ret = psmash(pfn);
drivers/iommu/amd/init.c
4005
pfn, ret, level);
drivers/iommu/amd/init.c
4011
return rmp_make_shared(pfn, PG_LEVEL_4K);
drivers/iommu/dma-iommu.c
1685
unsigned long pfn, off = vma->vm_pgoff;
drivers/iommu/dma-iommu.c
1701
pfn = vmalloc_to_pfn(cpu_addr);
drivers/iommu/dma-iommu.c
1703
pfn = page_to_pfn(virt_to_page(cpu_addr));
drivers/iommu/dma-iommu.c
1706
return remap_pfn_range(vma, vma->vm_start, pfn + off,
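
Note: the dma-iommu.c mmap path above illustrates the usual two-step: derive the pfn of the backing buffer (vmalloc'd vs. linear-mapped memory differ), then hand the whole VMA to remap_pfn_range(). A sketch under the assumption that cpu_addr and size come from the caller:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static int demo_mmap(struct vm_area_struct *vma, void *cpu_addr, size_t size)
    {
            /* vmalloc memory is not in the linear map, so pick the right lookup. */
            unsigned long pfn = is_vmalloc_addr(cpu_addr) ?
                            vmalloc_to_pfn(cpu_addr) :
                            page_to_pfn(virt_to_page(cpu_addr));

            /* Apply the user's page offset and map the range in one call. */
            return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                   size, vma->vm_page_prot);
    }
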
drivers/iommu/dma-iommu.c
199
unsigned long pfn, unsigned long pages,
drivers/iommu/dma-iommu.c
236
fq->entries[idx].iova_pfn = pfn;
drivers/iommu/generic_pt/kunit_iommu_pt.h
90
pt_vaddr_t pfn = log2_div(va, priv->smallest_pgsz_lg2);
drivers/iommu/generic_pt/kunit_iommu_pt.h
91
pt_vaddr_t end_pfn = pfn + log2_div(len, priv->smallest_pgsz_lg2);
drivers/iommu/generic_pt/kunit_iommu_pt.h
93
for (; pfn != end_pfn; pfn++) {
drivers/iommu/generic_pt/kunit_iommu_pt.h
95
pfn * priv->smallest_pgsz);
drivers/iommu/intel/cache.c
264
unsigned long pfn = IOVA_PFN(start);
drivers/iommu/intel/cache.c
271
if (unlikely(bitmask & pfn)) {
drivers/iommu/intel/cache.c
272
unsigned long end_pfn = pfn + pages - 1, shared_bits;
drivers/iommu/intel/cache.c
281
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
drivers/iommu/intel/iommu.c
553
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
drivers/iommu/intel/iommu.c
560
offset = pfn_level_offset(pfn, level);
drivers/iommu/intel/iommu.h
890
static inline int pfn_level_offset(u64 pfn, int level)
drivers/iommu/intel/iommu.h
892
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
drivers/iommu/iommufd/pages.c
361
static bool batch_add_pfn_num(struct pfn_batch *batch, unsigned long pfn,
drivers/iommu/iommufd/pages.c
373
if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1] &&
drivers/iommu/iommufd/pages.c
377
batch->pfns[end] = pfn;
drivers/iommu/iommufd/pages.c
397
static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
drivers/iommu/iommufd/pages.c
399
return batch_add_pfn_num(batch, pfn, 1, BATCH_CPU_MEMORY);
drivers/iommu/iommufd/pages.c
673
unsigned long pfn = page_to_pfn(folio_page(folio, offset));
drivers/iommu/iommufd/pages.c
678
if (!batch_add_pfn_num(batch, pfn, nr, BATCH_CPU_MEMORY))
drivers/iommu/iommufd/selftest.c
1232
unsigned long pfn;
drivers/iommu/iommufd/selftest.c
1245
pfn = page_to_pfn(pages[0]);
drivers/iommu/iommufd/selftest.c
1250
pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
drivers/iommu/iova.c
23
unsigned long pfn,
drivers/iommu/iova.c
276
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
drivers/iommu/iova.c
285
if (pfn < iova->pfn_lo)
drivers/iommu/iova.c
287
else if (pfn > iova->pfn_hi)
drivers/iommu/iova.c
310
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
drivers/iommu/iova.c
317
iova = private_find_iova(iovad, pfn);
drivers/iommu/iova.c
349
free_iova(struct iova_domain *iovad, unsigned long pfn)
drivers/iommu/iova.c
355
iova = private_find_iova(iovad, pfn);
drivers/iommu/iova.c
425
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
drivers/iommu/iova.c
427
if (iova_rcache_insert(iovad, pfn, size))
drivers/iommu/iova.c
430
free_iova(iovad, pfn);
drivers/iommu/iova.c
655
unsigned long pfn;
drivers/iommu/iova.c
663
pfn = mag->pfns[i];
drivers/iommu/iova.c
666
return pfn;
drivers/iommu/iova.c
669
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
drivers/iommu/iova.c
671
mag->pfns[mag->size++] = pfn;
drivers/iommu/iova.c
807
static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
drivers/iommu/iova.c
815
return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
drivers/iommu/tegra-smmu.c
810
unsigned long pfn;
drivers/iommu/tegra-smmu.c
818
pfn = *pte & as->smmu->pfn_mask;
drivers/iommu/tegra-smmu.c
820
return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
drivers/md/dm-linear.c
173
unsigned long *pfn)
drivers/md/dm-linear.c
177
return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
drivers/md/dm-log-writes.c
897
unsigned long *pfn)
drivers/md/dm-log-writes.c
901
return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
drivers/md/dm-pcache/cache_dev.c
23
unsigned long pfn;
drivers/md/dm-pcache/cache_dev.c
32
DAX_ACCESS, NULL, &pfn);
drivers/md/dm-pcache/cache_dev.c
38
if (!pfn_valid(pfn)) {
drivers/md/dm-pcache/cache_dev.c
44
pages[i++] = pfn_to_page(pfn);
drivers/md/dm-pcache/cache_dev.c
45
pfn++;
drivers/md/dm-pcache/cache_dev.c
73
unsigned long pfn;
drivers/md/dm-pcache/cache_dev.c
89
DAX_ACCESS, &vaddr, &pfn);
drivers/md/dm-pcache/cache_dev.c
96
if (!pfn_valid(pfn)) {
drivers/md/dm-stripe.c
319
unsigned long *pfn)
drivers/md/dm-stripe.c
323
return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
drivers/md/dm-target.c
258
unsigned long *pfn)
drivers/md/dm-writecache.c
258
unsigned long pfn;
drivers/md/dm-writecache.c
286
&wc->memory_map, &pfn);
drivers/md/dm-writecache.c
292
if (!pfn_valid(pfn)) {
drivers/md/dm-writecache.c
311
p - i, DAX_ACCESS, NULL, &pfn);
drivers/md/dm-writecache.c
316
if (!pfn_valid(pfn)) {
drivers/md/dm-writecache.c
321
pages[i++] = pfn_to_page(pfn);
drivers/md/dm-writecache.c
322
pfn++;
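
Note: dm-pcache and dm-writecache above both turn a DAX window into a page array the same way: ask dax_direct_access() for the starting pfn, check pfn_valid(), then walk consecutive pfns through pfn_to_page(). A sketch with error handling trimmed, using the unsigned long *pfn prototype quoted above:

    #include <linux/dax.h>
    #include <linux/mm.h>

    static int demo_dax_pages(struct dax_device *dax_dev, pgoff_t pgoff,
                              long nr_pages, struct page **pages)
    {
            unsigned long pfn;
            long i, mapped;

            /* Returns how many pages are accessible at pgoff; fills pfn. */
            mapped = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
                                       NULL, &pfn);
            if (mapped < nr_pages)
                    return -EINVAL;
            if (!pfn_valid(pfn))
                    return -EOPNOTSUPP;

            /* DAX pfns for one access are contiguous, so step linearly. */
            for (i = 0; i < nr_pages; i++)
                    pages[i] = pfn_to_page(pfn + i);
            return 0;
    }
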
drivers/md/dm.c
1212
unsigned long *pfn)
drivers/md/dm.c
1230
ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
drivers/media/pci/pt1/pt1.c
637
u32 first_pfn, pfn;
drivers/media/pci/pt1/pt1.c
655
ret = pt1_init_table(pt1, &tables[i], &pfn);
drivers/media/pci/pt1/pt1.c
658
tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
drivers/misc/genwqe/card_dev.c
438
unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
drivers/misc/genwqe/card_dev.c
467
pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
drivers/misc/genwqe/card_dev.c
470
pfn,
drivers/misc/sgi-gru/gruhandles.c
157
tfh->pfn = paddr >> GRU_PADDR_SHIFT;
drivers/misc/sgi-gru/gruhandles.c
173
tfh->pfn = paddr >> GRU_PADDR_SHIFT;
drivers/misc/sgi-gru/gruhandles.h
289
unsigned long pfn:41; /* DW 3 */
drivers/misc/vmw_balloon.c
259
u64 pfn : 52;
drivers/misc/vmw_balloon.c
745
*p = pfn_to_page(b->batch_page[idx].pfn);
drivers/misc/vmw_balloon.c
781
unsigned long cmd, pfn;
drivers/misc/vmw_balloon.c
795
pfn = PHYS_PFN(virt_to_phys(b->batch_page));
drivers/misc/vmw_balloon.c
799
pfn = page_to_pfn(b->page);
drivers/misc/vmw_balloon.c
802
if (unlikely(pfn != (u32)pfn))
drivers/misc/vmw_balloon.c
806
return vmballoon_cmd(b, cmd, pfn, num_pages);
drivers/misc/vmw_balloon.c
825
{ .pfn = page_to_pfn(p) };
drivers/net/ethernet/cavium/liquidio/octeon_device.c
1140
octeon_dispatch_fn_t pfn;
drivers/net/ethernet/cavium/liquidio/octeon_device.c
1161
pfn = octeon_get_dispatch(oct, opcode, subcode);
drivers/net/ethernet/cavium/liquidio/octeon_device.c
1162
if (!pfn) {
drivers/net/ethernet/cavium/liquidio/octeon_device.c
1184
if (pfn == fn &&
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
697
u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
704
pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K);
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
707
HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
717
pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
721
HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
750
u64 cmdq_first_block_paddr, pfn;
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
755
pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
762
CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
766
pfn = CMDQ_PFN(cmdq_first_block_paddr);
drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
770
CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
608
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
621
start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
651
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
656
ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
661
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
666
ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
671
static int ehea_is_hugepage(unsigned long pfn)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
673
if (pfn & EHEA_HUGEPAGE_PFN_MASK)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
676
if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT)
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
686
unsigned long pfn, start_pfn, end_pfn, nr_pages;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
695
pfn = start_pfn;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
697
while (pfn < end_pfn) {
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
698
if (ehea_is_hugepage(pfn)) {
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
700
nr_pages = pfn - start_pfn;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
707
pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
708
start_pfn = pfn;
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
710
pfn += (EHEA_SECTSIZE / PAGE_SIZE);
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
714
nr_pages = pfn - start_pfn;
drivers/net/ethernet/ibm/ehea/ehea_qmr.h
384
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
drivers/net/ethernet/ibm/ehea/ehea_qmr.h
385
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
drivers/net/ethernet/mellanox/mlx4/en_main.c
288
mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
drivers/net/ethernet/mellanox/mlx4/main.c
2739
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx4/pd.c
159
uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
drivers/net/ethernet/mellanox/mlx4/pd.c
202
uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
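
Note: the mlx4/mthca/pvrdma UAR entries above all follow one recipe: compute the doorbell page's pfn from a PCI BAR start plus an index, then ioremap that single physical page. A sketch of that recipe:

    #include <linux/pci.h>
    #include <linux/io.h>

    static void __iomem *demo_map_uar(struct pci_dev *pdev, int bar,
                                      unsigned long index)
    {
            /* BAR physical address -> pfn, offset by the UAR index. */
            unsigned long pfn = (pci_resource_start(pdev, bar) >> PAGE_SHIFT) + index;

            /* Map exactly one page of doorbell registers. */
            return ioremap((phys_addr_t)pfn << PAGE_SHIFT, PAGE_SIZE);
    }
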
drivers/net/ethernet/mellanox/mlx5/core/uar.c
135
pfn = uar2pfn(mdev, up->index);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
137
up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
143
up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/uar.c
99
phys_addr_t pfn;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1011
pfn = pci_info[i].id;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1013
if (pfn >= ahw->max_vnic_func) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1016
__func__, pfn, ahw->max_vnic_func);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1025
if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1034
adapter->npars[j].pci_func = pfn;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
985
u8 pfn;
drivers/net/hyperv/netvsc.c
1034
char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
drivers/net/hyperv/netvsc.c
1050
pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
drivers/net/hyperv/netvsc.c
1083
mpb_entry->pfn_array[j] = pb[i].pfn + j;
drivers/net/hyperv/netvsc.c
968
char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
drivers/net/hyperv/netvsc_drv.c
344
pb[0].pfn = virt_to_hvpfn(hdr);
drivers/net/hyperv/netvsc_drv.c
349
pb[1].pfn = virt_to_hvpfn(skb->data);
drivers/net/hyperv/netvsc_drv.c
354
u64 pfn = page_to_hvpfn(skb_frag_page(frag));
drivers/net/hyperv/netvsc_drv.c
359
cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
drivers/net/hyperv/rndis_filter.c
237
pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
201
struct brcmf_pno_net_param_le pfn;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
204
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
205
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
206
pfn.wsec = cpu_to_le32(0);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
207
pfn.infra = cpu_to_le32(1);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
208
pfn.flags = 0;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
210
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
211
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
212
memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
215
err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
drivers/nvdimm/pmem.c
244
unsigned long *pfn)
drivers/nvdimm/pmem.c
255
if (pfn)
drivers/nvdimm/pmem.c
256
*pfn = PHYS_PFN(pmem->phys_addr + offset);
drivers/nvdimm/pmem.c
305
void **kaddr, unsigned long *pfn)
drivers/nvdimm/pmem.c
309
return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
drivers/nvdimm/pmem.c
434
unsigned long pfn, unsigned long nr_pages, int mf_flags)
drivers/nvdimm/pmem.c
438
u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
drivers/nvdimm/pmem.c
67
unsigned long pfn_start, pfn_end, pfn;
drivers/nvdimm/pmem.c
75
for (pfn = pfn_start; pfn < pfn_end; pfn++) {
drivers/nvdimm/pmem.c
76
struct page *page = pfn_to_page(pfn);
drivers/nvdimm/pmem.c
84
clear_mce_nospec(pfn);
drivers/nvdimm/pmem.h
32
unsigned long *pfn);
drivers/nvdimm/region_devs.c
35
unsigned long pfn = PHYS_PFN(res->start);
drivers/nvdimm/region_devs.c
43
if (pfn == pfn_j)
drivers/nvdimm/region_devs.c
53
PFN_PHYS(pfn), PAGE_SIZE);
drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
35
#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(pfn) (0x4000 * (pfn))
drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
36
#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(pfn) ((0x4000 * (pfn)) + 0x04)
drivers/platform/x86/intel/pmt/class.c
118
unsigned long pfn = PFN_DOWN(phys);
drivers/platform/x86/intel/pmt/class.c
124
psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
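
Note: the class.c lines above page-align an arbitrary physical window with the PFN_DOWN()/PFN_UP() pair. A worked example of the rounding, assuming 4 KiB pages (PAGE_SHIFT == 12):

    /*
     *   base = 0x10000100, size = 0x180
     *   pfn     = PFN_DOWN(base)              = 0x10000   (rounds down)
     *   end_pfn = PFN_UP(base + size)         = 0x10001   (rounds up)
     *   psize   = (end_pfn - pfn) * PAGE_SIZE = 0x1000    (one full page)
     */
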
drivers/platform/x86/intel/pmt/class.c
131
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
drivers/ras/cec.c
186
static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
drivers/ras/cec.c
196
if (this_pfn < pfn)
drivers/ras/cec.c
198
else if (this_pfn > pfn)
drivers/ras/cec.c
200
else if (this_pfn == pfn) {
drivers/ras/cec.c
223
static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
drivers/ras/cec.c
231
return __find_elem(ca, pfn, to);
drivers/ras/cec.c
271
u64 pfn;
drivers/ras/cec.c
277
pfn = del_lru_elem_unlocked(ca);
drivers/ras/cec.c
280
return pfn;
drivers/ras/cec.c
321
static int cec_add_elem(u64 pfn)
drivers/ras/cec.c
342
err = find_elem(ca, pfn, &to);
drivers/ras/cec.c
351
ca->array[to] = pfn << PAGE_SHIFT;
drivers/ras/cec.c
362
u64 pfn = ca->array[to] >> PAGE_SHIFT;
drivers/ras/cec.c
364
if (!pfn_valid(pfn)) {
drivers/ras/cec.c
365
pr_warn("CEC: Invalid pfn: 0x%llx\n", pfn);
drivers/ras/cec.c
368
pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
drivers/ras/cec.c
369
memory_failure_queue(pfn, MF_SOFT_OFFLINE);
drivers/ras/cec.c
483
struct dentry *d, *pfn, *decay, *count, *array, *dfs;
drivers/ras/cec.c
514
pfn = debugfs_create_file("pfn", S_IRUSR | S_IWUSR, d, &dfs_pfn, &pfn_ops);
drivers/ras/cec.c
515
if (!pfn) {
drivers/s390/block/dcssblk.c
34
unsigned long *pfn);
drivers/s390/block/dcssblk.c
935
long nr_pages, void **kaddr, unsigned long *pfn)
drivers/s390/block/dcssblk.c
943
if (pfn)
drivers/s390/block/dcssblk.c
944
*pfn = PFN_DOWN(dev_info->start + offset);
drivers/s390/block/dcssblk.c
952
unsigned long *pfn)
drivers/s390/block/dcssblk.c
956
return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
drivers/s390/cio/vfio_ccw_cp.c
180
u64 pfn;
drivers/s390/cio/vfio_ccw_cp.c
184
pfn = pa->pa_iova[i] >> PAGE_SHIFT;
drivers/s390/cio/vfio_ccw_cp.c
185
if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
drivers/scsi/csiostor/csio_hw.c
1043
if (hw->pfn == mpfn)
drivers/scsi/csiostor/csio_hw.c
1045
hw->pfn, state_str);
drivers/scsi/csiostor/csio_hw.c
1049
hw->pfn, mpfn, state_str);
drivers/scsi/csiostor/csio_hw.c
1386
csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
drivers/scsi/csiostor/csio_hw.c
1802
hw->pfn, 0, 1, &param, &val, true,
drivers/scsi/csiostor/csio_hw.c
1944
csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
drivers/scsi/csiostor/csio_hw.c
2355
ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
drivers/scsi/csiostor/csio_hw.c
903
hw->pfn = src_pf;
drivers/scsi/csiostor/csio_hw.c
935
csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
drivers/scsi/csiostor/csio_hw.c
936
hw->pfn, CSIO_MASTER_MAY, NULL);
drivers/scsi/csiostor/csio_hw.c
952
if (hw->pfn == mpfn) {
drivers/scsi/csiostor/csio_hw.h
512
uint8_t pfn; /* Physical Function
drivers/scsi/csiostor/csio_hw_t5.c
300
win_pf = PFNUM_V(hw->pfn);
drivers/scsi/csiostor/csio_mb.c
1162
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
drivers/scsi/csiostor/csio_mb.c
1163
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
drivers/scsi/csiostor/csio_mb.c
1196
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
drivers/scsi/csiostor/csio_mb.c
1197
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
drivers/scsi/csiostor/csio_mb.c
1221
hw->pfn, *((uint8_t *)mbp->mb));
drivers/scsi/csiostor/csio_mb.c
1250
hw->pfn, *((uint8_t *)mbp->mb), owner);
drivers/scsi/csiostor/csio_mb.c
1257
hw->pfn, *((uint8_t *)mbp->mb),
drivers/scsi/csiostor/csio_mb.c
1274
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
drivers/scsi/csiostor/csio_mb.c
1310
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
drivers/scsi/csiostor/csio_mb.c
1338
hw->pfn, *((uint8_t *)cmd));
drivers/scsi/csiostor/csio_mb.c
1479
uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL_A);
drivers/scsi/csiostor/csio_mb.c
1480
uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA_A);
drivers/scsi/csiostor/csio_mb.c
1506
CSIO_DUMP_MB(hw, hw->pfn, data_reg);
drivers/scsi/csiostor/csio_mb.c
1593
csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
drivers/scsi/csiostor/csio_mb.c
1647
hw->pfn, FW_CMD_OP_G(ntohl(hdr->hi)));
drivers/scsi/csiostor/csio_mb.c
281
(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(hw->pfn));
drivers/scsi/csiostor/csio_mb.c
450
FW_IQ_CMD_PFN_V(iq_params->pfn) |
drivers/scsi/csiostor/csio_mb.c
507
FW_IQ_CMD_PFN_V(iq_params->pfn) |
drivers/scsi/csiostor/csio_mb.c
632
FW_IQ_CMD_PFN_V(iq_params->pfn) |
drivers/scsi/csiostor/csio_mb.c
667
FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
drivers/scsi/csiostor/csio_mb.c
715
FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
drivers/scsi/csiostor/csio_mb.c
818
FW_EQ_OFLD_CMD_PFN_V(eq_ofld_params->pfn) |
drivers/scsi/csiostor/csio_wr.c
453
iqp.pfn = hw->pfn;
drivers/scsi/csiostor/csio_wr.c
571
eqp.pfn = hw->pfn;
drivers/scsi/csiostor/csio_wr.c
645
iqp.pfn = hw->pfn;
drivers/scsi/csiostor/csio_wr.c
718
eqp.pfn = hw->pfn;
drivers/scsi/csiostor/csio_wr.h
116
uint8_t pfn:3;
drivers/scsi/csiostor/csio_wr.h
202
uint8_t pfn;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1028
unsigned int pfn;
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1050
pfn = page_to_pfn(bo->pages[i]);
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1051
if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
1054
virt, pfn, 1);
drivers/vdpa/alibaba/eni_vdpa.c
338
u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
drivers/vdpa/alibaba/eni_vdpa.c
340
vp_legacy_set_queue_address(ldev, qid, pfn);
drivers/vdpa/vdpa_user/iova_domain.c
163
unsigned long pfn = PFN_DOWN(orig);
drivers/vdpa/vdpa_user/iova_domain.c
171
page = pfn_to_page(pfn);
drivers/vdpa/vdpa_user/iova_domain.c
178
pfn++;
drivers/vdpa/vdpa_user/iova_domain.c
262
unsigned long pfn, bounce_pfns;
drivers/vdpa/vdpa_user/iova_domain.c
266
for (pfn = 0; pfn < bounce_pfns; pfn++) {
drivers/vdpa/vdpa_user/iova_domain.c
267
map = &domain->bounce_maps[pfn];
drivers/vdpa/vdpa_user/iova_domain.c
274
if (!((pfn << BOUNCE_MAP_SHIFT) & ~PAGE_MASK))
drivers/vdpa/vdpa_user/iova_domain.c
618
unsigned long pfn, bounce_pfns;
drivers/vdpa/vdpa_user/iova_domain.c
640
for (pfn = 0; pfn < bounce_pfns; pfn++) {
drivers/vdpa/vdpa_user/iova_domain.c
641
map = &domain->bounce_maps[pfn];
drivers/vfio/pci/nvgrace-gpu/main.c
108
if (pfn < start_pfn || pfn >= start_pfn + num_pages)
drivers/vfio/pci/nvgrace-gpu/main.c
111
*pfn_offset_in_region = pfn - start_pfn;
drivers/vfio/pci/nvgrace-gpu/main.c
120
unsigned long pfn,
drivers/vfio/pci/nvgrace-gpu/main.c
135
ret = pfn_memregion_offset(nvdev, index, pfn, &pfn_offset_in_region);
drivers/vfio/pci/nvgrace-gpu/main.c
154
unsigned long pfn, nr_pages;
drivers/vfio/pci/nvgrace-gpu/main.c
156
pfn = PHYS_PFN(region->memphys);
drivers/vfio/pci/nvgrace-gpu/main.c
159
region->pfn_address_space.node.start = pfn;
drivers/vfio/pci/nvgrace-gpu/main.c
160
region->pfn_address_space.node.last = pfn + nr_pages - 1;
drivers/vfio/pci/nvgrace-gpu/main.c
306
unsigned long pfn, addr;
drivers/vfio/pci/nvgrace-gpu/main.c
313
pfn = PHYS_PFN(memregion->memphys) + addr_to_pgoff(vma, addr);
drivers/vfio/pci/nvgrace-gpu/main.c
315
if (is_aligned_for_order(vma, addr, pfn, order)) {
drivers/vfio/pci/nvgrace-gpu/main.c
321
ret = vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
drivers/vfio/pci/nvgrace-gpu/main.c
327
__func__, order, pfn,
drivers/vfio/pci/nvgrace-gpu/main.c
95
unsigned long pfn,
drivers/vfio/pci/vfio_pci_core.c
1665
unsigned long pfn,
drivers/vfio/pci/vfio_pci_core.c
1674
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
drivers/vfio/pci/vfio_pci_core.c
1677
return vmf_insert_pfn_pmd(vmf, pfn, false);
drivers/vfio/pci/vfio_pci_core.c
1680
return vmf_insert_pfn_pud(vmf, pfn, false);
drivers/vfio/pci/vfio_pci_core.c
1693
unsigned long pfn = vma_to_pfn(vma) + pgoff;
drivers/vfio/pci/vfio_pci_core.c
1696
if (is_aligned_for_order(vma, addr, pfn, order)) {
drivers/vfio/pci/vfio_pci_core.c
1698
ret = vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
drivers/vfio/vfio_iommu_type1.c
131
unsigned long pfn; /* Host pfn */
drivers/vfio/vfio_iommu_type1.c
155
static int put_pfn(unsigned long pfn, int prot);
drivers/vfio/vfio_iommu_type1.c
1555
unsigned long pfn, long npage, int prot)
drivers/vfio/vfio_iommu_type1.c
1561
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
drivers/vfio/vfio_iommu_type1.c
1589
unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
drivers/vfio/vfio_iommu_type1.c
1597
size >> PAGE_SHIFT, &pfn, limit,
drivers/vfio/vfio_iommu_type1.c
1606
ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
drivers/vfio/vfio_iommu_type1.c
1609
vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
drivers/vfio/vfio_iommu_type1.c
1859
unsigned long pfn;
drivers/vfio/vfio_iommu_type1.c
1866
&pfn, limit,
drivers/vfio/vfio_iommu_type1.c
1874
phys = pfn << PAGE_SHIFT;
drivers/vfio/vfio_iommu_type1.c
2453
if (!is_invalid_reserved_pfn(vpfn->pfn))
drivers/vfio/vfio_iommu_type1.c
387
unsigned long pfn)
drivers/vfio/vfio_iommu_type1.c
396
vpfn->pfn = pfn;
drivers/vfio/vfio_iommu_type1.c
425
ret = put_pfn(vpfn->pfn, dma->prot);
drivers/vfio/vfio_iommu_type1.c
473
static bool is_invalid_reserved_pfn(unsigned long pfn)
drivers/vfio/vfio_iommu_type1.c
475
if (pfn_valid(pfn))
drivers/vfio/vfio_iommu_type1.c
476
return PageReserved(pfn_to_page(pfn));
drivers/vfio/vfio_iommu_type1.c
481
static int put_pfn(unsigned long pfn, int prot)
drivers/vfio/vfio_iommu_type1.c
483
if (!is_invalid_reserved_pfn(pfn)) {
drivers/vfio/vfio_iommu_type1.c
484
struct page *page = pfn_to_page(pfn);
drivers/vfio/vfio_iommu_type1.c
527
unsigned long pfn = page_to_pfn(batch->pages[batch->offset]);
drivers/vfio/vfio_iommu_type1.c
529
put_pfn(pfn, dma->prot);
drivers/vfio/vfio_iommu_type1.c
542
unsigned long vaddr, unsigned long *pfn,
drivers/vfio/vfio_iommu_type1.c
570
*pfn = args.pfn;
drivers/vfio/vfio_iommu_type1.c
586
unsigned long npages, int prot, unsigned long *pfn,
drivers/vfio/vfio_iommu_type1.c
601
*pfn = page_to_pfn(batch->pages[0]);
drivers/vfio/vfio_iommu_type1.c
617
ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
drivers/vfio/vfio_iommu_type1.c
623
if (is_invalid_reserved_pfn(*pfn)) {
drivers/vfio/vfio_iommu_type1.c
626
epfn = (*pfn | (~addr_mask >> PAGE_SHIFT)) + 1;
drivers/vfio/vfio_iommu_type1.c
627
ret = min_t(long, npages, epfn - *pfn);
drivers/vfio/vfio_iommu_type1.c
680
unsigned long pfn;
drivers/vfio/vfio_iommu_type1.c
693
pfn = *pfn_base;
drivers/vfio/vfio_iommu_type1.c
713
&pfn, batch);
drivers/vfio/vfio_iommu_type1.c
718
*pfn_base = pfn;
drivers/vfio/vfio_iommu_type1.c
724
if (pfn != *pfn_base + pinned || !rsvd)
drivers/vfio/vfio_iommu_type1.c
746
if (pfn != *pfn_base + pinned ||
drivers/vfio/vfio_iommu_type1.c
747
rsvd != is_invalid_reserved_pfn(pfn))
drivers/vfio/vfio_iommu_type1.c
789
pfn = page_to_pfn(batch->pages[batch->offset]);
drivers/vfio/vfio_iommu_type1.c
800
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
drivers/vfio/vfio_iommu_type1.c
801
put_pfn(pfn, dma->prot);
drivers/vfio/vfio_iommu_type1.c
819
unsigned long pfn, unsigned long npage,
drivers/vfio/vfio_iommu_type1.c
828
if (put_pfn(pfn++, dma->prot))
drivers/vfio/vfio_iommu_type1.c
831
put_valid_unreserved_pfns(pfn, npage, dma->prot);
drivers/vfio/vfio_iommu_type1.c
960
pages[i] = pfn_to_page(vpfn->pfn);
drivers/vfio/vfio_main.c
438
void (*pfn)(struct kvm *kvm);
drivers/vfio/vfio_main.c
447
pfn = symbol_get(kvm_put_kvm);
drivers/vfio/vfio_main.c
448
if (WARN_ON(!pfn))
drivers/vfio/vfio_main.c
464
device->put_kvm = pfn;
drivers/vhost/vdpa.c
1195
unsigned long pfn;
drivers/vhost/vdpa.c
1207
for (pfn = map_pfn; pfn <= last_pfn; pfn++)
drivers/vhost/vdpa.c
1208
unpin_user_page(pfn_to_page(pfn));
drivers/vhost/vdpa.c
926
unsigned long pfn, pinned;
drivers/vhost/vdpa.c
930
for (pfn = PFN_DOWN(map->addr);
drivers/vhost/vdpa.c
931
pinned > 0; pfn++, pinned--) {
drivers/vhost/vdpa.c
932
page = pfn_to_page(pfn);
drivers/vhost/vringh.c
1079
u64 pfn = io_addr >> PAGE_SHIFT;
drivers/vhost/vringh.c
1082
bvec_set_page(&bvec[ret], pfn_to_page(pfn), io_len,
drivers/virt/acrn/mm.c
197
cur_pfn = args.pfn;
drivers/virtio/virtio_balloon.c
139
unsigned long pfn = page_to_pfn(page);
drivers/virtio/virtio_balloon.c
143
return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
drivers/virtio/virtio_mem.c
1144
static void virtio_mem_set_fake_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
1148
for (; nr_pages--; pfn++) {
drivers/virtio/virtio_mem.c
1149
struct page *page = pfn_to_page(pfn);
drivers/virtio/virtio_mem.c
1169
static void virtio_mem_clear_fake_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
1172
for (; nr_pages--; pfn++) {
drivers/virtio/virtio_mem.c
1173
struct page *page = pfn_to_page(pfn);
drivers/virtio/virtio_mem.c
1187
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
drivers/virtio/virtio_mem.c
1197
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
drivers/virtio/virtio_mem.c
1201
struct page *page = pfn_to_page(pfn + i);
drivers/virtio/virtio_mem.c
1210
virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
drivers/virtio/virtio_mem.c
1213
virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
drivers/virtio/virtio_mem.c
1214
free_contig_range(pfn + i, 1 << order);
drivers/virtio/virtio_mem.c
1224
static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
drivers/virtio/virtio_mem.c
1227
const bool is_movable = is_zone_movable_page(pfn_to_page(pfn));
drivers/virtio/virtio_mem.c
1246
rc = alloc_contig_range(pfn, pfn + nr_pages, ACR_FLAGS_NONE,
drivers/virtio/virtio_mem.c
1256
virtio_mem_set_fake_offline(pfn, nr_pages, true);
drivers/virtio/virtio_mem.c
1257
adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
drivers/virtio/virtio_mem.c
1268
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
1276
page = pfn_to_page(pfn + i);
drivers/virtio/virtio_mem.c
1286
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
1296
page_ref_inc(pfn_to_page(pfn + i));
drivers/virtio/virtio_mem.c
1723
unsigned long pfn, nr_pages;
drivers/virtio/virtio_mem.c
1748
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
drivers/virtio/virtio_mem.c
1751
virtio_mem_fake_online(pfn, nr_pages);
drivers/virtio/virtio_mem.c
2164
unsigned long pfn;
drivers/virtio/virtio_mem.c
2181
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
drivers/virtio/virtio_mem.c
2182
page = pfn_to_online_page(pfn);
drivers/virtio/virtio_mem.c
2186
rc = virtio_mem_fake_offline(vm, pfn, PAGES_PER_SECTION);
drivers/virtio/virtio_mem.c
2188
end_pfn = pfn;
drivers/virtio/virtio_mem.c
2210
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
drivers/virtio/virtio_mem.c
2211
page = pfn_to_online_page(pfn);
drivers/virtio/virtio_mem.c
2214
virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
drivers/virtio/virtio_mem.c
2229
unsigned long pfn;
drivers/virtio/virtio_mem.c
2231
for (pfn = start_pfn; pfn < start_pfn + nr_pages;
drivers/virtio/virtio_mem.c
2232
pfn += PAGES_PER_SECTION) {
drivers/virtio/virtio_mem.c
2233
if (pfn_to_online_page(pfn))
drivers/virtio/virtio_mem.c
2249
unsigned long pfn;
drivers/virtio/virtio_mem.c
2251
for (pfn = start_pfn; pfn < start_pfn + nr_pages;
drivers/virtio/virtio_mem.c
2252
pfn += PAGES_PER_SECTION) {
drivers/virtio/virtio_mem.c
2253
page = pfn_to_online_page(pfn);
drivers/virtio/virtio_mem.c
2696
unsigned long pfn)
drivers/virtio/virtio_mem.c
2700
uint64_t addr = PFN_PHYS(pfn);
drivers/virtio/virtio_mem.c
281
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
283
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
drivers/virtio/virtio_mem.c
931
unsigned long pfn;
drivers/virtio/virtio_mem.c
937
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
drivers/virtio/virtio_mem.c
939
virtio_mem_fake_offline_going_offline(pfn, nr_pages);
drivers/virtio/virtio_mem.c
947
unsigned long pfn;
drivers/virtio/virtio_mem.c
953
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
drivers/virtio/virtio_mem.c
955
virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
drivers/virtio/virtio_mem.c
961
unsigned long pfn,
drivers/virtio/virtio_mem.c
971
virtio_mem_fake_offline_going_offline(pfn, nr_pages);
drivers/virtio/virtio_mem.c
976
unsigned long pfn,
drivers/virtio/virtio_mem.c
982
virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
drivers/xen/balloon.c
306
unsigned long pfn, i;
drivers/xen/balloon.c
308
pfn = PFN_DOWN(resource->start);
drivers/xen/balloon.c
310
if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
drivers/xen/balloon.c
685
unsigned long pfn, extra_pfn_end;
drivers/xen/balloon.c
702
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
drivers/xen/balloon.c
703
balloon_append(pfn_to_page(pfn));
drivers/xen/gntdev-dmabuf.c
628
unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
drivers/xen/gntdev-dmabuf.c
630
gfns[i++] = pfn_to_gfn(pfn);
drivers/xen/grant-dma-ops.c
108
pfn_to_gfn(pfn + i), 0);
drivers/xen/grant-dma-ops.c
84
unsigned long pfn;
drivers/xen/grant-dma-ops.c
99
pfn = virt_to_pfn(ret);
drivers/xen/grant-table.c
1062
unsigned long pfn, start_pfn;
drivers/xen/grant-table.c
1084
for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
drivers/xen/grant-table.c
1085
pfn++, i++) {
drivers/xen/grant-table.c
1086
struct page *page = pfn_to_page(pfn);
drivers/xen/grant-table.c
1465
xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
drivers/xen/grant-table.c
820
xen_pfn_t *pfn;
drivers/xen/grant-table.c
834
pfn = kzalloc_objs(pfn[0], max_nr_gframes);
drivers/xen/grant-table.c
835
if (!pfn) {
drivers/xen/grant-table.c
840
pfn[i] = XEN_PFN_DOWN(addr) + i;
drivers/xen/grant-table.c
843
xen_auto_xlat_grant_frames.pfn = pfn;
drivers/xen/grant-table.c
854
kfree(xen_auto_xlat_grant_frames.pfn);
drivers/xen/grant-table.c
857
xen_auto_xlat_grant_frames.pfn = NULL;
drivers/xen/mem-reservation.c
37
unsigned long pfn = page_to_pfn(page);
drivers/xen/mem-reservation.c
48
set_phys_to_machine(pfn, frames[i]);
drivers/xen/mem-reservation.c
51
(unsigned long)__va(pfn << PAGE_SHIFT),
drivers/xen/mem-reservation.c
65
unsigned long pfn = page_to_pfn(page);
drivers/xen/mem-reservation.c
75
(unsigned long)__va(pfn << PAGE_SHIFT),
drivers/xen/mem-reservation.c
79
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
drivers/xen/privcmd.c
809
xen_pfn_t pfn =
drivers/xen/privcmd.c
812
pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
drivers/xen/unpopulated-alloc.c
112
xen_pfn_t pfn = PFN_DOWN(res->start);
drivers/xen/unpopulated-alloc.c
115
if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
drivers/xen/xen-scsiback.c
208
unsigned long pfn = page_to_pfn(page);
drivers/xen/xen-scsiback.c
210
return (unsigned long)pfn_to_kaddr(pfn);
fs/dax.c
1042
const struct iomap_iter *iter, void *entry, unsigned long pfn,
fs/dax.c
1046
void *new_entry = dax_make_entry(pfn, flags);
fs/dax.c
1102
unsigned long pfn, index, count, end;
fs/dax.c
1159
pfn = dax_to_pfn(entry);
fs/dax.c
1167
pfn_mkclean_range(pfn, count, index, vma);
fs/dax.c
1172
dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
fs/dax.c
1362
unsigned long pfn = zero_pfn(vaddr);
fs/dax.c
1365
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
fs/dax.c
1367
ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), false);
fs/dax.c
1757
unsigned long pfn)
fs/dax.c
1761
*pfnp = pfn;
fs/dax.c
1816
unsigned long pfn;
fs/dax.c
1835
err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
fs/dax.c
1839
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
fs/dax.c
1849
return dax_fault_synchronous_pfnp(pfnp, pfn);
fs/dax.c
1853
ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn), write);
fs/dax.c
1855
ret = vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn), write);
fs/dax.c
2093
unsigned long pfn, unsigned int order)
fs/dax.c
2115
folio = pfn_folio(pfn);
fs/dax.c
2142
unsigned long pfn)
fs/dax.c
2151
return dax_insert_pfn_mkwrite(vmf, pfn, order);
fs/dax.c
77
static void *dax_make_entry(unsigned long pfn, unsigned long flags)
fs/dax.c
79
return xa_mk_value(flags | (pfn << DAX_SHIFT));
fs/ext4/file.c
745
unsigned long pfn;
fs/ext4/file.c
762
result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops);
fs/ext4/file.c
771
result = dax_finish_sync_fault(vmf, order, pfn);
fs/fuse/dax.c
759
unsigned long pfn;
fs/fuse/dax.c
778
ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops);
fs/fuse/dax.c
787
ret = dax_finish_sync_fault(vmf, order, pfn);
fs/fuse/virtio_fs.c
1008
void **kaddr, unsigned long *pfn)
fs/fuse/virtio_fs.c
1016
if (pfn)
fs/fuse/virtio_fs.c
1017
*pfn = PHYS_PFN(fs->window_phys_addr + offset);
fs/hugetlbfs/inode.c
348
unsigned long addr, unsigned long pfn)
fs/hugetlbfs/inode.c
360
if (pte_pfn(pte) == pfn)
fs/hugetlbfs/inode.c
405
unsigned long pfn = folio_pfn(folio);
fs/hugetlbfs/inode.c
421
if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
fs/hugetlbfs/inode.c
471
if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
fs/proc/kcore.c
163
unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
fs/proc/kcore.c
169
start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
fs/proc/kcore.c
170
end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
fs/proc/kcore.c
202
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
fs/proc/kcore.c
208
if (!pfn_valid(pfn))
fs/proc/kcore.c
211
p = pfn_to_page(pfn);
fs/proc/kcore.c
487
unsigned long pfn;
fs/proc/kcore.c
557
pfn = phys >> PAGE_SHIFT;
fs/proc/kcore.c
558
page = pfn_to_online_page(pfn);
fs/proc/kcore.c
566
is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
fs/proc/kcore.c
567
pfn_is_unaccepted_memory(pfn)) {
fs/proc/kcore.c
77
static int (*mem_pfn_is_ram)(unsigned long pfn);
fs/proc/kcore.c
79
int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
fs/proc/kcore.c
87
static int pfn_is_ram(unsigned long pfn)
fs/proc/kcore.c
90
return mem_pfn_is_ram(pfn);
fs/proc/page.c
110
pfn++;
fs/proc/page.c
194
} else if (is_huge_zero_pfn(ps.pfn)) {
fs/proc/page.c
197
} else if (is_zero_pfn(ps.pfn)) {
fs/proc/page.c
69
unsigned long pfn;
fs/proc/page.c
73
pfn = src / KPMSIZE;
fs/proc/page.c
85
page = pfn_to_online_page(pfn);
fs/proc/vmcore.c
118
static bool pfn_is_ram(unsigned long pfn)
fs/proc/vmcore.c
127
ret = cb->pfn_is_ram(cb, pfn);
fs/proc/vmcore.c
162
unsigned long pfn, offset;
fs/proc/vmcore.c
171
pfn = (unsigned long)(*ppos / PAGE_SIZE);
fs/proc/vmcore.c
181
if (!pfn_is_ram(pfn)) {
fs/proc/vmcore.c
185
tmp = copy_oldmem_page_encrypted(iter, pfn,
fs/proc/vmcore.c
189
tmp = copy_oldmem_page(iter, pfn, nr_bytes,
fs/proc/vmcore.c
200
++pfn;
fs/proc/vmcore.c
253
unsigned long from, unsigned long pfn,
fs/proc/vmcore.c
257
return remap_pfn_range(vma, from, pfn, size, prot);
fs/proc/vmcore.c
264
unsigned long pfn, size_t csize, unsigned long offset)
fs/proc/vmcore.c
266
return copy_oldmem_page(iter, pfn, csize, offset);
fs/proc/vmcore.c
523
unsigned long from, unsigned long pfn,
fs/proc/vmcore.c
531
pos_start = pfn;
fs/proc/vmcore.c
532
pos_end = pfn + (size >> PAGE_SHIFT);
fs/proc/vmcore.c
573
unsigned long from, unsigned long pfn,
fs/proc/vmcore.c
584
ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
fs/proc/vmcore.c
586
ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
fs/proc/vmcore.c
612
u64 pfn;
fs/proc/vmcore.c
615
pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
fs/proc/vmcore.c
616
if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
fs/ramfs/file-nommu.c
206
unsigned long maxpages, lpages, nr_folios, loop, ret, nr_pages, pfn;
fs/ramfs/file-nommu.c
236
pfn = folio_pfn(fbatch.folios[0]);
fs/ramfs/file-nommu.c
240
if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
fs/xfs/xfs_file.c
1878
unsigned long pfn;
fs/xfs/xfs_file.c
1884
ret = dax_iomap_fault(vmf, order, &pfn, NULL,
fs/xfs/xfs_file.c
1889
ret = dax_finish_sync_fault(vmf, order, pfn);
include/asm-generic/cacheflush.h
45
unsigned long pfn)
include/asm-generic/io.h
1283
extern int devmem_is_allowed(unsigned long pfn);
include/asm-generic/memory_model.h
18
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
include/asm-generic/memory_model.h
26
static inline int pfn_valid(unsigned long pfn)
include/asm-generic/memory_model.h
30
return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
include/asm-generic/memory_model.h
35
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
include/asm-generic/memory_model.h
36
for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
include/asm-generic/memory_model.h
37
(pfn) < min_t(unsigned long, (end_pfn), \
include/asm-generic/memory_model.h
39
(pfn)++)
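
Note: the for_each_valid_pfn() iterator defined above bounds the walk to pfns that pfn_valid() would accept. Typical usage, sketched:

    unsigned long pfn;

    for_each_valid_pfn(pfn, start_pfn, end_pfn) {
            struct page *page = pfn_to_page(pfn);

            /* operate on page ... */
    }
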
include/asm-generic/memory_model.h
46
#define __pfn_to_page(pfn) (vmemmap + (pfn))
include/asm-generic/memory_model.h
60
#define __pfn_to_page(pfn) \
include/asm-generic/memory_model.h
61
({ unsigned long __pfn = (pfn); \
include/asm-generic/memory_model.h
71
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
include/drm/drm_pagemap.h
206
unsigned long npages, unsigned long *pfn);
include/hyperv/hvgdk_mini.h
202
u64 pfn : 52;
include/hyperv/hvgdk_mini.h
430
u64 pfn : 52;
include/linux/crash_dump.h
121
bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
include/linux/crash_dump.h
28
unsigned long from, unsigned long pfn,
include/linux/crash_dump.h
31
ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
include/linux/crash_dump.h
33
ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
include/linux/dax.h
246
enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
include/linux/dax.h
263
unsigned int order, unsigned long pfn);
include/linux/device-mapper.h
159
unsigned long *pfn);
include/linux/gfp.h
466
void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
include/linux/gfp.h
467
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
include/linux/highmem-internal.h
121
static inline void *kmap_atomic_pfn(unsigned long pfn)
include/linux/highmem-internal.h
129
return __kmap_local_pfn_prot(pfn, kmap_prot);
include/linux/highmem-internal.h
206
static inline void *kmap_local_pfn(unsigned long pfn)
include/linux/highmem-internal.h
208
return kmap_local_page(pfn_to_page(pfn));
include/linux/highmem-internal.h
233
static inline void *kmap_atomic_pfn(unsigned long pfn)
include/linux/highmem-internal.h
235
return kmap_atomic(pfn_to_page(pfn));
include/linux/highmem-internal.h
9
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
include/linux/highmem-internal.h
95
static inline void *kmap_local_pfn(unsigned long pfn)
include/linux/highmem-internal.h
97
return __kmap_local_pfn_prot(pfn, kmap_prot);
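
Note: both the HIGHMEM and !HIGHMEM variants of kmap_local_pfn() quoted above hand back a short-lived, CPU-local mapping of a single pfn; it must be released with kunmap_local(), and nested mappings unmap in reverse order. A sketch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void demo_zero_pfn(unsigned long pfn)
    {
            void *addr = kmap_local_pfn(pfn);   /* temporary mapping */

            memset(addr, 0, PAGE_SIZE);
            kunmap_local(addr);                 /* release promptly */
    }
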
include/linux/huge_mm.h
40
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
include/linux/huge_mm.h
42
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
include/linux/huge_mm.h
535
static inline bool is_huge_zero_pfn(unsigned long pfn)
include/linux/huge_mm.h
537
return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
include/linux/huge_mm.h
720
static inline bool is_huge_zero_pfn(unsigned long pfn)
include/linux/hugetlb.h
156
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
include/linux/hugetlb.h
425
static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
include/linux/hyperv.h
608
u64 pfn[];
include/linux/hyperv.h
80
u64 pfn;
include/linux/io.h
187
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
include/linux/io.h
189
u64 from = ((u64)pfn) << PAGE_SHIFT;
include/linux/io.h
194
if (!devmem_is_allowed(pfn))
include/linux/io.h
197
pfn++;
include/linux/io.h
202
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
include/linux/iova.h
103
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
include/linux/iova.h
115
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
include/linux/iova.h
132
unsigned long pfn,
include/linux/iova.h
159
unsigned long pfn)
include/linux/iova.h
89
void free_iova(struct iova_domain *iovad, unsigned long pfn);
include/linux/iova.h
94
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
include/linux/kcore.h
26
extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
include/linux/kho/abi/memfd.h
54
u64 pfn:52;
include/linux/kho_radix_tree.h
42
int kho_radix_add_page(struct kho_radix_tree *tree, unsigned long pfn,
include/linux/kho_radix_tree.h
45
void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
include/linux/kho_radix_tree.h
53
static inline int kho_radix_add_page(struct kho_radix_tree *tree, long pfn,
include/linux/kho_radix_tree.h
60
unsigned long pfn, unsigned int order) { }
include/linux/kvm_host.h
107
static inline bool is_error_pfn(kvm_pfn_t pfn)
include/linux/kvm_host.h
109
return !!(pfn & KVM_PFN_ERR_MASK);
include/linux/kvm_host.h
116
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
include/linux/kvm_host.h
118
return pfn == KVM_PFN_ERR_SIGPENDING;
include/linux/kvm_host.h
126
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
include/linux/kvm_host.h
128
return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
include/linux/kvm_host.h
132
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
include/linux/kvm_host.h
134
return pfn == KVM_PFN_NOSLOT;
include/linux/kvm_host.h
1914
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
include/linux/kvm_host.h
1916
return (hpa_t)pfn << PAGE_SHIFT;
include/linux/kvm_host.h
2542
gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
include/linux/kvm_host.h
2547
kvm_pfn_t *pfn, struct page **page,
include/linux/kvm_host.h
2556
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
include/linux/kvm_host.h
2581
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
include/linux/kvm_host.h
295
kvm_pfn_t pfn;
include/linux/kvm_types.h
94
kvm_pfn_t pfn;
include/linux/memblock.h
308
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
include/linux/memory-failure.h
11
unsigned long pfn, pgoff_t *pgoff);
include/linux/memory.h
188
static inline unsigned long pfn_to_block_id(unsigned long pfn)
include/linux/memory.h
190
return memory_block_id(pfn_to_section_nr(pfn));
include/linux/memory_hotplug.h
109
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
include/linux/memory_hotplug.h
111
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
include/linux/memory_hotplug.h
112
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
include/linux/memory_hotplug.h
180
#define pfn_to_online_page(pfn) \
include/linux/memory_hotplug.h
183
if (pfn_valid(pfn)) \
include/linux/memory_hotplug.h
184
___page = pfn_to_page(pfn); \
include/linux/memory_hotplug.h
306
extern int sparse_add_section(int nid, unsigned long pfn,
include/linux/memory_hotplug.h
309
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
include/linux/memory_hotplug.h
32
struct page *pfn_to_online_page(unsigned long pfn);
include/linux/memremap.h
100
int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
include/linux/memremap.h
233
struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
include/linux/memremap.h
234
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
include/linux/memremap.h
282
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
include/linux/memremap.h
287
static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
include/linux/migrate.h
146
static inline unsigned long migrate_pfn(unsigned long pfn)
include/linux/migrate.h
148
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
include/linux/mlx4/device.h
711
unsigned long pfn;
include/linux/mm.h
1788
extern int page_is_ram(unsigned long pfn);
include/linux/mm.h
2530
static inline struct folio *pfn_folio(unsigned long pfn)
include/linux/mm.h
2532
return page_folio(pfn_to_page(pfn));
include/linux/mm.h
2728
unsigned long node, unsigned long pfn)
include/linux/mm.h
2733
set_page_section(page, pfn_to_section_nr(pfn));
include/linux/mm.h
3119
unsigned long pfn;
include/linux/mm.h
3997
static inline int early_pfn_to_nid(unsigned long pfn)
include/linux/mm.h
4003
extern int __meminit early_pfn_to_nid(unsigned long pfn);
include/linux/mm.h
4497
unsigned long pfn, unsigned long size, pgprot_t pgprot);
include/linux/mm.h
4512
unsigned long pfn);
include/linux/mm.h
4514
unsigned long pfn, pgprot_t pgprot);
include/linux/mm.h
4516
unsigned long pfn);
include/linux/mm.h
4518
unsigned long addr, unsigned long pfn);
include/linux/mm.h
4535
static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
include/linux/mm.h
4538
return pfn;
include/linux/mm.h
4546
const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
include/linux/mm.h
4549
return remap_pfn_range(vma, addr, pfn, size, prot);
include/linux/mm.h
4858
struct page * __populate_section_memmap(unsigned long pfn,
include/linux/mm.h
4966
extern int memory_failure(unsigned long pfn, int flags);
include/linux/mm.h
4967
extern int unpoison_memory(unsigned long pfn);
include/linux/mm.h
4969
extern int soft_offline_page(unsigned long pfn, int flags);
include/linux/mm.h
4975
extern void memory_failure_queue(unsigned long pfn, int flags);
include/linux/mm.h
4976
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
include/linux/mm.h
4978
void num_poisoned_pages_inc(unsigned long pfn);
include/linux/mm.h
4979
void num_poisoned_pages_sub(unsigned long pfn, long i);
include/linux/mm.h
4981
static inline void memory_failure_queue(unsigned long pfn, int flags)
include/linux/mm.h
4985
static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
include/linux/mm.h
4991
static inline void num_poisoned_pages_inc(unsigned long pfn)
include/linux/mm.h
4995
static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
include/linux/mm.h
5001
extern void memblk_nr_poison_inc(unsigned long pfn);
include/linux/mm.h
5002
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
include/linux/mm.h
5004
static inline void memblk_nr_poison_inc(unsigned long pfn)
include/linux/mm.h
5008
static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
include/linux/mm.h
5014
static inline int arch_memory_failure(unsigned long pfn, int flags)
include/linux/mm.h
5129
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
include/linux/mm.h
5131
return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
include/linux/mm.h
5219
unsigned long pfn;
include/linux/mm_types.h
807
unsigned long pfn;
include/linux/mmzone.h
1196
static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
include/linux/mmzone.h
1198
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
include/linux/mmzone.h
156
# define is_migrate_cma_folio(folio, pfn) \
include/linux/mmzone.h
157
(get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
include/linux/mmzone.h
161
# define is_migrate_cma_folio(folio, pfn) false
include/linux/mmzone.h
1926
#define pfn_to_nid(pfn) (0)
include/linux/mmzone.h
1950
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
include/linux/mmzone.h
1952
return pfn >> PFN_SECTION_SHIFT;
include/linux/mmzone.h
1959
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
include/linux/mmzone.h
1960
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
include/linux/mmzone.h
1975
#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
include/linux/mmzone.h
1976
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
include/linux/mmzone.h
2184
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
include/linux/mmzone.h
2186
return __nr_to_section(pfn_to_section_nr(pfn));
include/linux/mmzone.h
2191
static inline int subsection_map_index(unsigned long pfn)
include/linux/mmzone.h
2193
return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
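
The mmzone.h entries above are the core sparsemem arithmetic: a pfn's section number is its high bits, and the subsection index comes from the bits below the section boundary. A standalone sketch, assuming x86-64-style sizes (128 MiB sections, 2 MiB subsections), which are not taken from the listing:

#include <stdio.h>

#define PAGE_SHIFT            12
#define SECTION_SIZE_BITS     27            /* assumed: 128 MiB sections */
#define SUBSECTION_SHIFT      21            /* assumed: 2 MiB subsections */
#define PFN_SECTION_SHIFT     (SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION     (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK     (~(PAGES_PER_SECTION - 1))
#define PAGES_PER_SUBSECTION  (1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))

int main(void)
{
        unsigned long pfn = 0x123456;

        printf("section nr      %lu\n", pfn >> PFN_SECTION_SHIFT);
        printf("section start   %#lx\n", pfn & PAGE_SECTION_MASK);
        printf("subsection idx  %lu\n",
               (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION);
        return 0;
}
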
include/linux/mmzone.h
2197
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
include/linux/mmzone.h
2199
int idx = subsection_map_index(pfn);
include/linux/mmzone.h
2205
static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
include/linux/mmzone.h
2208
int idx = subsection_map_index(*pfn);
include/linux/mmzone.h
2222
*pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
include/linux/mmzone.h
2226
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
include/linux/mmzone.h
2231
static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
include/linux/mmzone.h
2252
static inline int pfn_valid(unsigned long pfn)
include/linux/mmzone.h
2263
if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
include/linux/mmzone.h
2266
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
include/linux/mmzone.h
2268
ms = __pfn_to_section(pfn);
include/linux/mmzone.h
2278
ret = early_section(ms) || pfn_section_valid(ms, pfn);
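
Taken together, the pfn_valid() fragments above check three things in order: the pfn must survive a pfn-to-phys-to-pfn round trip, its section number must be in range, and the section (or the matching subsection bit) must be present. A toy, self-contained model of that control flow, with the section lookup reduced to a stub and assumed sizes:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT         12
#define MAX_PHYSMEM_BITS   46               /* assumption for the toy */
#define PFN_SECTION_SHIFT  15               /* 128 MiB sections       */
#define NR_MEM_SECTIONS    (1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT - PFN_SECTION_SHIFT))

static bool section_present(unsigned long nr)
{
        return nr < 4;                      /* pretend 4 sections exist */
}

static bool toy_pfn_valid(unsigned long pfn)
{
        unsigned long paddr = pfn << PAGE_SHIFT;

        if (paddr >> PAGE_SHIFT != pfn)     /* PHYS_PFN(PFN_PHYS(pfn)) check */
                return false;
        if ((pfn >> PFN_SECTION_SHIFT) >= NR_MEM_SECTIONS)
                return false;
        return section_present(pfn >> PFN_SECTION_SHIFT);
}

int main(void)
{
        printf("%d %d\n", toy_pfn_valid(0x1000), toy_pfn_valid(~0UL));
        return 0;
}
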
include/linux/mmzone.h
2285
static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)
include/linux/mmzone.h
2287
unsigned long nr = pfn_to_section_nr(pfn);
include/linux/mmzone.h
2291
while (nr <= __highest_present_section_nr && pfn < end_pfn) {
include/linux/mmzone.h
2292
struct mem_section *ms = __pfn_to_section(pfn);
include/linux/mmzone.h
2295
(early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
include/linux/mmzone.h
2297
return pfn;
include/linux/mmzone.h
2302
pfn = section_nr_to_pfn(nr);
include/linux/mmzone.h
2309
static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
include/linux/mmzone.h
2311
pfn++;
include/linux/mmzone.h
2313
if (pfn >= end_pfn)
include/linux/mmzone.h
2322
if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
include/linux/mmzone.h
2324
return pfn;
include/linux/mmzone.h
2326
return first_valid_pfn(pfn, end_pfn);
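
The next_valid_pfn() fast path above appears to exploit the fact that it is only reached after a valid pfn: if the incremented pfn still has low bits set below the (sub)section boundary, it lies in the same, already validated (sub)section and no new lookup is needed; only at a boundary does it fall back to first_valid_pfn(). A standalone version of that boundary test, with an assumed 2 MiB subsection:

#include <stdio.h>

#define PAGES_PER_SUBSECTION  (1UL << 9)
#define PAGE_SUBSECTION_MASK  (~(PAGES_PER_SUBSECTION - 1))

static int same_subsection_as_previous(unsigned long pfn)
{
        return (pfn & ~PAGE_SUBSECTION_MASK) != 0;
}

int main(void)
{
        printf("%d %d\n",
               same_subsection_as_previous(0x201),   /* mid-subsection */
               same_subsection_as_previous(0x200));  /* boundary: slow path */
        return 0;
}
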
include/linux/mmzone.h
2337
static inline int pfn_in_present_section(unsigned long pfn)
include/linux/mmzone.h
2339
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
include/linux/mmzone.h
2341
return present_section(__pfn_to_section(pfn));
include/linux/mmzone.h
2365
#define pfn_to_nid(pfn) \
include/linux/mmzone.h
2367
unsigned long __pfn_to_nid_pfn = (pfn); \
include/linux/mmzone.h
2371
#define pfn_to_nid(pfn) (0)
include/linux/mmzone.h
37
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
include/linux/page_ext.h
129
unsigned long pfn)
include/linux/page_ext.h
132
iter->start_pfn = pfn;
include/linux/page_ext.h
133
iter->page_ext = page_ext_lookup(pfn);
include/linux/page_ext.h
148
unsigned long pfn;
include/linux/page_ext.h
154
pfn = iter->start_pfn + iter->index;
include/linux/page_ext.h
156
if (page_ext_iter_next_fast_possible(pfn))
include/linux/page_ext.h
159
iter->page_ext = page_ext_lookup(pfn);
include/linux/page_ext.h
98
extern struct page_ext *page_ext_lookup(unsigned long pfn);
include/linux/pageblock-flags.h
76
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
include/linux/pageblock-flags.h
77
#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
include/linux/pageblock-flags.h
78
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
include/linux/pageblock-flags.h
79
#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
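
One detail worth noting in the pageblock helpers above: pageblock_end_pfn() aligns (pfn + 1) rather than pfn, so it always moves past a pfn that is already block-aligned. A standalone check, assuming 512-page blocks (2 MiB with 4 KiB pages):

#include <stdio.h>

#define pageblock_nr_pages  512UL
#define ALIGN_DOWN(x, a)    ((x) & ~((a) - 1))
#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long pfn = 1024;   /* exactly on a block boundary */

        printf("start %lu end %lu\n",
               ALIGN_DOWN(pfn, pageblock_nr_pages),   /* 1024 */
               ALIGN(pfn + 1, pageblock_nr_pages));   /* 1536, not 1024 */
        return 0;
}
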
include/linux/pageblock-flags.h
85
unsigned long pfn);
include/linux/pageblock-flags.h
86
bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
include/linux/pageblock-flags.h
88
void set_pfnblock_bit(const struct page *page, unsigned long pfn,
include/linux/pageblock-flags.h
90
void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
include/linux/pgtable.h
1869
static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
include/linux/pgtable.h
1875
static inline int pfnmap_track(unsigned long pfn, unsigned long size,
include/linux/pgtable.h
1881
static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
include/linux/pgtable.h
1908
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
include/linux/pgtable.h
1929
int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
include/linux/pgtable.h
1938
void pfnmap_untrack(unsigned long pfn, unsigned long size);
include/linux/pgtable.h
1951
static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
include/linux/pgtable.h
1953
pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
include/linux/pgtable.h
1971
static inline int is_zero_pfn(unsigned long pfn)
include/linux/pgtable.h
1974
unsigned long offset_from_zero_pfn = pfn - zero_page_pfn;
include/linux/pgtable.h
1982
static inline int is_zero_pfn(unsigned long pfn)
include/linux/pgtable.h
1986
return pfn == zero_page_pfn;
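
The two is_zero_pfn() variants above correspond to architectures with a single shared zero page and those that keep a contiguous run of "colored" zero pages; the first subtracts the base pfn so one unsigned comparison covers both bounds. An illustrative standalone version of that range test (base and run length are made up):

#include <stdio.h>

static unsigned long zero_page_pfn = 0x100;
static unsigned long zero_page_run = 8;     /* assumed number of zero pages */

static int is_zero_pfn(unsigned long pfn)
{
        return pfn - zero_page_pfn < zero_page_run;  /* wraps for pfn < base */
}

int main(void)
{
        printf("%d %d %d\n", is_zero_pfn(0x0ff), is_zero_pfn(0x100),
               is_zero_pfn(0x107));
        return 0;
}
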
include/linux/pgtable.h
2157
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
include/linux/pgtable.h
2167
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
include/linux/rmap.h
865
unsigned long pfn;
include/linux/rmap.h
878
.pfn = folio_pfn(_folio), \
include/linux/rmap.h
932
unsigned long pfn, unsigned long nr_pages);
include/linux/rmap.h
934
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
include/linux/set_memory.h
62
int set_mce_nospec(unsigned long pfn);
include/linux/set_memory.h
63
int clear_mce_nospec(unsigned long pfn);
include/linux/set_memory.h
65
static inline int set_mce_nospec(unsigned long pfn)
include/linux/set_memory.h
69
static inline int clear_mce_nospec(unsigned long pfn)
include/linux/suspend.h
398
int pfn_is_nosave(unsigned long pfn);
include/linux/vfio_pci_core.h
181
struct vm_fault *vmf, unsigned long pfn,
include/linux/vfio_pci_core.h
229
unsigned long pfn,
include/linux/vfio_pci_core.h
234
!IS_ALIGNED(pfn, 1 << order)));
include/linux/vmalloc.h
115
u64 pfn, unsigned int max_page_shift)
include/rdma/ib_verbs.h
3059
unsigned long pfn, unsigned long size, pgprot_t prot,
include/trace/events/cma.h
106
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
include/trace/events/cma.h
109
TP_ARGS(name, pfn, page, count, align),
include/trace/events/cma.h
113
__field(unsigned long, pfn)
include/trace/events/cma.h
121
__entry->pfn = pfn;
include/trace/events/cma.h
129
__entry->pfn,
include/trace/events/cma.h
13
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
include/trace/events/cma.h
16
TP_ARGS(name, pfn, page, count),
include/trace/events/cma.h
20
__field(unsigned long, pfn)
include/trace/events/cma.h
27
__entry->pfn = pfn;
include/trace/events/cma.h
34
__entry->pfn,
include/trace/events/cma.h
72
TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
include/trace/events/cma.h
75
TP_ARGS(name, pfn, page, count, align, errorno),
include/trace/events/cma.h
79
__field(unsigned long, pfn)
include/trace/events/cma.h
88
__entry->pfn = pfn;
include/trace/events/cma.h
97
__entry->pfn,
include/trace/events/filemap.h
24
__field(unsigned long, pfn)
include/trace/events/filemap.h
31
__entry->pfn = folio_pfn(folio);
include/trace/events/filemap.h
44
__entry->pfn,
include/trace/events/huge_memory.h
122
__field(unsigned long, pfn)
include/trace/events/huge_memory.h
129
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/huge_memory.h
136
__entry->pfn,
include/trace/events/huge_memory.h
178
__field(unsigned long, pfn)
include/trace/events/huge_memory.h
187
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/huge_memory.h
196
__entry->pfn,
include/trace/events/huge_memory.h
65
__field(unsigned long, pfn)
include/trace/events/huge_memory.h
74
__entry->pfn = folio ? folio_pfn(folio) : -1;
include/trace/events/huge_memory.h
83
__entry->pfn,
include/trace/events/kmem.h
146
__field( unsigned long, pfn )
include/trace/events/kmem.h
151
__entry->pfn = page_to_pfn(page);
include/trace/events/kmem.h
156
pfn_to_page(__entry->pfn),
include/trace/events/kmem.h
157
__entry->pfn,
include/trace/events/kmem.h
168
__field( unsigned long, pfn )
include/trace/events/kmem.h
172
__entry->pfn = page_to_pfn(page);
include/trace/events/kmem.h
176
pfn_to_page(__entry->pfn),
include/trace/events/kmem.h
177
__entry->pfn)
include/trace/events/kmem.h
188
__field( unsigned long, pfn )
include/trace/events/kmem.h
195
__entry->pfn = page ? page_to_pfn(page) : -1UL;
include/trace/events/kmem.h
202
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
include/trace/events/kmem.h
203
__entry->pfn != -1UL ? __entry->pfn : 0,
include/trace/events/kmem.h
217
__field( unsigned long, pfn )
include/trace/events/kmem.h
224
__entry->pfn = page ? page_to_pfn(page) : -1UL;
include/trace/events/kmem.h
231
__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
include/trace/events/kmem.h
232
__entry->pfn != -1UL ? __entry->pfn : 0,
include/trace/events/kmem.h
253
__field( unsigned long, pfn )
include/trace/events/kmem.h
259
__entry->pfn = page ? page_to_pfn(page) : -1UL;
include/trace/events/kmem.h
265
pfn_to_page(__entry->pfn), __entry->pfn,
include/trace/events/kmem.h
280
__field( unsigned long, pfn )
include/trace/events/kmem.h
289
__entry->pfn = page_to_pfn(page);
include/trace/events/kmem.h
299
pfn_to_page(__entry->pfn),
include/trace/events/kmem.h
300
__entry->pfn,
include/trace/events/ksm.h
132
TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
include/trace/events/ksm.h
134
TP_ARGS(pfn, rmap_item, mm, err),
include/trace/events/ksm.h
137
__field(unsigned long, pfn)
include/trace/events/ksm.h
144
__entry->pfn = pfn;
include/trace/events/ksm.h
151
__entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
include/trace/events/ksm.h
167
TP_PROTO(void *ksm_page, unsigned long pfn, void *rmap_item, void *mm, int err),
include/trace/events/ksm.h
169
TP_ARGS(ksm_page, pfn, rmap_item, mm, err),
include/trace/events/ksm.h
173
__field(unsigned long, pfn)
include/trace/events/ksm.h
181
__entry->pfn = pfn;
include/trace/events/ksm.h
189
__entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
include/trace/events/ksm.h
201
TP_PROTO(unsigned long pfn),
include/trace/events/ksm.h
203
TP_ARGS(pfn),
include/trace/events/ksm.h
206
__field(unsigned long, pfn)
include/trace/events/ksm.h
210
__entry->pfn = pfn;
include/trace/events/ksm.h
213
TP_printk("pfn %lu", __entry->pfn)
include/trace/events/ksm.h
228
TP_PROTO(unsigned long pfn, void *rmap_item, void *mm),
include/trace/events/ksm.h
230
TP_ARGS(pfn, rmap_item, mm),
include/trace/events/ksm.h
233
__field(unsigned long, pfn)
include/trace/events/ksm.h
239
__entry->pfn = pfn;
include/trace/events/ksm.h
245
__entry->pfn, __entry->rmap_item, __entry->mm)
include/trace/events/memory-failure.h
71
TP_PROTO(unsigned long pfn,
include/trace/events/memory-failure.h
75
TP_ARGS(pfn, type, result),
include/trace/events/memory-failure.h
78
__field(unsigned long, pfn)
include/trace/events/memory-failure.h
84
__entry->pfn = pfn;
include/trace/events/memory-failure.h
90
__entry->pfn,
include/trace/events/page_pool.h
53
__field(unsigned long, pfn)
include/trace/events/page_pool.h
60
__entry->pfn = netmem_pfn_trace(netmem);
include/trace/events/page_pool.h
65
__entry->netmem & NET_IOV, __entry->pfn, __entry->release)
include/trace/events/page_pool.h
79
__field(unsigned long, pfn)
include/trace/events/page_pool.h
86
__entry->pfn = netmem_pfn_trace(netmem);
include/trace/events/page_pool.h
91
__entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
include/trace/events/page_ref.h
20
__field(unsigned long, pfn)
include/trace/events/page_ref.h
30
__entry->pfn = page_to_pfn(page);
include/trace/events/page_ref.h
40
__entry->pfn,
include/trace/events/page_ref.h
68
__field(unsigned long, pfn)
include/trace/events/page_ref.h
79
__entry->pfn = page_to_pfn(page);
include/trace/events/page_ref.h
90
__entry->pfn,
include/trace/events/pagemap.h
36
__field(unsigned long, pfn )
include/trace/events/pagemap.h
43
__entry->pfn = folio_pfn(folio);
include/trace/events/pagemap.h
51
__entry->pfn,
include/trace/events/pagemap.h
69
__field(unsigned long, pfn )
include/trace/events/pagemap.h
74
__entry->pfn = folio_pfn(folio);
include/trace/events/pagemap.h
77
TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
include/trace/events/vmscan.h
364
__field(unsigned long, pfn)
include/trace/events/vmscan.h
369
__entry->pfn = folio_pfn(folio);
include/trace/events/vmscan.h
375
pfn_to_page(__entry->pfn),
include/trace/events/vmscan.h
376
__entry->pfn,
include/trace/events/xen.h
287
TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
include/trace/events/xen.h
288
TP_ARGS(mm, pfn, level, pinned),
include/trace/events/xen.h
291
__field(unsigned long, pfn)
include/trace/events/xen.h
296
__entry->pfn = pfn;
include/trace/events/xen.h
300
__entry->mm, __entry->pfn, __entry->level,
include/trace/events/xen.h
305
TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
include/trace/events/xen.h
306
TP_ARGS(pfn, level, pinned),
include/trace/events/xen.h
308
__field(unsigned long, pfn)
include/trace/events/xen.h
312
TP_fast_assign(__entry->pfn = pfn;
include/trace/events/xen.h
316
__entry->pfn, __entry->level,
include/xen/arm/page.h
103
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
include/xen/arm/page.h
104
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
include/xen/arm/page.h
107
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
include/xen/arm/page.h
109
return __set_phys_to_machine(pfn, mfn);
include/xen/arm/page.h
15
#define phys_to_machine_mapping_valid(pfn) (1)
include/xen/arm/page.h
43
unsigned long __pfn_to_mfn(unsigned long pfn);
include/xen/arm/page.h
47
static inline unsigned long pfn_to_gfn(unsigned long pfn)
include/xen/arm/page.h
49
return pfn;
include/xen/arm/page.h
58
static inline unsigned long pfn_to_bfn(unsigned long pfn)
include/xen/arm/page.h
63
mfn = __pfn_to_mfn(pfn);
include/xen/arm/page.h
68
return pfn;
include/xen/grant_table.h
215
xen_pfn_t *pfn;
include/xen/interface/hvm/hvm_op.h
45
uint64_t pfn;
include/xen/xen-ops.h
47
xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
include/xen/xen-ops.h
51
xen_pfn_t *pfn, int nr, int *err_ptr,
kernel/bpf/sysfs_btf.c
25
unsigned long pfn = addr >> PAGE_SHIFT;
kernel/bpf/sysfs_btf.c
36
if (pfn + pages < pfn)
kernel/bpf/sysfs_btf.c
43
return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
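
The kernel/bpf/sysfs_btf.c lines above are a compact example of the standard remap_pfn_range() mmap pattern: compute the base pfn, reject requests whose pfn range would wrap, then map the region with the vma's protections. A hedged kernel-context sketch of the same shape; buf_phys and buf_size are hypothetical, not from the listing:

#include <linux/mm.h>
#include <linux/fs.h>

static phys_addr_t buf_phys;    /* hypothetical: set at init time */
static size_t buf_size;

static int buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = buf_phys >> PAGE_SHIFT;
	unsigned long vm_size = vma->vm_end - vma->vm_start;
	unsigned long pages = vm_size >> PAGE_SHIFT;

	if (vm_size + (vma->vm_pgoff << PAGE_SHIFT) > PAGE_ALIGN(buf_size))
		return -EINVAL;
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EACCES;
	if (pfn + pages < pfn)               /* pfn range must not wrap */
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vm_size, vma->vm_page_prot);
}
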
kernel/debug/kdb/kdb_support.c
347
unsigned long pfn;
kernel/debug/kdb/kdb_support.c
351
pfn = (addr >> PAGE_SHIFT);
kernel/debug/kdb/kdb_support.c
352
if (!pfn_valid(pfn))
kernel/debug/kdb/kdb_support.c
354
page = pfn_to_page(pfn);
kernel/dma/coherent.c
246
unsigned long pfn = mem->pfn_base + start + off;
kernel/dma/coherent.c
247
*ret = remap_pfn_range(vma, vma->vm_start, pfn,
kernel/dma/direct.c
523
unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
kernel/dma/direct.c
537
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
kernel/dma/swiotlb.c
864
unsigned long pfn = PFN_DOWN(orig_addr);
kernel/dma/swiotlb.c
894
if (PageHighMem(pfn_to_page(pfn))) {
kernel/dma/swiotlb.c
904
page = pfn_to_page(pfn);
kernel/dma/swiotlb.c
921
pfn++;
kernel/iomem.c
31
unsigned long pfn = PHYS_PFN(offset);
kernel/iomem.c
34
if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
kernel/kexec_core.c
357
unsigned long pfn, epfn, addr, eaddr;
kernel/kexec_core.c
362
pfn = page_to_boot_pfn(pages);
kernel/kexec_core.c
363
epfn = pfn + count;
kernel/kexec_core.c
364
addr = pfn << PAGE_SHIFT;
kernel/liveupdate/kexec_handover.c
1417
unsigned long pfn;
kernel/liveupdate/kexec_handover.c
1428
for (pfn = base_pfn; pfn < base_pfn + count;
kernel/liveupdate/kexec_handover.c
1429
pfn += pageblock_nr_pages)
kernel/liveupdate/kexec_handover.c
1430
init_cma_reserved_pageblock(pfn_to_page(pfn));
kernel/liveupdate/kexec_handover.c
1472
ulong pfn;
kernel/liveupdate/kexec_handover.c
1474
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
kernel/liveupdate/kexec_handover.c
1475
init_pageblock_migratetype(pfn_to_page(pfn),
kernel/liveupdate/kexec_handover.c
158
unsigned long pfn, unsigned int order)
kernel/liveupdate/kexec_handover.c
162
unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
kernel/liveupdate/kexec_handover.c
237
void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
kernel/liveupdate/kexec_handover.c
240
unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
kernel/liveupdate/kexec_handover.c
359
unsigned long pfn, unsigned long end_pfn)
kernel/liveupdate/kexec_handover.c
363
while (pfn < end_pfn) {
kernel/liveupdate/kexec_handover.c
364
order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
kernel/liveupdate/kexec_handover.c
366
kho_radix_del_page(tree, pfn, order);
kernel/liveupdate/kexec_handover.c
368
pfn += 1 << order;
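
The kexec handover loops above all share one idiom: cover [pfn, end_pfn) with the largest naturally aligned power-of-two chunks, where the order is capped both by the alignment of pfn (its trailing zero bits) and by the remaining length. A standalone version (using GCC builtins) that prints the decomposition:

#include <stdio.h>

static unsigned int ilog2_ul(unsigned long x)
{
        return 63 - __builtin_clzl(x);
}

int main(void)
{
        unsigned long pfn = 3, end_pfn = 20;

        while (pfn < end_pfn) {
                unsigned int order = pfn ? __builtin_ctzl(pfn) : 63;

                if (order > ilog2_ul(end_pfn - pfn))
                        order = ilog2_ul(end_pfn - pfn);
                printf("chunk at %lu, order %u\n", pfn, order);
                pfn += 1UL << order;    /* orders: 0, 2, 3, 2 for 3..20 */
        }
        return 0;
}
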
kernel/liveupdate/kexec_handover.c
458
unsigned long pfn = start_pfn;
kernel/liveupdate/kexec_handover.c
460
while (pfn < end_pfn) {
kernel/liveupdate/kexec_handover.c
462
min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
kernel/liveupdate/kexec_handover.c
463
struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
kernel/liveupdate/kexec_handover.c
467
pfn += 1 << order;
kernel/liveupdate/kexec_handover.c
820
const unsigned long pfn = folio_pfn(folio);
kernel/liveupdate/kexec_handover.c
823
if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
kernel/liveupdate/kexec_handover.c
826
return kho_radix_add_page(tree, pfn, order);
kernel/liveupdate/kexec_handover.c
841
const unsigned long pfn = folio_pfn(folio);
kernel/liveupdate/kexec_handover.c
844
kho_radix_del_page(tree, pfn, order);
kernel/liveupdate/kexec_handover.c
863
unsigned long pfn = start_pfn;
kernel/liveupdate/kexec_handover.c
872
while (pfn < end_pfn) {
kernel/liveupdate/kexec_handover.c
874
min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
kernel/liveupdate/kexec_handover.c
881
while (pfn_to_nid(pfn) != pfn_to_nid(pfn + (1UL << order) - 1))
kernel/liveupdate/kexec_handover.c
884
err = kho_radix_add_page(tree, pfn, order);
kernel/liveupdate/kexec_handover.c
886
failed_pfn = pfn;
kernel/liveupdate/kexec_handover.c
890
pfn += 1 << order;
kernel/liveupdate/kexec_handover.c
976
unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
kernel/liveupdate/kexec_handover.c
978
__kho_unpreserve(tree, pfn, pfn + 1);
kernel/liveupdate/kexec_handover.c
981
pfn = PHYS_PFN(chunk->phys[i]);
kernel/liveupdate/kexec_handover.c
982
__kho_unpreserve(tree, pfn, pfn + (1 << order));
kernel/power/snapshot.c
1090
unsigned long pfn;
kernel/power/snapshot.c
1097
for_each_valid_pfn(pfn, region->start_pfn, region->end_pfn) {
kernel/power/snapshot.c
1104
mem_bm_set_bit_check(bm, pfn);
kernel/power/snapshot.c
1197
unsigned long pfn;
kernel/power/snapshot.c
1204
pfn = memory_bm_next_pfn(bm);
kernel/power/snapshot.c
1205
while (pfn != BM_END_OF_MAP) {
kernel/power/snapshot.c
1206
if (pfn_valid(pfn))
kernel/power/snapshot.c
1207
clear_or_poison_free_page(pfn_to_page(pfn));
kernel/power/snapshot.c
1209
pfn = memory_bm_next_pfn(bm);
kernel/power/snapshot.c
1246
unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
kernel/power/snapshot.c
1257
for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
kernel/power/snapshot.c
1258
page = pfn_to_page(pfn);
kernel/power/snapshot.c
1277
pfn = page_to_pfn(page);
kernel/power/snapshot.c
1283
swsusp_set_page_free(pfn_to_page(pfn + i));
kernel/power/snapshot.c
1316
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1320
if (!pfn_valid(pfn))
kernel/power/snapshot.c
1323
page = pfn_to_online_page(pfn);
kernel/power/snapshot.c
1350
unsigned long pfn, max_zone_pfn;
kernel/power/snapshot.c
1357
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1358
if (saveable_highmem_page(zone, pfn))
kernel/power/snapshot.c
1375
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1379
if (!pfn_valid(pfn))
kernel/power/snapshot.c
1382
page = pfn_to_online_page(pfn);
kernel/power/snapshot.c
1395
&& (!kernel_page_present(page) || pfn_is_nosave(pfn)))
kernel/power/snapshot.c
1410
unsigned long pfn, max_zone_pfn;
kernel/power/snapshot.c
1419
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1420
if (saveable_page(zone, pfn))
kernel/power/snapshot.c
1467
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1470
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
kernel/power/snapshot.c
1504
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
kernel/power/snapshot.c
1525
unsigned long pfn, copy_pfn;
kernel/power/snapshot.c
1532
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1533
if (page_is_saveable(zone, pfn))
kernel/power/snapshot.c
1534
memory_bm_set_bit(orig_bm, pfn);
kernel/power/snapshot.c
1540
pfn = memory_bm_next_pfn(orig_bm);
kernel/power/snapshot.c
1541
if (unlikely(pfn == BM_END_OF_MAP))
kernel/power/snapshot.c
1543
if (copy_data_page(copy_pfn, pfn)) {
kernel/power/snapshot.c
1544
memory_bm_set_bit(zero_bm, pfn);
kernel/power/snapshot.c
1751
unsigned long pfn = memory_bm_next_pfn(&copy_bm);
kernel/power/snapshot.c
1752
struct page *page = pfn_to_page(pfn);
kernel/power/snapshot.c
1765
memory_bm_clear_bit(&copy_bm, pfn);
kernel/power/snapshot.c
2285
unsigned long pfn;
kernel/power/snapshot.c
2288
pfn = memory_bm_next_pfn(src);
kernel/power/snapshot.c
2289
while (pfn != BM_END_OF_MAP) {
kernel/power/snapshot.c
2290
memory_bm_set_bit(dst, pfn);
kernel/power/snapshot.c
2291
pfn = memory_bm_next_pfn(src);
kernel/power/snapshot.c
2303
unsigned long pfn;
kernel/power/snapshot.c
2307
pfn = memory_bm_next_pfn(free_pages_map);
kernel/power/snapshot.c
2308
while (pfn != BM_END_OF_MAP) {
kernel/power/snapshot.c
2310
pfn = memory_bm_next_pfn(free_pages_map);
kernel/power/snapshot.c
2417
unsigned long pfn;
kernel/power/snapshot.c
2421
pfn = memory_bm_next_pfn(bm);
kernel/power/snapshot.c
2422
while (pfn != BM_END_OF_MAP) {
kernel/power/snapshot.c
2423
if (PageHighMem(pfn_to_page(pfn)))
kernel/power/snapshot.c
2426
pfn = memory_bm_next_pfn(bm);
kernel/power/snapshot.c
2719
unsigned long pfn = memory_bm_next_pfn(bm);
kernel/power/snapshot.c
2721
if (pfn == BM_END_OF_MAP)
kernel/power/snapshot.c
2724
page = pfn_to_page(pfn);
kernel/power/snapshot.c
748
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
kernel/power/snapshot.c
757
if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
kernel/power/snapshot.c
764
if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
kernel/power/snapshot.c
786
((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
kernel/power/snapshot.c
790
block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
kernel/power/snapshot.c
805
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
kernel/power/snapshot.c
806
bm->cur.cur_pfn = pfn;
kernel/power/snapshot.c
810
*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
kernel/power/snapshot.c
815
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
kernel/power/snapshot.c
821
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
kernel/power/snapshot.c
826
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
kernel/power/snapshot.c
832
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
kernel/power/snapshot.c
839
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
kernel/power/snapshot.c
845
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
kernel/power/snapshot.c
863
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
kernel/power/snapshot.c
869
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
kernel/power/snapshot.c
874
static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
kernel/power/snapshot.c
879
return !memory_bm_find_bit(bm, pfn, &addr, &bit);
kernel/power/snapshot.c
931
unsigned long bits, pfn, pages;
kernel/power/snapshot.c
940
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
kernel/power/snapshot.c
942
bm->cur.cur_pfn = pfn;
kernel/power/snapshot.c
943
return pfn;
kernel/resource.c
566
unsigned long pfn, end_pfn;
kernel/resource.c
574
pfn = PFN_UP(res.start);
kernel/resource.c
576
if (end_pfn > pfn)
kernel/resource.c
577
ret = (*func)(pfn, end_pfn - pfn, arg);
kernel/resource.c
585
static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
kernel/resource.c
594
int __weak page_is_ram(unsigned long pfn)
kernel/resource.c
596
return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
lib/devmem_is_allowed.c
21
int devmem_is_allowed(unsigned long pfn)
lib/devmem_is_allowed.c
23
if (iomem_is_exclusive(PFN_PHYS(pfn)))
lib/devmem_is_allowed.c
25
if (!page_is_ram(pfn))
lib/test_hmm.c
213
unsigned long pfn;
lib/test_hmm.c
215
for (pfn = (range->start >> PAGE_SHIFT);
lib/test_hmm.c
216
pfn < (range->end >> PAGE_SHIFT);
lib/test_hmm.c
217
pfn++, pfns++) {
lib/test_hmm.c
236
entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
lib/test_hmm.c
247
unsigned long pfn;
lib/test_hmm.c
255
xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
lib/test_hmm.c
257
xa_erase(&dmirror->pt, pfn);
lib/test_hmm.c
366
unsigned long pfn;
lib/test_hmm.c
371
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
lib/test_hmm.c
375
entry = xa_load(&dmirror->pt, pfn);
lib/test_hmm.c
432
unsigned long pfn;
lib/test_hmm.c
437
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
lib/test_hmm.c
441
entry = xa_load(&dmirror->pt, pfn);
lib/test_hmm.c
501
unsigned long pfn;
lib/test_hmm.c
577
for (pfn = pfn_first; pfn < pfn_last; ) {
lib/test_hmm.c
578
struct page *page = pfn_to_page(pfn);
lib/test_hmm.c
580
if (is_large && IS_ALIGNED(pfn, HPAGE_PMD_NR)
lib/test_hmm.c
581
&& (pfn + HPAGE_PMD_NR <= pfn_last)) {
lib/test_hmm.c
584
pfn += HPAGE_PMD_NR;
lib/test_hmm.c
590
pfn++;
lib/test_hmm.c
800
unsigned long pfn;
lib/test_hmm.c
802
for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
lib/test_hmm.c
805
entry = xa_load(&dmirror->pt, pfn);
lib/test_hmm.c
839
unsigned long pfn;
lib/test_hmm.c
846
for (pfn = start_pfn; pfn < end_pfn; pfn++, src++, dst++) {
lib/test_hmm.c
873
entry = xa_store(&dmirror->pt, pfn + i, entry, GFP_ATOMIC);
mm/bootmem_info.c
69
unsigned long i, pfn, end_pfn, nr_pages;
mm/bootmem_info.c
79
pfn = pgdat->node_start_pfn;
mm/bootmem_info.c
83
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
mm/bootmem_info.c
90
if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
mm/bootmem_info.c
91
register_page_bootmem_info_section(pfn);
mm/cma.c
1017
unsigned long i, pfn;
mm/cma.c
1023
pfn = page_to_pfn(pages);
mm/cma.c
1024
for (i = 0; i < count; i++, pfn++)
mm/cma.c
1025
ret += !put_page_testzero(pfn_to_page(pfn));
mm/cma.c
142
unsigned long pfn, end_pfn, early_pfn[CMA_MAX_RANGES];
mm/cma.c
167
for (pfn = early_pfn[r]; pfn < cmr->base_pfn + cmr->count;
mm/cma.c
168
pfn += pageblock_nr_pages)
mm/cma.c
169
init_cma_reserved_pageblock(pfn_to_page(pfn));
mm/cma.c
193
for (pfn = early_pfn[r]; pfn < end_pfn; pfn++)
mm/cma.c
194
free_reserved_page(pfn_to_page(pfn));
mm/cma.c
785
unsigned long start, pfn, mask, offset;
mm/cma.c
815
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
mm/cma.c
816
page = pfn_to_page(pfn);
mm/cma.c
826
__func__, cma->name, pfn, pfn + count - 1);
mm/cma.c
83
unsigned long pfn, unsigned long count)
mm/cma.c
840
ret = alloc_contig_frozen_range(pfn, pfn + count, ACR_FLAGS_CMA, gfp);
mm/cma.c
845
cma_clear_bitmap(cma, cmr, pfn, count);
mm/cma.c
850
__func__, pfn, page);
mm/cma.c
852
trace_cma_alloc_busy_retry(cma->name, pfn, page, count, align);
mm/cma.c
88
bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
mm/cma.c
959
unsigned long pfn, end_pfn;
mm/cma.c
967
pfn = page_to_pfn(pages);
mm/cma.c
972
if (pfn >= cmr->base_pfn && pfn < end_pfn) {
mm/cma.c
973
if (pfn + count <= end_pfn)
mm/cma.c
992
unsigned long pfn = page_to_pfn(pages);
mm/cma.c
996
free_contig_frozen_range(pfn, count);
mm/cma.c
997
cma_clear_bitmap(cma, cmr, pfn, count);
mm/cma.c
999
trace_cma_release(cma->name, pfn, pages, count);
mm/compaction.c
100
unsigned long pfn = page_to_pfn(page);
mm/compaction.c
109
if (pfn > high_pfn)
mm/compaction.c
110
high_pfn = pfn;
mm/compaction.c
1323
unsigned long pfn, block_start_pfn, block_end_pfn;
mm/compaction.c
1327
pfn = start_pfn;
mm/compaction.c
1328
block_start_pfn = pageblock_start_pfn(pfn);
mm/compaction.c
1331
block_end_pfn = pageblock_end_pfn(pfn);
mm/compaction.c
1333
for (; pfn < end_pfn; pfn = block_end_pfn,
mm/compaction.c
1343
ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
mm/compaction.c
1458
fast_isolate_around(struct compact_control *cc, unsigned long pfn)
mm/compaction.c
1472
start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
mm/compaction.c
1473
end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
mm/compaction.c
1561
unsigned long pfn;
mm/compaction.c
1565
pfn = page_to_pfn(freepage);
mm/compaction.c
1567
if (pfn >= highest)
mm/compaction.c
1568
highest = max(pageblock_start_pfn(pfn),
mm/compaction.c
1571
if (pfn >= low_pfn) {
mm/compaction.c
1578
if (pfn >= min_pfn && pfn > high_pfn) {
mm/compaction.c
1579
high_pfn = pfn;
mm/compaction.c
1895
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
mm/compaction.c
1901
cc->fast_start_pfn = pfn;
mm/compaction.c
1903
cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
mm/compaction.c
1928
unsigned long pfn = cc->migrate_pfn;
mm/compaction.c
1935
return pfn;
mm/compaction.c
1942
return pfn;
mm/compaction.c
1949
if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
mm/compaction.c
1950
return pfn;
mm/compaction.c
1958
return pfn;
mm/compaction.c
1967
return pfn;
mm/compaction.c
2016
pfn = pageblock_start_pfn(free_pfn);
mm/compaction.c
2017
if (pfn < cc->zone->zone_start_pfn)
mm/compaction.c
2018
pfn = cc->zone->zone_start_pfn;
mm/compaction.c
2035
pfn = reinit_migrate_pfn(cc);
mm/compaction.c
2037
return pfn;
mm/compaction.c
276
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
mm/compaction.c
279
struct page *page = pfn_to_online_page(pfn);
mm/compaction.c
307
block_pfn = pageblock_start_pfn(pfn);
mm/compaction.c
312
pfn = block_pfn;
mm/compaction.c
316
block_pfn = pageblock_end_pfn(pfn) - 1;
mm/compaction.c
434
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
mm/compaction.c
442
pfn = pageblock_end_pfn(pfn);
mm/compaction.c
445
if (pfn > zone->compact_cached_migrate_pfn[0])
mm/compaction.c
446
zone->compact_cached_migrate_pfn[0] = pfn;
mm/compaction.c
448
pfn > zone->compact_cached_migrate_pfn[1])
mm/compaction.c
449
zone->compact_cached_migrate_pfn[1] = pfn;
mm/compaction.c
457
struct page *page, unsigned long pfn)
mm/compaction.c
466
if (pfn < zone->compact_cached_free_pfn)
mm/compaction.c
467
zone->compact_cached_free_pfn = pfn;
mm/compaction.c
482
struct page *page, unsigned long pfn)
mm/compaction.c
486
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
mm/compaction.c
67
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
mm/compaction.c
68
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
mm/compaction.c
698
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
mm/compaction.c
704
pfn = start_pfn;
mm/compaction.c
705
block_start_pfn = pageblock_start_pfn(pfn);
mm/compaction.c
708
block_end_pfn = pageblock_end_pfn(pfn);
mm/compaction.c
710
for (; pfn < end_pfn; pfn += isolated,
mm/compaction.c
714
unsigned long isolate_start_pfn = pfn;
mm/compaction.c
721
if (pfn >= block_end_pfn) {
mm/compaction.c
722
block_start_pfn = pageblock_start_pfn(pfn);
mm/compaction.c
723
block_end_pfn = pageblock_end_pfn(pfn);
mm/compaction.c
750
if (pfn < end_pfn) {
mm/compaction.c
757
return pfn;
mm/damon/ops-common.c
26
struct folio *damon_get_folio(unsigned long pfn)
mm/damon/ops-common.c
28
struct page *page = pfn_to_online_page(pfn);
mm/damon/ops-common.c
49
unsigned long pfn;
mm/damon/ops-common.c
52
pfn = pte_pfn(pteval);
mm/damon/ops-common.c
54
pfn = softleaf_to_pfn(softleaf_from_pte(pteval));
mm/damon/ops-common.c
56
folio = damon_get_folio(pfn);
mm/damon/ops-common.c
81
unsigned long pfn;
mm/damon/ops-common.c
84
pfn = pmd_pfn(pmdval);
mm/damon/ops-common.c
86
pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
mm/damon/ops-common.c
88
folio = damon_get_folio(pfn);
mm/damon/ops-common.h
10
struct folio *damon_get_folio(unsigned long pfn);
mm/debug.c
116
is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
mm/debug.c
138
__dump_folio(&ps.folio_snapshot, &ps.page_snapshot, ps.pfn, ps.idx);
mm/debug.c
71
unsigned long pfn, unsigned long idx)
mm/debug.c
82
folio->index + idx, pfn);
mm/debug_vm_pgtable.c
976
unsigned long pfn, int order)
mm/debug_vm_pgtable.c
980
free_contig_range(pfn, 1 << order);
mm/debug_vm_pgtable.c
984
__free_pages(pfn_to_page(pfn), order);
mm/gup.c
2247
unsigned long pfn = page_to_pfn(pofs->pages[i]);
mm/gup.c
2250
if (pfn < start_pfn || pfn >= end_pfn)
mm/gup.c
656
unsigned long pfn = pud_pfn(pud);
mm/gup.c
665
!can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags))
mm/gup.c
668
pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
mm/gup.c
669
page = pfn_to_page(pfn);
mm/highmem.c
40
#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
mm/highmem.c
550
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
mm/highmem.c
562
idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
mm/highmem.c
566
pteval = pfn_pte(pfn, prot);
mm/hmm.c
203
unsigned long pfn, npages, i;
mm/hmm.c
214
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
mm/hmm.c
215
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
217
hmm_pfns[i] |= pfn | cpu_flags;
mm/hmm.c
351
unsigned long pfn = softleaf_to_pfn(entry);
mm/hmm.c
362
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
364
hmm_pfns[i] |= pfn | cpu_flags;
mm/hmm.c
506
unsigned long i, npages, pfn;
mm/hmm.c
523
pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
mm/hmm.c
524
for (i = 0; i < npages; ++i, ++pfn) {
mm/hmm.c
526
hmm_pfns[i] |= pfn | cpu_flags;
mm/hmm.c
547
unsigned long addr = start, i, pfn;
mm/hmm.c
584
pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
mm/hmm.c
585
for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
mm/hmm.c
587
range->hmm_pfns[i] |= pfn | cpu_flags;
mm/huge_memory.c
1582
unsigned long pfn;
mm/huge_memory.c
1607
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
mm/huge_memory.c
1608
fop.pfn;
mm/huge_memory.c
1611
if (pmd_pfn(*pmd) != pfn) {
mm/huge_memory.c
1634
entry = pmd_mkhuge(pfn_pmd(fop.pfn, prot));
mm/huge_memory.c
1668
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
mm/huge_memory.c
1675
.pfn = pfn,
mm/huge_memory.c
1688
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
mm/huge_memory.c
1731
const unsigned long pfn = fop.is_folio ? folio_pfn(fop.folio) :
mm/huge_memory.c
1732
fop.pfn;
mm/huge_memory.c
1735
if (WARN_ON_ONCE(pud_pfn(*pud) != pfn))
mm/huge_memory.c
1752
entry = pud_mkhuge(pfn_pud(fop.pfn, prot));
mm/huge_memory.c
1776
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
mm/huge_memory.c
1783
.pfn = pfn,
mm/huge_memory.c
1796
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
mm/huge_memory.c
4623
unsigned long pfn, max_zone_pfn;
mm/huge_memory.c
4631
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
mm/huge_memory.c
4634
page = pfn_to_online_page(pfn);
mm/huge_memory.c
4657
pfn += nr_pages - 1;
mm/hugetlb.c
2067
unsigned long pfn;
mm/hugetlb.c
2080
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
mm/hugetlb.c
2081
folio = pfn_folio(pfn);
mm/hugetlb.c
3143
unsigned long pfn, end_pfn = head_pfn + end_page_number;
mm/hugetlb.c
3150
for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
mm/hugetlb.c
3151
__init_single_page(page, pfn, zone, nid);
mm/hugetlb.c
3283
unsigned long pfn;
mm/hugetlb.c
3286
pfn = page_to_pfn(page);
mm/hugetlb.c
3287
__init_page_from_nid(pfn, nid);
mm/hugetlb.c
7139
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
mm/hugetlb.c
7145
ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
mm/hugetlb_vmemmap.c
754
unsigned long ns, i, pnum, pfn, nr_pages;
mm/hugetlb_vmemmap.c
770
pfn = PHYS_PFN(paddr);
mm/hugetlb_vmemmap.c
771
map = pfn_to_page(pfn);
mm/hugetlb_vmemmap.c
773
pnum = pfn_to_section_nr(pfn);
mm/hugetlb_vmemmap.c
787
static struct zone *pfn_to_zone(unsigned nid, unsigned long pfn)
mm/hugetlb_vmemmap.c
794
if (zone_spans_pfn(zone, pfn))
mm/hugetlb_vmemmap.c
805
unsigned long pfn, nr_mmap;
mm/hugetlb_vmemmap.c
819
pfn = PHYS_PFN(phys);
mm/hugetlb_vmemmap.c
821
map = pfn_to_page(pfn);
mm/hugetlb_vmemmap.c
840
if (!zone || !zone_spans_pfn(zone, pfn))
mm/hugetlb_vmemmap.c
841
zone = pfn_to_zone(nid, pfn);
mm/hwpoison-inject.c
104
unsigned long pfn = val;
mm/hwpoison-inject.c
112
if (!pfn_valid(pfn))
mm/hwpoison-inject.c
115
p = pfn_to_page(pfn);
mm/hwpoison-inject.c
139
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
mm/hwpoison-inject.c
140
err = memory_failure(pfn, MF_SW_SIMULATED);
mm/internal.h
1007
void sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages);
mm/internal.h
1009
static inline void sparse_init_subsection_map(unsigned long pfn,
mm/internal.h
1328
void init_deferred_page(unsigned long pfn, int nid);
mm/internal.h
1402
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
mm/internal.h
1421
static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
mm/internal.h
1748
void __meminit __init_single_page(struct page *page, unsigned long pfn,
mm/internal.h
1750
void __meminit __init_page_from_nid(unsigned long pfn, int nid);
mm/internal.h
1853
const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
mm/internal.h
1856
action->remap.start_pfn = pfn;
mm/internal.h
604
unsigned long pfn = page_to_pfn(page);
mm/internal.h
606
for (; nr_pages--; pfn++)
mm/internal.h
607
set_page_refcounted(pfn_to_page(pfn));
mm/internal.h
798
unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
mm/internal.h
800
unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
mm/internal.h
803
buddy = page + (__buddy_pfn - pfn);
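
The mm/internal.h fragment above leans on the classic buddy-allocator property that the buddy of a 2^order-aligned block is its pfn with bit `order` flipped (an XOR), and that `page + (__buddy_pfn - pfn)` converts the result back to a struct page by relative pointer arithmetic, avoiding a full pfn_to_page() lookup. A standalone sketch of the XOR pairing:

#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);
}

int main(void)
{
        /* blocks at 0x100 and 0x104 are buddies at order 2 ... */
        printf("%#lx\n", find_buddy_pfn(0x100, 2));   /* 0x104 */
        /* ... and their merged order-3 block starts at the lower pfn */
        printf("%#lx\n", find_buddy_pfn(0x104, 2) & ~((1UL << 3) - 1));
        return 0;
}
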
mm/internal.h
836
extern void memblock_free_pages(unsigned long pfn, unsigned int order);
mm/khugepaged.c
519
unsigned long pfn;
mm/khugepaged.c
524
pfn = pte_pfn(pteval);
mm/khugepaged.c
525
if (is_zero_pfn(pfn))
mm/khugepaged.c
527
folio = pfn_folio(pfn);
mm/kmemleak.c
1753
unsigned long pfn;
mm/kmemleak.c
1755
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
mm/kmemleak.c
1756
struct page *page = pfn_to_online_page(pfn);
mm/kmemleak.c
1758
if (!(pfn & 63))
mm/madvise.c
1458
unsigned long pfn;
mm/madvise.c
1465
pfn = page_to_pfn(page);
mm/madvise.c
1476
pfn, start);
mm/madvise.c
1477
ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
mm/madvise.c
1480
pfn, start);
mm/madvise.c
1481
ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_COUNT_INCREASED | MF_SW_SIMULATED);
mm/memblock.c
1974
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
mm/memblock.c
1978
int mid = memblock_search(type, PFN_PHYS(pfn));
mm/memblock.c
966
unsigned long pfn;
mm/memblock.c
980
for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
mm/memblock.c
981
init_deferred_page(pfn, nid);
mm/memfd_luo.c
199
pfolio->pfn = folio_pfn(folio);
mm/memfd_luo.c
246
if (!pfolio->pfn)
mm/memfd_luo.c
249
folio = pfn_folio(pfolio->pfn);
mm/memfd_luo.c
361
if (!pfolio->pfn)
mm/memfd_luo.c
364
phys = PFN_PHYS(pfolio->pfn);
mm/memfd_luo.c
422
if (!pfolio->pfn)
mm/memfd_luo.c
425
phys = PFN_PHYS(pfolio->pfn);
mm/memfd_luo.c
485
folio = kho_restore_folio(pfolio->pfn);
mm/memory-failure.c
1251
static void update_per_node_mf_stats(unsigned long pfn,
mm/memory-failure.c
1257
nid = pfn_to_nid(pfn);
mm/memory-failure.c
1259
WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
mm/memory-failure.c
1288
static int action_result(unsigned long pfn, enum mf_action_page_type type,
mm/memory-failure.c
1291
trace_memory_failure_event(pfn, type, result);
mm/memory-failure.c
1294
num_poisoned_pages_inc(pfn);
mm/memory-failure.c
1295
update_per_node_mf_stats(pfn, result);
mm/memory-failure.c
1299
pfn, action_page_types[type], action_name[result]);
mm/memory-failure.c
1305
unsigned long pfn)
mm/memory-failure.c
1317
return action_result(pfn, ps->type, result);
mm/memory-failure.c
1524
int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
mm/memory-failure.c
1530
pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
mm/memory-failure.c
1548
pfn);
mm/memory-failure.c
1581
unsigned long pfn, int flags)
mm/memory-failure.c
1612
unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL);
mm/memory-failure.c
1615
pfn, folio_mapcount(folio));
mm/memory-failure.c
1636
kill_procs(&tokill, forcekill, pfn, flags);
mm/memory-failure.c
1641
static int identify_page_state(unsigned long pfn, struct page *p,
mm/memory-failure.c
1661
return page_action(ps, p, pfn);
mm/memory-failure.c
1684
static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
mm/memory-failure.c
1706
kill_procs(to_kill, flags & MF_MUST_KILL, pfn, flags);
mm/memory-failure.c
1718
static int mf_generic_kill_procs(unsigned long long pfn, int flags,
mm/memory-failure.c
1721
struct folio *folio = pfn_folio(pfn);
mm/memory-failure.c
1770
unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
mm/memory-failure.c
1972
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
mm/memory-failure.c
1975
struct page *page = pfn_to_page(pfn);
mm/memory-failure.c
2035
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
mm/memory-failure.c
2038
struct page *p = pfn_to_page(pfn);
mm/memory-failure.c
2045
res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
mm/memory-failure.c
2055
return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
mm/memory-failure.c
2060
rv = kill_accessing_process(current, pfn, flags);
mm/memory-failure.c
2062
action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
mm/memory-failure.c
2064
action_result(pfn, MF_MSG_HUGE, MF_FAILED);
mm/memory-failure.c
2096
return action_result(pfn, MF_MSG_FREE_HUGE, res);
mm/memory-failure.c
2101
if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
mm/memory-failure.c
2103
return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
mm/memory-failure.c
2106
return identify_page_state(pfn, p, page_flags);
mm/memory-failure.c
2110
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
mm/memory-failure.c
2122
static void put_ref_page(unsigned long pfn, int flags)
mm/memory-failure.c
2127
put_page(pfn_to_page(pfn));
mm/memory-failure.c
2130
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
mm/memory-failure.c
2136
if (!pgmap_pfn_valid(pgmap, pfn))
mm/memory-failure.c
2144
rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
mm/memory-failure.c
2153
rc = mf_generic_kill_procs(pfn, flags, pgmap);
mm/memory-failure.c
2158
action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
mm/memory-failure.c
2169
static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
mm/memory-failure.c
2178
kill_procs(&tokill, true, pfn, flags);
mm/memory-failure.c
2240
unsigned long pfn, struct list_head *to_kill)
mm/memory-failure.c
2258
!pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
mm/memory-failure.c
2276
static int memory_failure_pfn(unsigned long pfn, int flags)
mm/memory-failure.c
2290
for (node = interval_tree_iter_first(&pfn_space_itree, pfn, pfn); node;
mm/memory-failure.c
2291
node = interval_tree_iter_next(node, pfn, pfn)) {
mm/memory-failure.c
2295
collect_procs_pfn(pfn_space, pfn, &tokill);
mm/memory-failure.c
2301
return action_result(pfn, MF_MSG_PFN_MAP, MF_IGNORED);
mm/memory-failure.c
2312
kill_procs(&tokill, true, pfn, flags);
mm/memory-failure.c
2314
return action_result(pfn, MF_MSG_PFN_MAP, MF_RECOVERED);
mm/memory-failure.c
2342
int memory_failure(unsigned long pfn, int flags)
mm/memory-failure.c
2353
panic("Memory failure on page %lx", pfn);
mm/memory-failure.c
2360
p = pfn_to_online_page(pfn);
mm/memory-failure.c
2362
res = arch_memory_failure(pfn, flags);
mm/memory-failure.c
2366
if (!pfn_valid(pfn) && !arch_is_platform_page(PFN_PHYS(pfn))) {
mm/memory-failure.c
2370
res = memory_failure_pfn(pfn, flags);
mm/memory-failure.c
2374
if (pfn_valid(pfn)) {
mm/memory-failure.c
2375
pgmap = get_dev_pagemap(pfn);
mm/memory-failure.c
2376
put_ref_page(pfn, flags);
mm/memory-failure.c
2378
res = memory_failure_dev_pagemap(pfn, flags,
mm/memory-failure.c
2383
pr_err("%#lx: memory outside kernel control\n", pfn);
mm/memory-failure.c
2389
res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
mm/memory-failure.c
2396
res = kill_accessing_process(current, pfn, flags);
mm/memory-failure.c
2399
action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
mm/memory-failure.c
2429
res = action_result(pfn, MF_MSG_BUDDY, res);
mm/memory-failure.c
2431
res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
mm/memory-failure.c
2435
res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
mm/memory-failure.c
2481
kill_procs_now(p, pfn, flags, folio);
mm/memory-failure.c
2483
action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED);
mm/memory-failure.c
2536
if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
mm/memory-failure.c
2537
res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
mm/memory-failure.c
2546
res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
mm/memory-failure.c
2551
res = identify_page_state(pfn, p, page_flags);
mm/memory-failure.c
2566
unsigned long pfn;
mm/memory-failure.c
2595
void memory_failure_queue(unsigned long pfn, int flags)
mm/memory-failure.c
2601
.pfn = pfn,
mm/memory-failure.c
2614
pfn);
mm/memory-failure.c
2633
soft_offline_page(entry.pfn, entry.flags);
mm/memory-failure.c
2635
memory_failure(entry.pfn, entry.flags);
mm/memory-failure.c
2659
#define unpoison_pr_info(fmt, pfn, rs) \
mm/memory-failure.c
2662
pr_info(fmt, pfn); \
mm/memory-failure.c
2677
int unpoison_memory(unsigned long pfn)
mm/memory-failure.c
2687
p = pfn_to_online_page(pfn);
mm/memory-failure.c
2696
pfn, &unpoison_rs);
mm/memory-failure.c
2703
pfn, &unpoison_rs);
mm/memory-failure.c
2710
pfn, &unpoison_rs);
mm/memory-failure.c
2716
pfn, &unpoison_rs);
mm/memory-failure.c
2726
pfn, &unpoison_rs);
mm/memory-failure.c
2732
pfn, &unpoison_rs);
mm/memory-failure.c
2751
pfn, &unpoison_rs);
mm/memory-failure.c
2774
num_poisoned_pages_sub(pfn, 1);
mm/memory-failure.c
2793
unsigned long pfn = page_to_pfn(page);
mm/memory-failure.c
2817
pr_info("%#lx: thp split failed\n", pfn);
mm/memory-failure.c
2829
pr_info("%#lx: page already poisoned\n", pfn);
mm/memory-failure.c
2842
pr_info("%#lx: invalidated\n", pfn);
mm/memory-failure.c
286
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
mm/memory-failure.c
2871
pfn, msg_page[huge], ret, &page->flags.f);
mm/memory-failure.c
2877
pfn, msg_page[huge], page_count(page), &page->flags.f);
mm/memory-failure.c
2908
int soft_offline_page(unsigned long pfn, int flags)
mm/memory-failure.c
2914
if (!pfn_valid(pfn)) {
mm/memory-failure.c
2920
page = pfn_to_online_page(pfn);
mm/memory-failure.c
2922
put_ref_page(pfn, flags);
mm/memory-failure.c
2928
put_ref_page(pfn, flags);
mm/memory-failure.c
293
pfn, t->comm, task_pid_nr(t));
mm/memory-failure.c
2935
pr_info("%#lx: page already poisoned\n", pfn);
mm/memory-failure.c
2936
put_ref_page(pfn, flags);
mm/memory-failure.c
463
unsigned long pfn, int flags)
mm/memory-failure.c
471
pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
mm/memory-failure.c
482
else if (kill_proc(tk, pfn, flags) < 0)
mm/memory-failure.c
484
pfn, tk->tsk->comm, task_pid_nr(tk->tsk));
mm/memory-failure.c
681
unsigned long pfn;
mm/memory-failure.c
694
unsigned long pfn = 0;
mm/memory-failure.c
699
pfn = pte_pfn(pte);
mm/memory-failure.c
704
pfn = softleaf_to_pfn(entry);
mm/memory-failure.c
708
if (!pfn || pfn != (poisoned_pfn & mask))
mm/memory-failure.c
711
hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
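
The memory-failure fragments above recover the user virtual address of a poisoned pfn inside a huge mapping: check that the pfn falls within the range the entry maps, then add the pfn delta back to the mapping's base address as bytes. A standalone version with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT  12

int main(void)
{
        unsigned long addr = 0x7f0000000000;  /* base of the huge mapping */
        unsigned long pfn = 0x80000;          /* first pfn it maps        */
        unsigned long nr = 512;               /* 2 MiB worth of pages     */
        unsigned long poisoned_pfn = 0x80042;

        if (pfn <= poisoned_pfn && poisoned_pfn < pfn + nr)
                printf("hwpoison vaddr %#lx\n",
                       addr + ((poisoned_pfn - pfn) << PAGE_SHIFT));
        return 0;
}
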
mm/memory-failure.c
721
unsigned long pfn;
mm/memory-failure.c
726
pfn = pmd_pfn(pmd);
mm/memory-failure.c
727
if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
mm/memory-failure.c
728
hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
mm/memory-failure.c
764
hwp->pfn, &hwp->tk);
mm/memory-failure.c
788
hwp->pfn, &hwp->tk);
mm/memory-failure.c
823
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
mm/memory-failure.c
828
.pfn = pfn,
mm/memory-failure.c
83
void num_poisoned_pages_inc(unsigned long pfn)
mm/memory-failure.c
845
kill_proc(&priv.tk, pfn, flags);
mm/memory-failure.c
86
memblk_nr_poison_inc(pfn);
mm/memory-failure.c
89
void num_poisoned_pages_sub(unsigned long pfn, long i)
mm/memory-failure.c
92
if (pfn != -1UL)
mm/memory-failure.c
93
memblk_nr_poison_sub(pfn, i);
mm/memory-failure.c
935
static int truncate_error_folio(struct folio *folio, unsigned long pfn,
mm/memory-failure.c
944
pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
mm/memory-failure.c
946
pr_info("%#lx: failed to release buffers\n", pfn);
mm/memory-failure.c
957
pr_info("%#lx: Failed to invalidate\n", pfn);
mm/memory.c
2655
unsigned long pfn, pgprot_t prot, bool mkwrite)
mm/memory.c
2677
if (pte_pfn(entry) != pfn) {
mm/memory.c
2690
entry = pte_mkspecial(pfn_pte(pfn, prot));
mm/memory.c
2739
unsigned long pfn, pgprot_t pgprot)
mm/memory.c
2751
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
mm/memory.c
2756
if (!pfn_modify_allowed(pfn, pgprot))
mm/memory.c
2759
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
mm/memory.c
2761
return insert_pfn(vma, addr, pfn, pgprot, false);
mm/memory.c
2786
unsigned long pfn)
mm/memory.c
2788
return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
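
vmf_insert_pfn() above is the usual way a VM_PFNMAP driver services a page fault: it wraps vmf_insert_pfn_prot() with the vma's own protections. A hedged kernel-context sketch of a fault handler built on it; dev_phys is an assumed device window base, not from the listing:

#include <linux/mm.h>

static phys_addr_t dev_phys;    /* hypothetical device window base */

static vm_fault_t dev_vm_fault(struct vm_fault *vmf)
{
	unsigned long pfn = (dev_phys >> PAGE_SHIFT) + vmf->pgoff;

	/* vmf_insert_pfn() returns VM_FAULT_NOPAGE on success, so the
	 * fault handler can pass its result straight back. */
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
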
mm/memory.c
2792
static bool vm_mixed_ok(struct vm_area_struct *vma, unsigned long pfn,
mm/memory.c
2795
if (unlikely(is_zero_pfn(pfn)) &&
mm/memory.c
2801
if (is_zero_pfn(pfn))
mm/memory.c
2807
unsigned long addr, unsigned long pfn, bool mkwrite)
mm/memory.c
2812
if (!vm_mixed_ok(vma, pfn, mkwrite))
mm/memory.c
2818
pfnmap_setup_cachemode_pfn(pfn, &pgprot);
mm/memory.c
2820
if (!pfn_modify_allowed(pfn, pgprot))
mm/memory.c
2830
if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pfn_valid(pfn)) {
mm/memory.c
2838
page = pfn_to_page(pfn);
mm/memory.c
2841
return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
mm/memory.c
2873
unsigned long pfn)
mm/memory.c
2875
return __vm_insert_mixed(vma, addr, pfn, false);
mm/memory.c
2885
unsigned long addr, unsigned long pfn)
mm/memory.c
2887
return __vm_insert_mixed(vma, addr, pfn, true);
mm/memory.c
2897
unsigned long pfn, pgprot_t prot)
mm/memory.c
2909
if (!pfn_modify_allowed(pfn, prot)) {
mm/memory.c
2913
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
mm/memory.c
2914
pfn++;
mm/memory.c
2923
unsigned long pfn, pgprot_t prot)
mm/memory.c
2929
pfn -= addr >> PAGE_SHIFT;
mm/memory.c
2937
pfn + (addr >> PAGE_SHIFT), prot);
mm/memory.c
2946
unsigned long pfn, pgprot_t prot)
mm/memory.c
2952
pfn -= addr >> PAGE_SHIFT;
mm/memory.c
2959
pfn + (addr >> PAGE_SHIFT), prot);
mm/memory.c
2968
unsigned long pfn, pgprot_t prot)
mm/memory.c
2974
pfn -= addr >> PAGE_SHIFT;
mm/memory.c
2981
pfn + (addr >> PAGE_SHIFT), prot);
mm/memory.c
2990
unsigned long pfn, pgoff_t *vm_pgoff_p)
mm/memory.c
3001
*vm_pgoff_p = pfn;
mm/memory.c
3008
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3022
pfn -= addr >> PAGE_SHIFT;
mm/memory.c
3028
pfn + (addr >> PAGE_SHIFT), prot);
mm/memory.c
3041
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3043
int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
mm/memory.c
3058
static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn,
mm/memory.c
3063
if (pfnmap_track(pfn, size, prot))
mm/memory.c
3068
pfnmap_untrack(pfn, size);
mm/memory.c
3072
ctx->pfn = pfn;
mm/memory.c
3082
pfnmap_untrack(ctx->pfn, ctx->size);
mm/memory.c
3087
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3106
ctx = pfnmap_track_ctx_alloc(pfn, size, &prot);
mm/memory.c
3109
} else if (pfnmap_setup_cachemode(pfn, size, &prot)) {
mm/memory.c
3113
err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
mm/memory.c
3124
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3126
return remap_pfn_range_track(vma, addr, pfn, size, prot);
mm/memory.c
3130
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3132
return remap_pfn_range_notrack(vma, addr, pfn, size, prot);
mm/memory.c
3141
const unsigned long pfn = action->remap.start_pfn;
mm/memory.c
3148
err = get_remap_pgoff(is_cow, start, end, desc->start, desc->end, pfn,
mm/memory.c
3158
unsigned long addr, unsigned long pfn,
mm/memory.c
3166
pfn, &vma->vm_pgoff);
mm/memory.c
3187
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/memory.c
3191
err = remap_pfn_range_prepare_vma(vma, addr, pfn, size);
mm/memory.c
3195
return do_remap_pfn_range(vma, addr, pfn, size, prot);
mm/memory.c
3203
const unsigned long pfn = action->remap.start_pfn;
mm/memory.c
3207
return do_remap_pfn_range(vma, start, pfn, size, prot);
mm/memory.c
3214
unsigned long pfn, pages;
mm/memory.c
3225
pfn = start_phys >> PAGE_SHIFT;
mm/memory.c
3227
if (pfn + pages < pfn)
mm/memory.c
3233
pfn += vm_pgoff;
mm/memory.c
3240
*pfnp = pfn;
mm/memory.c
3249
unsigned long pfn;
mm/memory.c
3253
start, size, &pfn);
mm/memory.c
3258
mmap_action_ioremap_full(desc, pfn);
mm/memory.c
3282
unsigned long pfn;
mm/memory.c
3285
err = __simple_ioremap_prep(vm_len, vma->vm_pgoff, start, len, &pfn);
mm/memory.c
3290
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
mm/memory.c
6829
args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
mm/memory.c
684
unsigned long addr, unsigned long pfn, bool special,
mm/memory.c
695
if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
mm/memory.c
7013
phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
mm/memory.c
7028
(phys_addr != (args.pfn << PAGE_SHIFT)) ||
mm/memory.c
709
if (!pfn_valid(pfn))
mm/memory.c
715
if (pfn == vma->vm_pgoff + off)
mm/memory.c
722
if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
mm/memory.c
726
if (unlikely(pfn > highest_memmap_pfn)) {
mm/memory.c
735
VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
mm/memory.c
736
return pfn_to_page(pfn);
mm/memory_hotplug.c
1014
return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
mm/memory_hotplug.c
1082
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
mm/memory_hotplug.c
1085
unsigned long end_pfn = pfn + nr_pages;
mm/memory_hotplug.c
1088
ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
mm/memory_hotplug.c
1092
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE,
mm/memory_hotplug.c
1096
struct page *page = pfn_to_page(pfn + i);
mm/memory_hotplug.c
1108
online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
mm/memory_hotplug.c
1113
void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
mm/memory_hotplug.c
1115
unsigned long end_pfn = pfn + nr_pages;
mm/memory_hotplug.c
1123
offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
mm/memory_hotplug.c
1129
remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
mm/memory_hotplug.c
1130
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
mm/memory_hotplug.c
1136
int online_pages(unsigned long pfn, unsigned long nr_pages,
mm/memory_hotplug.c
1140
.start_pfn = pfn,
mm/memory_hotplug.c
1158
if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
mm/memory_hotplug.c
1159
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
mm/memory_hotplug.c
1164
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE,
mm/memory_hotplug.c
1199
online_pages_range(pfn, nr_pages);
mm/memory_hotplug.c
1200
adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
mm/memory_hotplug.c
1215
undo_isolate_page_range(pfn, pfn + nr_pages);
mm/memory_hotplug.c
1242
(unsigned long long) pfn << PAGE_SHIFT,
mm/memory_hotplug.c
1243
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
mm/memory_hotplug.c
1247
remove_pfn_range_from_zone(zone, pfn, nr_pages);
mm/memory_hotplug.c
1745
unsigned long pfn;
mm/memory_hotplug.c
1747
for (pfn = start; pfn < end; pfn++) {
mm/memory_hotplug.c
1752
page = pfn_to_page(pfn);
mm/memory_hotplug.c
1781
pfn |= nr_pages - 1;
mm/memory_hotplug.c
1785
*movable_pfn = pfn;
mm/memory_hotplug.c
1792
unsigned long pfn;
mm/memory_hotplug.c
1797
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
mm/memory_hotplug.c
1800
page = pfn_to_page(pfn);
mm/memory_hotplug.c
1810
pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
mm/memory_hotplug.c
1823
unmap_poisoned_folio(folio, pfn, false);
mm/memory_hotplug.c
1901
unsigned long pfn, managed_pages, system_ram_pages = 0;
mm/memory_hotplug.c
1995
pfn = start_pfn;
mm/memory_hotplug.c
2010
ret = scan_movable_pages(pfn, end_pfn, &pfn);
mm/memory_hotplug.c
2016
do_migrate_range(pfn, end_pfn);
mm/memory_hotplug.c
319
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
mm/memory_hotplug.c
328
if (!IS_ALIGNED(pfn | nr_pages, PAGES_PER_SUBSECTION))
mm/memory_hotplug.c
338
struct page *pfn_to_online_page(unsigned long pfn)
mm/memory_hotplug.c
340
unsigned long nr = pfn_to_section_nr(pfn);
mm/memory_hotplug.c
355
if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
mm/memory_hotplug.c
358
if (!pfn_section_valid(ms, pfn))
mm/memory_hotplug.c
362
return pfn_to_page(pfn);
mm/memory_hotplug.c
370
pgmap = get_dev_pagemap(pfn);
mm/memory_hotplug.c
377
return pfn_to_page(pfn);
mm/memory_hotplug.c
381
int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
mm/memory_hotplug.c
384
const unsigned long end_pfn = pfn + nr_pages;
mm/memory_hotplug.c
392
VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
mm/memory_hotplug.c
398
if (altmap->base_pfn != pfn
mm/memory_hotplug.c
406
if (check_pfn_span(pfn, nr_pages)) {
mm/memory_hotplug.c
407
WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
mm/memory_hotplug.c
411
for (; pfn < end_pfn; pfn += cur_nr_pages) {
mm/memory_hotplug.c
413
cur_nr_pages = min(end_pfn - pfn,
mm/memory_hotplug.c
414
SECTION_ALIGN_UP(pfn + 1) - pfn);
mm/memory_hotplug.c
415
err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
mm/memory_hotplug.c
451
unsigned long pfn;
mm/memory_hotplug.c
454
pfn = end_pfn - 1;
mm/memory_hotplug.c
455
for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
mm/memory_hotplug.c
456
if (unlikely(!pfn_to_online_page(pfn)))
mm/memory_hotplug.c
459
if (unlikely(pfn_to_nid(pfn) != nid))
mm/memory_hotplug.c
462
if (zone != page_zone(pfn_to_page(pfn)))
mm/memory_hotplug.c
465
return pfn;
mm/memory_hotplug.c
474
unsigned long pfn;
mm/memory_hotplug.c
484
pfn = find_smallest_section_pfn(nid, zone, end_pfn,
mm/memory_hotplug.c
486
if (pfn) {
mm/memory_hotplug.c
487
zone->spanned_pages = zone_end_pfn(zone) - pfn;
mm/memory_hotplug.c
488
zone->zone_start_pfn = pfn;
mm/memory_hotplug.c
500
pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
mm/memory_hotplug.c
502
if (pfn)
mm/memory_hotplug.c
503
zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
mm/memory_hotplug.c
545
unsigned long pfn, cur_nr_pages;
mm/memory_hotplug.c
548
for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
mm/memory_hotplug.c
553
min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
mm/memory_hotplug.c
554
page_init_poison(pfn_to_page(pfn),
mm/memory_hotplug.c
585
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
mm/memory_hotplug.c
588
const unsigned long end_pfn = pfn + nr_pages;
mm/memory_hotplug.c
591
if (check_pfn_span(pfn, nr_pages)) {
mm/memory_hotplug.c
592
WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
mm/memory_hotplug.c
596
for (; pfn < end_pfn; pfn += cur_nr_pages) {
mm/memory_hotplug.c
599
cur_nr_pages = min(end_pfn - pfn,
mm/memory_hotplug.c
600
SECTION_ALIGN_UP(pfn + 1) - pfn);
mm/memory_hotplug.c
601
sparse_remove_section(pfn, cur_nr_pages, altmap);
mm/memory_hotplug.c
653
unsigned long pfn;
mm/memory_hotplug.c
664
for (pfn = start_pfn; pfn < end_pfn;) {
mm/memory_hotplug.c
665
struct page *page = pfn_to_page(pfn);
mm/memory_hotplug.c
675
if (pfn)
mm/memory_hotplug.c
676
order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
mm/memory_hotplug.c
688
pfn += (1UL << order);
mm/memory_hotplug.c
719
static void section_taint_zone_device(unsigned long pfn)
mm/memory_hotplug.c
721
struct mem_section *ms = __pfn_to_section(pfn);
mm/memory_hotplug.c
726
static inline void section_taint_zone_device(unsigned long pfn)
mm/memory_hotplug.c
958
unsigned long pfn,
mm/memory_hotplug.c
984
pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
mm/memory_hotplug.c
985
end_pfn = pfn + group->d.unit_pages;
mm/memory_hotplug.c
986
for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
mm/memory_hotplug.c
987
page = pfn_to_online_page(pfn);
mm/memremap.c
401
struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
mm/memremap.c
404
resource_size_t phys = PFN_PHYS(pfn);
mm/memremap.c
51
unsigned long pfn = PHYS_PFN(range->start);
mm/memremap.c
54
return pfn;
mm/memremap.c
55
return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
mm/memremap.c
58
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
mm/memremap.c
65
if (pfn >= PHYS_PFN(range->start) &&
mm/memremap.c
66
pfn <= PHYS_PFN(range->end))
mm/memremap.c
67
return pfn >= pfn_first(pgmap, i);
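The memremap.c entries convert between PFNs and physical addresses with PHYS_PFN()/PFN_PHYS() from include/linux/pfn.h. Their arithmetic is a single PAGE_SHIFT shift, with PFN_UP() rounding a byte address up to the next frame; a standalone restatement, assuming 4 KiB pages for the example values:

    /*
     * Standalone restatement of the kernel's PFN helpers
     * (include/linux/pfn.h), compiled as ordinary C for illustration.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumes 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)
    #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

    int main(void)
    {
            uint64_t phys = 0x1234567;

            printf("PHYS_PFN = %#lx\n", PHYS_PFN(phys));              /* 0x1234 */
            printf("PFN_UP   = %#lx\n", (unsigned long)PFN_UP(phys)); /* 0x1235 */
            printf("PFN_PHYS = %#llx\n",
                   (unsigned long long)PFN_PHYS(PHYS_PFN(phys)));     /* 0x1234000 */
            return 0;
    }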
mm/migrate_device.c
1067
unsigned long pfn = pte_pfn(orig_pte);
mm/migrate_device.c
1069
if (!is_zero_pfn(pfn))
mm/migrate_device.c
1362
static unsigned long migrate_device_pfn_lock(unsigned long pfn)
mm/migrate_device.c
1366
folio = folio_get_nontail_page(pfn_to_page(pfn));
mm/migrate_device.c
1375
return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mm/migrate_device.c
1400
unsigned long i, j, pfn;
mm/migrate_device.c
1402
for (pfn = start, i = 0; i < npages; pfn++, i++) {
mm/migrate_device.c
1403
struct page *page = pfn_to_page(pfn);
mm/migrate_device.c
1407
src_pfns[i] = migrate_device_pfn_lock(pfn);
mm/migrate_device.c
1414
pfn += j - 1;
mm/migrate_device.c
210
unsigned long pfn = page_to_pfn(folio_page(folio, 0));
mm/migrate_device.c
212
migrate->src[migrate->npages] = migrate_pfn(pfn) | write
mm/migrate_device.c
279
unsigned long mpfn = 0, pfn;
mm/migrate_device.c
336
pfn = pte_pfn(pte);
mm/migrate_device.c
337
if (is_zero_pfn(pfn) &&
mm/migrate_device.c
373
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mm/migrate_device.c
913
unsigned long pfn;
mm/migrate_device.c
928
pfn = migrate->src[idx] >> MIGRATE_PFN_SHIFT;
mm/migrate_device.c
930
migrate->src[i+idx] = migrate_pfn(pfn + i) | flags;
mm/mm_init.c
1015
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
mm/mm_init.c
1020
__init_single_page(page, pfn, zone_idx, nid);
mm/mm_init.c
1049
if (pageblock_aligned(pfn)) {
mm/mm_init.c
1099
unsigned long pfn, end_pfn = head_pfn + nr_pages;
mm/mm_init.c
1109
for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
mm/mm_init.c
1110
struct page *page = pfn_to_page(pfn);
mm/mm_init.c
1112
__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
mm/mm_init.c
1124
unsigned long pfn, end_pfn = start_pfn + nr_pages;
mm/mm_init.c
1145
for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
mm/mm_init.c
1146
struct page *page = pfn_to_page(pfn);
mm/mm_init.c
1148
__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
mm/mm_init.c
1153
memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
mm/mm_init.c
1997
static void __init deferred_free_pages(unsigned long pfn,
mm/mm_init.c
2006
page = pfn_to_page(pfn);
mm/mm_init.c
2009
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
mm/mm_init.c
2018
accept_memory(PFN_PHYS(pfn), nr_pages * PAGE_SIZE);
mm/mm_init.c
2020
for (i = 0; i < nr_pages; i++, page++, pfn++) {
mm/mm_init.c
2021
if (pageblock_aligned(pfn))
mm/mm_init.c
2044
unsigned long pfn, unsigned long end_pfn)
mm/mm_init.c
2047
unsigned long nr_pages = end_pfn - pfn;
mm/mm_init.c
2049
struct page *page = pfn_to_page(pfn);
mm/mm_init.c
2051
for (; pfn < end_pfn; pfn++, page++)
mm/mm_init.c
2052
__init_single_page(page, pfn, zid, nid);
mm/mm_init.c
2492
void __init memblock_free_pages(unsigned long pfn, unsigned int order)
mm/mm_init.c
2494
struct page *page = pfn_to_page(pfn);
mm/mm_init.c
2497
int nid = early_pfn_to_nid(pfn);
mm/mm_init.c
2499
if (!early_page_initialised(pfn, nid))
mm/mm_init.c
592
void __meminit __init_single_page(struct page *page, unsigned long pfn,
mm/mm_init.c
596
set_page_links(page, zone, nid, pfn);
mm/mm_init.c
606
set_page_address(page, __va(pfn << PAGE_SHIFT));
mm/mm_init.c
627
static int __meminit __early_pfn_to_nid(unsigned long pfn,
mm/mm_init.c
633
if (state->last_start <= pfn && pfn < state->last_end)
mm/mm_init.c
636
nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
mm/mm_init.c
646
int __meminit early_pfn_to_nid(unsigned long pfn)
mm/mm_init.c
652
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
mm/mm_init.c
680
void __meminit __init_page_from_nid(unsigned long pfn, int nid)
mm/mm_init.c
690
if (zone_spans_pfn(zone, pfn))
mm/mm_init.c
693
__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
mm/mm_init.c
695
if (pageblock_aligned(pfn))
mm/mm_init.c
696
init_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE,
mm/mm_init.c
707
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
mm/mm_init.c
709
if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
mm/mm_init.c
720
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
mm/mm_init.c
749
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
mm/mm_init.c
750
NODE_DATA(nid)->first_deferred_pfn = pfn;
mm/mm_init.c
756
static void __meminit __init_deferred_page(unsigned long pfn, int nid)
mm/mm_init.c
758
if (early_page_initialised(pfn, nid))
mm/mm_init.c
761
__init_page_from_nid(pfn, nid);
mm/mm_init.c
766
static inline bool early_page_initialised(unsigned long pfn, int nid)
mm/mm_init.c
771
static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
mm/mm_init.c
776
static inline void __init_deferred_page(unsigned long pfn, int nid)
mm/mm_init.c
781
void __meminit init_deferred_page(unsigned long pfn, int nid)
mm/mm_init.c
783
__init_deferred_page(pfn, nid);
mm/mm_init.c
795
unsigned long pfn;
mm/mm_init.c
797
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
mm/mm_init.c
798
struct page *page = pfn_to_page(pfn);
mm/mm_init.c
800
__init_deferred_page(pfn, nid);
mm/mm_init.c
813
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
mm/mm_init.c
818
if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
mm/mm_init.c
820
if (*pfn < memblock_region_memory_end_pfn(r))
mm/mm_init.c
824
if (*pfn >= memblock_region_memory_base_pfn(r) &&
mm/mm_init.c
826
*pfn = memblock_region_memory_end_pfn(r);
mm/mm_init.c
860
unsigned long pfn;
mm/mm_init.c
863
for_each_valid_pfn(pfn, spfn, epfn) {
mm/mm_init.c
864
__init_single_page(pfn_to_page(pfn), pfn, zone, node);
mm/mm_init.c
865
__SetPageReserved(pfn_to_page(pfn));
mm/mm_init.c
889
unsigned long pfn, end_pfn = start_pfn + size;
mm/mm_init.c
913
for (pfn = start_pfn; pfn < end_pfn; ) {
mm/mm_init.c
919
if (overlap_memmap_init(zone, &pfn))
mm/mm_init.c
921
if (defer_init(nid, pfn, zone_end_pfn)) {
mm/mm_init.c
927
page = pfn_to_page(pfn);
mm/mm_init.c
928
__init_single_page(page, pfn, zone, nid);
mm/mm_init.c
943
if (pageblock_aligned(pfn)) {
mm/mm_init.c
948
pfn++;
mm/nommu.c
1591
unsigned long pfn, unsigned long size, pgprot_t prot)
mm/nommu.c
1593
if (addr != (pfn << PAGE_SHIFT))
mm/nommu.c
1603
unsigned long pfn = start >> PAGE_SHIFT;
mm/nommu.c
1606
pfn += vma->vm_pgoff;
mm/nommu.c
1607
return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
mm/page_alloc.c
1009
combined_pfn = buddy_pfn & pfn;
mm/page_alloc.c
1010
page = page + (combined_pfn - pfn);
mm/page_alloc.c
1011
pfn = combined_pfn;
mm/page_alloc.c
1023
to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
mm/page_alloc.c
1490
unsigned long pfn;
mm/page_alloc.c
1494
pfn = page_to_pfn(page);
mm/page_alloc.c
1495
mt = get_pfnblock_migratetype(page, pfn);
mm/page_alloc.c
1502
__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
mm/page_alloc.c
1512
unsigned long pfn, int order, fpi_t fpi)
mm/page_alloc.c
1514
unsigned long end = pfn + (1 << order);
mm/page_alloc.c
1516
VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order));
mm/page_alloc.c
1524
int mt = get_pfnblock_migratetype(page, pfn);
mm/page_alloc.c
1526
__free_one_page(page, pfn, zone, order, mt, fpi);
mm/page_alloc.c
1527
pfn += 1 << order;
mm/page_alloc.c
1528
if (pfn == end)
mm/page_alloc.c
1530
page = pfn_to_page(pfn);
mm/page_alloc.c
1544
unsigned long pfn, unsigned int order,
mm/page_alloc.c
1573
split_large_buddy(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
1582
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
1586
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
1947
unsigned long pfn, end_pfn;
mm/page_alloc.c
1954
for (pfn = start_pfn; pfn < end_pfn;) {
mm/page_alloc.c
1955
page = pfn_to_page(pfn);
mm/page_alloc.c
1957
pfn++;
mm/page_alloc.c
1969
pfn += 1 << order;
mm/page_alloc.c
1980
unsigned long pfn, start, end;
mm/page_alloc.c
1982
pfn = page_to_pfn(page);
mm/page_alloc.c
1983
start = pageblock_start_pfn(pfn);
mm/page_alloc.c
1984
end = pageblock_end_pfn(pfn);
mm/page_alloc.c
2003
for (pfn = start; pfn < end;) {
mm/page_alloc.c
2004
page = pfn_to_page(pfn);
mm/page_alloc.c
2009
pfn += nr;
mm/page_alloc.c
2019
pfn++;
mm/page_alloc.c
2054
unsigned long pfn = start_pfn;
mm/page_alloc.c
2056
while (!PageBuddy(page = pfn_to_page(pfn))) {
mm/page_alloc.c
2060
pfn &= ~0UL << order;
mm/page_alloc.c
2066
if (pfn + (1 << buddy_order(page)) > start_pfn)
mm/page_alloc.c
2067
return pfn;
mm/page_alloc.c
2937
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
2956
migratetype = get_pfnblock_migratetype(page, pfn);
mm/page_alloc.c
2959
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
2977
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
3003
unsigned long pfn = folio_pfn(folio);
mm/page_alloc.c
3014
pfn, order, FPI_NONE);
mm/page_alloc.c
3027
unsigned long pfn = folio_pfn(folio);
mm/page_alloc.c
3032
migratetype = get_pfnblock_migratetype(&folio->page, pfn);
mm/page_alloc.c
3048
free_one_page(zone, &folio->page, pfn,
mm/page_alloc.c
3059
free_one_page(zone, &folio->page, pfn,
mm/page_alloc.c
330
unsigned long pfn)
mm/page_alloc.c
333
return section_to_usemap(__pfn_to_section(pfn));
mm/page_alloc.c
339
static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
mm/page_alloc.c
342
pfn &= (PAGES_PER_SECTION-1);
mm/page_alloc.c
344
pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
mm/page_alloc.c
346
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
mm/page_alloc.c
355
get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn,
mm/page_alloc.c
367
VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
mm/page_alloc.c
369
bitmap = get_pageblock_bitmap(page, pfn);
mm/page_alloc.c
370
*bitidx = pfn_to_bitidx(page, pfn);
mm/page_alloc.c
387
unsigned long pfn,
mm/page_alloc.c
394
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mm/page_alloc.c
412
bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
mm/page_alloc.c
421
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mm/page_alloc.c
437
get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
mm/page_alloc.c
442
flags = __get_pfnblock_flags_mask(page, pfn, mask);
mm/page_alloc.c
459
static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn,
mm/page_alloc.c
466
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mm/page_alloc.c
482
void set_pfnblock_bit(const struct page *page, unsigned long pfn,
mm/page_alloc.c
491
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mm/page_alloc.c
502
void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
mm/page_alloc.c
511
get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx);
mm/page_alloc.c
574
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
581
ret = !zone_spans_pfn(zone, pfn);
mm/page_alloc.c
586
pfn, zone_to_nid(zone), zone->name,
mm/page_alloc.c
6825
unsigned long pfn = start;
mm/page_alloc.c
6836
while (pfn < end || !list_empty(&cc->migratepages)) {
mm/page_alloc.c
6844
ret = isolate_migratepages_range(cc, pfn, end);
mm/page_alloc.c
6847
pfn = cc->migrate_pfn;
mm/page_alloc.c
6939
static void __free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
mm/page_alloc.c
6941
for (; nr_pages--; pfn++)
mm/page_alloc.c
6942
free_frozen_pages(pfn_to_page(pfn), 0);
mm/page_alloc.c
7227
unsigned long ret, pfn, flags;
mm/page_alloc.c
7240
pfn = ALIGN(zone->zone_start_pfn, nr_pages);
mm/page_alloc.c
7241
while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
mm/page_alloc.c
7242
if (pfn_range_valid_contig(zone, pfn, nr_pages,
mm/page_alloc.c
7253
ret = alloc_contig_frozen_range_noprof(pfn,
mm/page_alloc.c
7254
pfn + nr_pages,
mm/page_alloc.c
7258
return pfn_to_page(pfn);
mm/page_alloc.c
7261
pfn += nr_pages;
mm/page_alloc.c
7320
void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages)
mm/page_alloc.c
7322
struct page *first_page = pfn_to_page(pfn);
mm/page_alloc.c
7334
__free_contig_frozen_range(pfn, nr_pages);
mm/page_alloc.c
7345
void free_contig_range(unsigned long pfn, unsigned long nr_pages)
mm/page_alloc.c
7347
if (WARN_ON_ONCE(PageHead(pfn_to_page(pfn))))
mm/page_alloc.c
7350
for (; nr_pages--; pfn++)
mm/page_alloc.c
7351
__free_page(pfn_to_page(pfn));
mm/page_alloc.c
7410
unsigned long pfn = start_pfn;
mm/page_alloc.c
7415
offline_mem_sections(pfn, end_pfn);
mm/page_alloc.c
7416
zone = page_zone(pfn_to_page(pfn));
mm/page_alloc.c
7418
while (pfn < end_pfn) {
mm/page_alloc.c
7419
page = pfn_to_page(pfn);
mm/page_alloc.c
7425
pfn++;
mm/page_alloc.c
7436
pfn++;
mm/page_alloc.c
7445
pfn += (1 << order);
mm/page_alloc.c
7458
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
7462
const struct page *head = page - (pfn & ((1 << order) - 1));
mm/page_alloc.c
7518
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
7525
struct page *page_head = page - (pfn & ((1 << order) - 1));
mm/page_alloc.c
7559
unsigned long pfn = page_to_pfn(page);
mm/page_alloc.c
7560
int migratetype = get_pfnblock_migratetype(page, pfn);
mm/page_alloc.c
7563
__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
mm/page_alloc.c
893
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
mm/page_alloc.c
902
higher_page_pfn = buddy_pfn & pfn;
mm/page_alloc.c
903
higher_page = page + (higher_page_pfn - pfn);
mm/page_alloc.c
945
unsigned long pfn,
mm/page_alloc.c
959
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
mm/page_alloc.c
972
buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
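The __free_one_page()/buddy_merge_likely() lines above compute a block's buddy and its merged parent purely from the PFN: buddies of order n differ only in PFN bit n, so XOR locates the buddy and AND (combined_pfn = buddy_pfn & pfn) yields the merged block's base. A standalone check of that arithmetic:

    /*
     * Standalone illustration of the buddy-PFN arithmetic used in
     * mm/page_alloc.c: the buddy of a 2^order block differs only in
     * PFN bit 'order'.
     */
    #include <assert.h>
    #include <stdio.h>

    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
            return pfn ^ (1UL << order); /* flip the order bit */
    }

    int main(void)
    {
            unsigned long pfn = 0x1358;  /* order-3 aligned block */
            unsigned int order = 3;
            unsigned long buddy = find_buddy_pfn(pfn, order); /* 0x1350 */
            unsigned long combined = buddy & pfn;             /* 0x1350 */

            /* The merged order-4 block starts at the lower of the two. */
            assert(combined == (pfn < buddy ? pfn : buddy));
            printf("pfn=%#lx buddy=%#lx combined=%#lx\n", pfn, buddy, combined);
            return 0;
    }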
mm/page_ext.c
174
unsigned long pfn = page_to_pfn(page);
mm/page_ext.c
188
index = pfn - round_down(node_start_pfn(page_to_nid(page)),
mm/page_ext.c
254
unsigned long pfn = page_to_pfn(page);
mm/page_ext.c
255
struct mem_section *section = __pfn_to_section(pfn);
mm/page_ext.c
267
return get_entry(page_ext, pfn);
mm/page_ext.c
287
static int __meminit init_section_page_ext(unsigned long pfn, int nid)
mm/page_ext.c
293
section = __pfn_to_section(pfn);
mm/page_ext.c
317
pfn &= PAGE_SECTION_MASK;
mm/page_ext.c
318
section->page_ext = (void *)base - page_ext_size * pfn;
mm/page_ext.c
341
static void __free_page_ext(unsigned long pfn)
mm/page_ext.c
346
ms = __pfn_to_section(pfn);
mm/page_ext.c
359
base = get_entry(base, pfn);
mm/page_ext.c
363
static void __invalidate_page_ext(unsigned long pfn)
mm/page_ext.c
368
ms = __pfn_to_section(pfn);
mm/page_ext.c
379
unsigned long start, end, pfn;
mm/page_ext.c
385
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
mm/page_ext.c
386
fail = init_section_page_ext(pfn, nid);
mm/page_ext.c
391
end = pfn - PAGES_PER_SECTION;
mm/page_ext.c
392
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
mm/page_ext.c
393
__free_page_ext(pfn);
mm/page_ext.c
401
unsigned long start, end, pfn;
mm/page_ext.c
415
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
mm/page_ext.c
416
__invalidate_page_ext(pfn);
mm/page_ext.c
420
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
mm/page_ext.c
421
__free_page_ext(pfn);
mm/page_ext.c
454
unsigned long pfn;
mm/page_ext.c
470
for (pfn = start_pfn; pfn < end_pfn;
mm/page_ext.c
471
pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
mm/page_ext.c
473
if (!pfn_valid(pfn))
mm/page_ext.c
481
if (pfn_to_nid(pfn) != nid)
mm/page_ext.c
483
if (init_section_page_ext(pfn, nid))
mm/page_ext.c
511
struct page_ext *page_ext_lookup(unsigned long pfn)
mm/page_ext.c
513
return lookup_page_ext(pfn_to_page(pfn));
mm/page_idle.c
124
unsigned long pfn, end_pfn;
mm/page_idle.c
130
pfn = pos * BITS_PER_BYTE;
mm/page_idle.c
131
if (pfn >= max_pfn)
mm/page_idle.c
134
end_pfn = pfn + count * BITS_PER_BYTE;
mm/page_idle.c
138
for (; pfn < end_pfn; pfn++) {
mm/page_idle.c
139
bit = pfn % BITMAP_CHUNK_BITS;
mm/page_idle.c
142
folio = page_idle_get_folio(pfn);
mm/page_idle.c
169
unsigned long pfn, end_pfn;
mm/page_idle.c
175
pfn = pos * BITS_PER_BYTE;
mm/page_idle.c
176
if (pfn >= max_pfn)
mm/page_idle.c
179
end_pfn = pfn + count * BITS_PER_BYTE;
mm/page_idle.c
183
for (; pfn < end_pfn; pfn++) {
mm/page_idle.c
184
bit = pfn % BITMAP_CHUNK_BITS;
mm/page_idle.c
186
folio = page_idle_get_folio(pfn);
mm/page_idle.c
34
static struct folio *page_idle_get_folio(unsigned long pfn)
mm/page_idle.c
36
struct page *page = pfn_to_online_page(pfn);
mm/page_isolation.c
287
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
mm/page_isolation.c
294
page = pfn_to_online_page(pfn + i);
mm/page_isolation.c
330
unsigned long pfn;
mm/page_isolation.c
382
for (pfn = start_pfn; pfn < boundary_pfn;) {
mm/page_isolation.c
383
struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);
mm/page_isolation.c
386
pfn = page_to_pfn(page);
mm/page_isolation.c
389
pfn += MAX_ORDER_NR_PAGES;
mm/page_isolation.c
397
VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn);
mm/page_isolation.c
399
pfn += 1UL << order;
mm/page_isolation.c
423
pfn = head_pfn + nr_pages;
mm/page_isolation.c
440
pfn++;
mm/page_isolation.c
489
unsigned long pfn;
mm/page_isolation.c
514
for (pfn = isolate_start + pageblock_nr_pages;
mm/page_isolation.c
515
pfn < isolate_end - pageblock_nr_pages;
mm/page_isolation.c
516
pfn += pageblock_nr_pages) {
mm/page_isolation.c
517
page = __first_valid_page(pfn, pageblock_nr_pages);
mm/page_isolation.c
520
undo_isolate_page_range(isolate_start, pfn);
mm/page_isolation.c
538
unsigned long pfn;
mm/page_isolation.c
543
for (pfn = isolate_start;
mm/page_isolation.c
544
pfn < isolate_end;
mm/page_isolation.c
545
pfn += pageblock_nr_pages) {
mm/page_isolation.c
546
page = __first_valid_page(pfn, pageblock_nr_pages);
mm/page_isolation.c
560
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
mm/page_isolation.c
565
while (pfn < end_pfn) {
mm/page_isolation.c
566
page = pfn_to_page(pfn);
mm/page_isolation.c
573
pfn += 1 << buddy_order(page);
mm/page_isolation.c
577
pfn++;
mm/page_isolation.c
585
pfn++;
mm/page_isolation.c
590
return pfn;
mm/page_isolation.c
611
unsigned long pfn, flags;
mm/page_isolation.c
631
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
mm/page_isolation.c
632
page = __first_valid_page(pfn, pageblock_nr_pages);
mm/page_isolation.c
637
if ((pfn < end_pfn) || !page) {
mm/page_isolation.c
645
pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, mode);
mm/page_isolation.c
648
ret = pfn < end_pfn ? -EBUSY : 0;
mm/page_isolation.c
651
trace_test_pages_isolated(start_pfn, end_pfn, pfn);
mm/page_owner.c
431
unsigned long pfn, block_end_pfn;
mm/page_owner.c
438
pfn = zone->zone_start_pfn;
mm/page_owner.c
445
for (; pfn < end_pfn; ) {
mm/page_owner.c
446
page = pfn_to_online_page(pfn);
mm/page_owner.c
448
pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
mm/page_owner.c
452
block_end_pfn = pageblock_end_pfn(pfn);
mm/page_owner.c
457
for (; pfn < block_end_pfn; pfn++) {
mm/page_owner.c
459
page = pfn_to_page(pfn);
mm/page_owner.c
469
pfn += (1UL << freepage_order) - 1;
mm/page_owner.c
491
pfn = block_end_pfn;
mm/page_owner.c
495
pfn += (1UL << page_owner->order) - 1;
mm/page_owner.c
548
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
mm/page_owner.c
572
pfn,
mm/page_owner.c
574
pfn >> pageblock_order,
mm/page_owner.c
662
unsigned long pfn;
mm/page_owner.c
673
pfn = min_low_pfn;
mm/page_owner.c
675
pfn = *ppos;
mm/page_owner.c
677
while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
mm/page_owner.c
678
pfn++;
mm/page_owner.c
681
for (; pfn < max_pfn; pfn++) {
mm/page_owner.c
694
if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
mm/page_owner.c
695
pfn += MAX_ORDER_NR_PAGES - 1;
mm/page_owner.c
699
page = pfn_to_page(pfn);
mm/page_owner.c
704
pfn += (1UL << freepage_order) - 1;
mm/page_owner.c
732
if (!IS_ALIGNED(pfn, 1 << page_owner->order))
mm/page_owner.c
744
*ppos = pfn + 1;
mm/page_owner.c
748
return print_page_owner(buf, count, pfn, page,
mm/page_owner.c
774
unsigned long pfn = zone->zone_start_pfn;
mm/page_owner.c
783
for (; pfn < end_pfn; ) {
mm/page_owner.c
786
if (!pfn_valid(pfn)) {
mm/page_owner.c
787
pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
mm/page_owner.c
791
block_end_pfn = pageblock_end_pfn(pfn);
mm/page_owner.c
794
for (; pfn < block_end_pfn; pfn++) {
mm/page_owner.c
795
struct page *page = pfn_to_page(pfn);
mm/page_owner.c
812
pfn += (1UL << order) - 1;
mm/page_table_check.c
105
if (!pfn_valid(pfn))
mm/page_table_check.c
108
page = pfn_to_page(pfn);
mm/page_table_check.c
63
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
mm/page_table_check.c
70
if (!pfn_valid(pfn))
mm/page_table_check.c
73
page = pfn_to_page(pfn);
mm/page_table_check.c
97
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
mm/page_vma_mapped.c
109
unsigned long pfn;
mm/page_vma_mapped.c
118
pfn = softleaf_to_pfn(entry);
mm/page_vma_mapped.c
120
pfn = pte_pfn(ptent);
mm/page_vma_mapped.c
129
pfn = softleaf_to_pfn(entry);
mm/page_vma_mapped.c
132
if ((pfn + pte_nr - 1) < pvmw->pfn)
mm/page_vma_mapped.c
134
if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
mm/page_vma_mapped.c
140
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
mm/page_vma_mapped.c
142
if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
mm/page_vma_mapped.c
144
if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
mm/page_vma_mapped.c
346
.pfn = page_to_pfn(page),
mm/rmap.c
1227
unsigned long pfn;
mm/rmap.c
1236
.pfn = state->pfn,
mm/rmap.c
1274
unsigned long pfn, unsigned long nr_pages)
mm/rmap.c
1279
.pfn = pfn,
mm/rmap.c
1310
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
mm/rmap.c
1314
.pfn = pfn,
mm/rmap.c
1995
unsigned long pfn;
mm/rmap.c
2102
pfn = pte_pfn(pteval);
mm/rmap.c
2106
pfn = softleaf_to_pfn(entry);
mm/rmap.c
2110
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/rmap.c
2423
unsigned long pfn;
mm/rmap.c
2462
__maybe_unused unsigned long pfn;
mm/rmap.c
2481
pfn = pmd_pfn(pmdval);
mm/rmap.c
2483
pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval));
mm/rmap.c
2485
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/rmap.c
2508
pfn = pte_pfn(pteval);
mm/rmap.c
2512
pfn = softleaf_to_pfn(entry);
mm/rmap.c
2516
subpage = folio_page(folio, pfn - folio_pfn(folio));
mm/rmap.c
2574
flush_cache_page(vma, address, pfn);
mm/shuffle.c
37
unsigned long pfn, int order)
mm/shuffle.c
39
struct page *page = pfn_to_online_page(pfn);
mm/sparse-vmemmap.c
119
unsigned long pfn, nr_pfns, nr_align;
mm/sparse-vmemmap.c
127
pfn = vmem_altmap_next_pfn(altmap);
mm/sparse-vmemmap.c
130
nr_align = ALIGN(pfn, nr_align) - pfn;
mm/sparse-vmemmap.c
136
pfn += nr_align;
mm/sparse-vmemmap.c
139
__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
mm/sparse-vmemmap.c
140
return __va(__pfn_to_phys(pfn));
mm/sparse-vmemmap.c
146
unsigned long pfn = pte_pfn(ptep_get(pte));
mm/sparse-vmemmap.c
147
int actual_node = early_pfn_to_nid(pfn);
mm/sparse-vmemmap.c
547
struct page * __meminit __populate_section_memmap(unsigned long pfn,
mm/sparse-vmemmap.c
551
unsigned long start = (unsigned long) pfn_to_page(pfn);
mm/sparse-vmemmap.c
555
if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
mm/sparse-vmemmap.c
560
r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
mm/sparse-vmemmap.c
567
return pfn_to_page(pfn);
mm/sparse-vmemmap.c
595
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
mm/sparse-vmemmap.c
598
int idx = subsection_map_index(pfn);
mm/sparse-vmemmap.c
599
int end = subsection_map_index(pfn + nr_pages - 1);
mm/sparse-vmemmap.c
604
void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
mm/sparse-vmemmap.c
606
int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
mm/sparse-vmemmap.c
607
unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
mm/sparse-vmemmap.c
614
- (pfn & ~PAGE_SECTION_MASK));
mm/sparse-vmemmap.c
616
subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
mm/sparse-vmemmap.c
619
pfns, subsection_map_index(pfn),
mm/sparse-vmemmap.c
620
subsection_map_index(pfn + pfns - 1));
mm/sparse-vmemmap.c
622
pfn += pfns;
mm/sparse-vmemmap.c
632
unsigned long pfn;
mm/sparse-vmemmap.c
634
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
mm/sparse-vmemmap.c
635
unsigned long section_nr = pfn_to_section_nr(pfn);
mm/sparse-vmemmap.c
645
unsigned long pfn;
mm/sparse-vmemmap.c
647
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
mm/sparse-vmemmap.c
648
unsigned long section_nr = pfn_to_section_nr(pfn);
mm/sparse-vmemmap.c
655
static struct page * __meminit populate_section_memmap(unsigned long pfn,
mm/sparse-vmemmap.c
659
return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
mm/sparse-vmemmap.c
662
static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
mm/sparse-vmemmap.c
665
unsigned long start = (unsigned long) pfn_to_page(pfn);
mm/sparse-vmemmap.c
678
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
mm/sparse-vmemmap.c
682
struct mem_section *ms = __pfn_to_section(pfn);
mm/sparse-vmemmap.c
686
subsection_mask_set(map, pfn, nr_pages);
mm/sparse-vmemmap.c
692
pfn, nr_pages))
mm/sparse-vmemmap.c
705
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
mm/sparse-vmemmap.c
707
struct mem_section *ms = __pfn_to_section(pfn);
mm/sparse-vmemmap.c
712
subsection_mask_set(map, pfn, nr_pages);
mm/sparse-vmemmap.c
739
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
mm/sparse-vmemmap.c
742
struct mem_section *ms = __pfn_to_section(pfn);
mm/sparse-vmemmap.c
747
if (clear_subsection_map(pfn, nr_pages))
mm/sparse-vmemmap.c
770
memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
mm/sparse-vmemmap.c
779
depopulate_section_memmap(pfn, nr_pages, altmap);
mm/sparse-vmemmap.c
790
static struct page * __meminit section_activate(int nid, unsigned long pfn,
mm/sparse-vmemmap.c
794
struct mem_section *ms = __pfn_to_section(pfn);
mm/sparse-vmemmap.c
806
rc = fill_subsection_map(pfn, nr_pages);
mm/sparse-vmemmap.c
822
return pfn_to_page(pfn);
mm/sparse-vmemmap.c
824
memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
mm/sparse-vmemmap.c
826
section_deactivate(pfn, nr_pages, altmap);
mm/sparse-vmemmap.c
887
void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
mm/sparse-vmemmap.c
890
struct mem_section *ms = __pfn_to_section(pfn);
mm/sparse-vmemmap.c
895
section_deactivate(pfn, nr_pages, altmap);
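The sparse-vmemmap entries bucket PFNs into sections and subsections (PAGES_PER_SUBSECTION, subsection_map_index()). A sketch of the index arithmetic, using x86-64 example constants (SECTION_SIZE_BITS = 27, 2 MiB subsections); other architectures size sections differently:

    /*
     * Sketch of sparsemem PFN bucketing with x86-64 example constants;
     * the real values are per-architecture (see asm/sparsemem.h).
     */
    #include <stdio.h>

    #define PAGE_SHIFT           12
    #define SECTION_SIZE_BITS    27 /* 128 MiB section */
    #define SUBSECTION_SHIFT     21 /* 2 MiB subsection */
    #define PFN_SECTION_SHIFT    (SECTION_SIZE_BITS - PAGE_SHIFT)
    #define PAGES_PER_SECTION    (1UL << PFN_SECTION_SHIFT)
    #define PAGES_PER_SUBSECTION (1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))

    int main(void)
    {
            unsigned long pfn = 0x123456;
            unsigned long section = pfn >> PFN_SECTION_SHIFT;
            unsigned long subsection = (pfn & (PAGES_PER_SECTION - 1)) /
                                       PAGES_PER_SUBSECTION;

            printf("pfn %#lx -> section %lu, subsection %lu\n",
                   pfn, section, subsection);
            return 0;
    }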
mm/sparse.c
173
unsigned long pfn;
mm/sparse.c
177
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
mm/sparse.c
178
unsigned long section_nr = pfn_to_section_nr(pfn);
mm/sparse.c
239
struct page __init *__populate_section_memmap(unsigned long pfn,
mm/sparse.c
370
unsigned long pfn = section_nr_to_pfn(pnum);
mm/sparse.c
377
map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
mm/swap_state.c
145
unsigned long pfn = folio_pfn(folio);
mm/swap_state.c
156
__swap_table_set(ci, ci_off, pfn_to_swp_tb(pfn, __swp_tb_get_count(old_tb)));
mm/swap_state.c
325
unsigned long pfn = folio_pfn(new);
mm/swap_state.c
336
__swap_table_set(ci, ci_off, pfn_to_swp_tb(pfn, __swp_tb_get_count(old_tb)));
mm/swap_table.h
105
static inline unsigned long pfn_to_swp_tb(unsigned long pfn, unsigned int count)
mm/swap_table.h
113
swp_tb = (pfn << SWP_TB_PFN_MARK_BITS) | SWP_TB_PFN_MARK;
mm/util.c
1332
ps->pfn = page_to_pfn(page);
mm/util.c
1583
unsigned long pfn;
mm/util.c
1591
for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION);
mm/util.c
1592
pfn < end_pfn; pfn += PAGES_PER_SECTION)
mm/util.c
1593
if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn)))
mm/vmalloc.c
106
pfn = phys_addr >> PAGE_SHIFT;
mm/vmalloc.c
115
if (pfn_valid(pfn)) {
mm/vmalloc.c
116
page = pfn_to_page(pfn);
mm/vmalloc.c
123
size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
mm/vmalloc.c
125
pte_t entry = pfn_pte(pfn, prot);
mm/vmalloc.c
129
pfn += PFN_DOWN(size);
mm/vmalloc.c
133
set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
mm/vmalloc.c
134
pfn++;
mm/vmalloc.c
3575
unsigned long pfn = data->pfns[data->idx];
mm/vmalloc.c
3578
if (WARN_ON_ONCE(pfn_valid(pfn)))
mm/vmalloc.c
3581
ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
mm/vmalloc.c
99
u64 pfn;
mm/vmscan.c
3390
unsigned long pfn = pte_pfn(pte);
mm/vmscan.c
3394
if (!pte_present(pte) || is_zero_pfn(pfn))
mm/vmscan.c
3403
if (WARN_ON_ONCE(!pfn_valid(pfn)))
mm/vmscan.c
3406
if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
mm/vmscan.c
3409
return pfn;
mm/vmscan.c
3415
unsigned long pfn = pmd_pfn(pmd);
mm/vmscan.c
3425
if (WARN_ON_ONCE(!pfn_valid(pfn)))
mm/vmscan.c
3428
if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
mm/vmscan.c
3431
return pfn;
mm/vmscan.c
3434
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
mm/vmscan.c
3437
struct folio *folio = pfn_folio(pfn);
mm/vmscan.c
3519
unsigned long pfn;
mm/vmscan.c
3528
pfn = get_pte_pfn(ptent, args->vma, addr, pgdat);
mm/vmscan.c
3529
if (pfn == -1)
mm/vmscan.c
3532
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmscan.c
3612
unsigned long pfn;
mm/vmscan.c
3628
pfn = get_pmd_pfn(pmd[i], vma, addr, pgdat);
mm/vmscan.c
3629
if (pfn == -1)
mm/vmscan.c
3632
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmscan.c
3698
unsigned long pfn = get_pmd_pfn(val, vma, addr, pgdat);
mm/vmscan.c
3702
if (pfn != -1)
mm/vmscan.c
4215
struct folio *folio = pfn_folio(pvmw->pfn);
mm/vmscan.c
4262
unsigned long pfn;
mm/vmscan.c
4266
pfn = get_pte_pfn(ptent, vma, addr, pgdat);
mm/vmscan.c
4267
if (pfn == -1)
mm/vmscan.c
4270
folio = get_pfn_folio(pfn, memcg, pgdat);
mm/vmstat.c
1636
unsigned long pfn;
mm/vmstat.c
1641
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
mm/vmstat.c
1644
page = pfn_to_online_page(pfn);
mm/zpdesc.h
150
static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
mm/zpdesc.h
152
return page_zpdesc(pfn_to_page(pfn));
sound/soc/intel/catpt/pcm.c
173
u32 pfn, offset;
sound/soc/intel/catpt/pcm.c
176
pfn = PFN_DOWN(snd_sgbuf_get_addr(databuf, i * PAGE_SIZE));
sound/soc/intel/catpt/pcm.c
182
*page_table |= (pfn << 4);
sound/soc/intel/catpt/pcm.c
184
*page_table |= pfn;
sound/soc/sof/sof-utils.c
45
u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
sound/soc/sof/sof-utils.c
65
put_unaligned_le32((pg_table[0] & 0xf) | pfn << 4,
sound/soc/sof/sof-utils.c
68
put_unaligned_le32(pfn, pg_table);
tools/include/linux/mm.h
21
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
tools/include/linux/mm.h
45
static inline int early_pfn_to_nid(unsigned long pfn)
tools/mm/page-types.c
1027
pfn = pagemap_pfn(buf[i]);
tools/mm/page-types.c
1028
if (!pfn)
tools/mm/page-types.c
1030
if (!kpageflags_read(&flags, pfn, 1))
tools/mm/page-types.c
1032
if (!kpagecgroup_read(&cgroup, pfn, 1))
tools/mm/page-types.c
1034
if (!kpagecount_read(&mapcnt, pfn, 1))
tools/mm/page-types.c
1040
add_page(off / page_size + i, pfn,
tools/mm/page-types.c
310
unsigned long pfn;
tools/mm/page-types.c
313
pfn = PM_PFRAME(val);
tools/mm/page-types.c
315
pfn = 0;
tools/mm/page-types.c
317
return pfn;
tools/mm/page-types.c
734
unsigned long pfn;
tools/mm/page-types.c
744
pfn = pagemap_pfn(buf[i]);
tools/mm/page-types.c
745
if (pfn)
tools/mm/page-types.c
746
walk_pfn(index + i, pfn, 1, buf[i]);
tools/mm/page-types.c
979
unsigned long nr_pages, pfn, i;
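tools/mm/page-types.c reads PFNs out of /proc/<pid>/pagemap, whose 64-bit entries hold the PFN in bits 0-54 when bit 63 (present) is set; without CAP_SYS_ADMIN the kernel reports the PFN as zero, which is why the code above treats pfn == 0 as a miss. A minimal userspace sketch of the same lookup:

    /*
     * Minimal sketch: translate a virtual address to a PFN via
     * /proc/self/pagemap (Documentation/admin-guide/mm/pagemap.rst).
     * Requires CAP_SYS_ADMIN to see non-zero PFNs.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define PM_PFRAME_MASK ((1ULL << 55) - 1) /* bits 0-54: PFN */
    #define PM_PRESENT     (1ULL << 63)       /* page present  */

    static uint64_t addr_to_pfn(const void *addr)
    {
            long psize = sysconf(_SC_PAGESIZE);
            off_t off = ((uintptr_t)addr / psize) * sizeof(uint64_t);
            uint64_t entry = 0;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 0;
            if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
                    entry = 0;
            close(fd);
            return (entry & PM_PRESENT) ? (entry & PM_PFRAME_MASK) : 0;
    }

    int main(void)
    {
            int x = 42; /* touch a page so it is present */

            printf("pfn=%#llx\n", (unsigned long long)addr_to_pfn(&x));
            return 0;
    }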
tools/testing/memblock/internal.h
18
void memblock_free_pages(unsigned long pfn, unsigned int order)
tools/testing/memblock/linux/mmzone.h
26
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
tools/testing/memblock/linux/mmzone.h
27
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
tools/testing/nvdimm/pmem-dax.c
13
unsigned long *pfn)
tools/testing/nvdimm/pmem-dax.c
31
if (pfn)
tools/testing/nvdimm/pmem-dax.c
32
*pfn = page_to_pfn(page);
tools/testing/nvdimm/pmem-dax.c
41
if (pfn)
tools/testing/nvdimm/pmem-dax.c
42
*pfn = PHYS_PFN(pmem->phys_addr + offset);
tools/testing/scatterlist/linux/mm.h
53
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
tools/testing/scatterlist/main.c
11
unsigned *pfn;
tools/testing/scatterlist/main.c
42
printf(" %x", test->pfn[i]);
tools/testing/scatterlist/main.c
56
{ -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
tools/testing/scatterlist/main.c
57
{ 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
tools/testing/scatterlist/main.c
58
{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
59
{ 0, 1, pfn(0), NULL, 1, sgmax, 1 },
tools/testing/scatterlist/main.c
60
{ 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
61
{ 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
62
{ 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
63
{ 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
64
{ 0, 3, pfn(0, 1, 2), pfn(3, 4, 5), 3 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
65
{ 0, 3, pfn(0, 1, 2), pfn(4, 5, 6), 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
66
{ 0, 3, pfn(0, 2, 1), NULL, 3 * PAGE_SIZE, sgmax, 3 },
tools/testing/scatterlist/main.c
67
{ 0, 3, pfn(0, 1, 3), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
68
{ 0, 3, pfn(1, 2, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
69
{ 0, 3, pfn(1, 3, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
70
{ 0, 4, pfn(0, 1, 3, 4), NULL, 4 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
71
{ 0, 5, pfn(0, 1, 3, 4, 5), NULL, 5 * PAGE_SIZE, sgmax, 2 },
tools/testing/scatterlist/main.c
72
{ 0, 5, pfn(0, 1, 3, 4, 6), NULL, 5 * PAGE_SIZE, sgmax, 3 },
tools/testing/scatterlist/main.c
73
{ 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, sgmax, 1 },
tools/testing/scatterlist/main.c
74
{ 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, 2 * PAGE_SIZE,
tools/testing/scatterlist/main.c
76
{ 0, 6, pfn(0, 1, 2, 3, 4, 5), NULL, 6 * PAGE_SIZE,
tools/testing/scatterlist/main.c
78
{ 0, 6, pfn(0, 2, 3, 4, 5, 6), NULL, 6 * PAGE_SIZE,
tools/testing/scatterlist/main.c
80
{ 0, 6, pfn(0, 1, 3, 4, 5, 6), pfn(7, 8, 9, 10, 11, 12),
tools/testing/scatterlist/main.c
92
set_pages(pages, test->pfn, test->num_pages);
tools/testing/selftests/kvm/access_tracking_perf_test.c
130
uint64_t pfn;
tools/testing/selftests/kvm/access_tracking_perf_test.c
136
pfn = entry & PAGEMAP_PFN_MASK;
tools/testing/selftests/kvm/access_tracking_perf_test.c
137
__TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
tools/testing/selftests/kvm/access_tracking_perf_test.c
139
return pfn;
tools/testing/selftests/kvm/access_tracking_perf_test.c
142
static bool is_page_idle(int page_idle_fd, uint64_t pfn)
tools/testing/selftests/kvm/access_tracking_perf_test.c
144
uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
tools/testing/selftests/kvm/access_tracking_perf_test.c
146
return !!((bits >> (pfn % 64)) & 1);
tools/testing/selftests/kvm/access_tracking_perf_test.c
149
static void mark_page_idle(int page_idle_fd, uint64_t pfn)
tools/testing/selftests/kvm/access_tracking_perf_test.c
151
uint64_t bits = 1ULL << (pfn % 64);
tools/testing/selftests/kvm/access_tracking_perf_test.c
153
TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
tools/testing/selftests/kvm/access_tracking_perf_test.c
154
"Set page_idle bits for PFN 0x%" PRIx64, pfn);
tools/testing/selftests/kvm/access_tracking_perf_test.c
197
uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
tools/testing/selftests/kvm/access_tracking_perf_test.c
199
if (!pfn) {
tools/testing/selftests/kvm/access_tracking_perf_test.c
204
if (is_page_idle(page_idle_fd, pfn)) {
tools/testing/selftests/kvm/access_tracking_perf_test.c
209
mark_page_idle(page_idle_fd, pfn);
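The access-tracking selftest drives /sys/kernel/mm/page_idle/bitmap, where each 64-bit word at byte offset 8 * (pfn / 64) holds one bit per page frame. A condensed sketch of the test/mark pair above, with error handling stripped and assuming page_idle_fd is already open read-write as root:

    /*
     * Sketch of the page_idle bitmap accessors used by the selftest above
     * (Documentation/admin-guide/mm/idle_page_tracking.rst). Assumes
     * page_idle_fd was opened on /sys/kernel/mm/page_idle/bitmap.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    static bool is_page_idle(int page_idle_fd, uint64_t pfn)
    {
            uint64_t bits = 0;

            /* One 64-bit word covers 64 consecutive PFNs. */
            pread(page_idle_fd, &bits, sizeof(bits), 8 * (pfn / 64));
            return (bits >> (pfn % 64)) & 1;
    }

    static void mark_page_idle(int page_idle_fd, uint64_t pfn)
    {
            uint64_t bits = 1ULL << (pfn % 64);

            /* Only set bits take effect; zero bits leave pages untouched. */
            pwrite(page_idle_fd, &bits, sizeof(bits), 8 * (pfn / 64));
    }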
tools/testing/selftests/mm/hugepage-vmemmap.c
112
pfn = virt_to_pfn(addr);
tools/testing/selftests/mm/hugepage-vmemmap.c
113
if (pfn == -1UL) {
tools/testing/selftests/mm/hugepage-vmemmap.c
119
printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
tools/testing/selftests/mm/hugepage-vmemmap.c
121
if (check_page_flags(pfn) < 0) {
tools/testing/selftests/mm/hugepage-vmemmap.c
52
static int check_page_flags(unsigned long pfn)
tools/testing/selftests/mm/hugepage-vmemmap.c
61
lseek(fd, pfn * sizeof(pageflags), SEEK_SET);
tools/testing/selftests/mm/hugepage-vmemmap.c
93
unsigned long pfn;
tools/testing/selftests/mm/memory-failure.c
135
self->pfn = pagemap_get_pfn(self->pagemap_fd, vaddr);
tools/testing/selftests/mm/memory-failure.c
136
ASSERT_NE(self->pfn, -1UL);
tools/testing/selftests/mm/memory-failure.c
174
ASSERT_NE(pagemap_get_pfn(self->pagemap_fd, vaddr), self->pfn);
tools/testing/selftests/mm/memory-failure.c
199
ASSERT_EQ(pageflags_get(self->pfn, self->kpageflags_fd, &pfn_flags), 0);
tools/testing/selftests/mm/memory-failure.c
209
ASSERT_EQ(unpoison_memory(self->pfn), 0);
tools/testing/selftests/mm/memory-failure.c
212
ASSERT_EQ(pageflags_get(self->pfn, self->kpageflags_fd, &pfn_flags), 0);
tools/testing/selftests/mm/memory-failure.c
46
unsigned long pfn;
tools/testing/selftests/mm/rmap.c
294
unsigned long pfn;
tools/testing/selftests/mm/rmap.c
304
pfn = pagemap_get_pfn(pagemap_fd, data->region);
tools/testing/selftests/mm/rmap.c
305
if (pfn != *data->expected_pfn)
tools/testing/selftests/mm/split_huge_page_test.c
109
unsigned long pfn;
tools/testing/selftests/mm/split_huge_page_test.c
111
pfn = pagemap_get_pfn(pagemap_fd, vaddr);
tools/testing/selftests/mm/split_huge_page_test.c
114
if (pfn == -1UL)
tools/testing/selftests/mm/split_huge_page_test.c
117
if (pageflags_get(pfn, kpageflags_fd, flags))
tools/testing/selftests/mm/split_huge_page_test.c
52
unsigned long pfn;
tools/testing/selftests/mm/split_huge_page_test.c
55
pfn = pagemap_get_pfn(pagemap_fd, vaddr);
tools/testing/selftests/mm/split_huge_page_test.c
58
if (pfn == -1UL)
tools/testing/selftests/mm/split_huge_page_test.c
61
if (pageflags_get(pfn, kpageflags_fd, &pfn_flags))
tools/testing/selftests/mm/split_huge_page_test.c
75
pfn_head = pfn & ~(nr_pages - 1);
tools/testing/selftests/mm/transhuge-stress.c
101
pfn = allocate_transhuge(p, pagemap_fd);
tools/testing/selftests/mm/transhuge-stress.c
103
if (pfn < 0) {
tools/testing/selftests/mm/transhuge-stress.c
106
size_t idx = pfn >> (HPAGE_SHIFT - pshift());
tools/testing/selftests/mm/transhuge-stress.c
99
int64_t pfn;
tools/testing/selftests/mm/vm_util.c
25
const unsigned long pfn = (unsigned long)start / getpagesize();
tools/testing/selftests/mm/vm_util.c
29
ret = pread(fd, &entry, sizeof(entry), pfn * sizeof(entry));
tools/testing/selftests/mm/vm_util.c
341
int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags)
tools/testing/selftests/mm/vm_util.c
346
pfn * sizeof(*flags));
tools/testing/selftests/mm/vm_util.c
751
int unpoison_memory(unsigned long pfn)
tools/testing/selftests/mm/vm_util.c
761
len = sprintf(buf, "0x%lx\n", pfn);
tools/testing/selftests/mm/vm_util.h
159
int unpoison_memory(unsigned long pfn);
tools/testing/selftests/mm/vm_util.h
99
int pageflags_get(unsigned long pfn, int kpageflags_fd, uint64_t *flags);
tools/testing/vma/include/stubs.h
110
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
tools/testing/vma/include/stubs.h
115
unsigned long pfn, unsigned long size, pgprot_t pgprot)
tools/testing/vma/linux/mmzone.h
26
#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
tools/testing/vma/linux/mmzone.h
27
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
virt/kvm/guest_memfd.c
530
kvm_pfn_t pfn = page_to_pfn(page);
virt/kvm/guest_memfd.c
533
kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
virt/kvm/guest_memfd.c
67
kvm_pfn_t pfn = folio_file_pfn(folio, index);
virt/kvm/guest_memfd.c
69
int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
virt/kvm/guest_memfd.c
72
index, gfn, pfn, rc);
virt/kvm/guest_memfd.c
755
pgoff_t index, kvm_pfn_t *pfn,
virt/kvm/guest_memfd.c
782
*pfn = folio_file_pfn(folio, index);
virt/kvm/guest_memfd.c
790
gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
virt/kvm/guest_memfd.c
801
folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
virt/kvm/guest_memfd.c
831
kvm_pfn_t pfn;
virt/kvm/guest_memfd.c
836
folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
virt/kvm/guest_memfd.c
851
ret = post_populate(kvm, gfn, pfn, src_page, opaque);
virt/kvm/kvm_main.c
2827
kvm_pfn_t pfn;
virt/kvm/kvm_main.c
2835
pfn = map->pfn;
virt/kvm/kvm_main.c
2837
pfn = page_to_pfn(page);
virt/kvm/kvm_main.c
2841
return pfn;
virt/kvm/kvm_main.c
2848
static bool hva_to_pfn_fast(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
virt/kvm/kvm_main.c
2869
*pfn = kvm_resolve_pfn(kfp, page, NULL, true);
virt/kvm/kvm_main.c
2880
static int hva_to_pfn_slow(struct kvm_follow_pfn *kfp, kvm_pfn_t *pfn)
virt/kvm/kvm_main.c
2921
*pfn = kvm_resolve_pfn(kfp, page, NULL, flags & FOLL_WRITE);
virt/kvm/kvm_main.c
2985
kvm_pfn_t pfn;
virt/kvm/kvm_main.c
2993
if (hva_to_pfn_fast(kfp, &pfn))
virt/kvm/kvm_main.c
2994
return pfn;
virt/kvm/kvm_main.c
2996
npages = hva_to_pfn_slow(kfp, &pfn);
virt/kvm/kvm_main.c
2998
return pfn;
virt/kvm/kvm_main.c
3009
pfn = KVM_PFN_ERR_FAULT;
virt/kvm/kvm_main.c
3011
r = hva_to_pfn_remapped(vma, kfp, &pfn);
virt/kvm/kvm_main.c
3015
pfn = KVM_PFN_ERR_FAULT;
virt/kvm/kvm_main.c
3019
pfn = KVM_PFN_ERR_NEEDS_IO;
virt/kvm/kvm_main.c
3021
pfn = KVM_PFN_ERR_FAULT;
virt/kvm/kvm_main.c
3024
return pfn;
virt/kvm/kvm_main.c
3124
map->pfn = kvm_follow_pfn(&kfp);
virt/kvm/kvm_main.c
3125
if (is_error_noslot_pfn(map->pfn))
virt/kvm/kvm_main.c
3128
if (pfn_valid(map->pfn)) {
virt/kvm/kvm_main.c
3129
map->page = pfn_to_page(map->pfn);
virt/kvm/kvm_main.c
3133
map->hva = memremap(pfn_to_hpa(map->pfn), PAGE_SIZE, MEMREMAP_WB);
virt/kvm/pfncache.c
101
if (pfn_valid(pfn))
virt/kvm/pfncache.c
102
return kmap(pfn_to_page(pfn));
virt/kvm/pfncache.c
105
return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
virt/kvm/pfncache.c
111
static void gpc_unmap(kvm_pfn_t pfn, void *khva)
virt/kvm/pfncache.c
114
if (is_error_noslot_pfn(pfn) || !khva)
virt/kvm/pfncache.c
117
if (pfn_valid(pfn)) {
virt/kvm/pfncache.c
118
kunmap(pfn_to_page(pfn));
virt/kvm/pfncache.c
218
if (new_pfn == gpc->pfn)
virt/kvm/pfncache.c
238
gpc->pfn = new_pfn;
virt/kvm/pfncache.c
279
old_pfn = gpc->pfn;
virt/kvm/pfncache.c
35
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
virt/kvm/pfncache.c
350
gpc->pfn = KVM_PFN_ERR_FAULT;
virt/kvm/pfncache.c
355
unmap_old = (old_pfn != gpc->pfn);
virt/kvm/pfncache.c
391
gpc->pfn = KVM_PFN_ERR_FAULT;
virt/kvm/pfncache.c
474
old_pfn = gpc->pfn;
virt/kvm/pfncache.c
475
gpc->pfn = KVM_PFN_ERR_FAULT;
virt/kvm/pfncache.c
48
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
virt/kvm/pfncache.c
99
static void *gpc_map(kvm_pfn_t pfn)