arch/alpha/mm/init.c
121
p4d_t *p4d;
arch/alpha/mm/init.c
161
p4d = p4d_offset(pgd, VMALLOC_START);
arch/alpha/mm/init.c
162
pud = pud_offset(p4d, VMALLOC_START);
arch/arc/include/asm/pgtable-levels.h
111
#define p4d_pgtable(p4d) ((pud_t *)(p4d_val(p4d) & PAGE_MASK))
arch/arc/include/asm/pgtable-levels.h
112
#define p4d_page(p4d) virt_to_page(p4d_pgtable(p4d))
arch/arc/include/asm/pgtable-levels.h
113
#define set_p4d(p4dp, p4d) (*(p4dp) = p4d)
arch/arc/mm/fault.c
33
p4d_t *p4d, *p4d_k;
arch/arc/mm/fault.c
45
p4d = p4d_offset(pgd, address);
arch/arc/mm/fault.c
49
if (!p4d_present(*p4d))
arch/arc/mm/fault.c
50
set_p4d(p4d, *p4d_k);
arch/arc/mm/fault.c
52
pud = pud_offset(p4d, address);
arch/arm/lib/uaccess_with_memcpy.c
27
p4d_t *p4d;
arch/arm/lib/uaccess_with_memcpy.c
37
p4d = p4d_offset(pgd, addr);
arch/arm/lib/uaccess_with_memcpy.c
38
if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
arch/arm/lib/uaccess_with_memcpy.c
41
pud = pud_offset(p4d, addr);
arch/arm/mm/dump.c
366
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
arch/arm/mm/dump.c
368
pud_t *pud = pud_offset(p4d, 0);
arch/arm/mm/dump.c
384
p4d_t *p4d = p4d_offset(pgd, 0);
arch/arm/mm/dump.c
388
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
arch/arm/mm/dump.c
390
if (!p4d_none(*p4d)) {
arch/arm/mm/dump.c
391
walk_pud(st, p4d, addr);
arch/arm/mm/dump.c
393
note_page(st, addr, 2, p4d_val(*p4d), NULL);
arch/arm/mm/fault-armv.c
69
p4d_t *p4d;
arch/arm/mm/fault-armv.c
80
p4d = p4d_offset(pgd, address);
arch/arm/mm/fault-armv.c
81
if (p4d_none_or_clear_bad(p4d))
arch/arm/mm/fault-armv.c
84
pud = pud_offset(p4d, address);
arch/arm/mm/fault.c
509
p4d_t *p4d, *p4d_k;
arch/arm/mm/fault.c
52
p4d_t *p4d;
arch/arm/mm/fault.c
524
p4d = p4d_offset(pgd, addr);
arch/arm/mm/fault.c
529
if (!p4d_present(*p4d))
arch/arm/mm/fault.c
530
set_p4d(p4d, *p4d_k);
arch/arm/mm/fault.c
532
pud = pud_offset(p4d, addr);
arch/arm/mm/fault.c
57
p4d = p4d_offset(pgd, addr);
arch/arm/mm/fault.c
58
if (p4d_none(*p4d))
arch/arm/mm/fault.c
61
if (p4d_bad(*p4d)) {
arch/arm/mm/fault.c
66
pud = pud_offset(p4d, addr);
arch/arm/mm/idmap.c
71
p4d_t *p4d = p4d_offset(pgd, addr);
arch/arm/mm/idmap.c
72
pud_t *pud = pud_offset(p4d, addr);
arch/arm/mm/mmu.c
1002
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
arch/arm/mm/mmu.c
1003
if (WARN_ON(!p4d))
arch/arm/mm/mmu.c
1005
pud = pud_alloc(mm, p4d, md->virtual);
arch/arm/mm/mmu.c
834
static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
arch/arm/mm/mmu.c
839
pud_t *pud = pud_offset(p4d, addr);
arch/arm/mm/mmu.c
854
p4d_t *p4d = p4d_offset(pgd, addr);
arch/arm/mm/mmu.c
859
alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
arch/arm/mm/mmu.c
861
} while (p4d++, addr = next, addr != end);
arch/arm/mm/mmu.c
911
p4d_t *p4d = p4d_offset(pgd, addr);
arch/arm/mm/mmu.c
912
pud_t *pud = pud_offset(p4d, addr);
arch/arm/mm/mmu.c
999
p4d_t *p4d;
arch/arm/mm/pgd.c
143
p4d_t *p4d;
arch/arm/mm/pgd.c
155
p4d = p4d_offset(pgd, 0);
arch/arm/mm/pgd.c
156
if (p4d_none_or_clear_bad(p4d))
arch/arm/mm/pgd.c
159
pud = pud_offset(p4d, 0);
arch/arm/mm/pgd.c
176
p4d_clear(p4d);
arch/arm/mm/pgd.c
180
p4d_free(mm, p4d);
arch/arm/mm/pgd.c
191
p4d = p4d_offset(pgd, 0);
arch/arm/mm/pgd.c
192
if (p4d_none_or_clear_bad(p4d))
arch/arm/mm/pgd.c
194
pud = pud_offset(p4d, 0);
arch/arm/mm/pgd.c
201
p4d_clear(p4d);
arch/arm/mm/pgd.c
205
p4d_free(mm, p4d);
arch/arm64/include/asm/pgtable-types.h
48
typedef struct { p4dval_t p4d; } p4d_t;
arch/arm64/include/asm/pgtable-types.h
49
#define p4d_val(x) ((x).p4d)
arch/arm64/include/asm/pgtable.h
1001
return (pud_t *)__va(p4d_page_paddr(p4d));
arch/arm64/include/asm/pgtable.h
1012
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
arch/arm64/include/asm/pgtable.h
1016
return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
arch/arm64/include/asm/pgtable.h
1054
#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
arch/arm64/include/asm/pgtable.h
1060
#define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
arch/arm64/include/asm/pgtable.h
488
static inline pte_t p4d_pte(p4d_t p4d)
arch/arm64/include/asm/pgtable.h
490
return __pte(p4d_val(p4d));
arch/arm64/include/asm/pgtable.h
732
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
arch/arm64/include/asm/pgtable.h
961
#define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d))
arch/arm64/include/asm/pgtable.h
962
#define p4d_bad(p4d) (pgtable_l4_enabled() && \
arch/arm64/include/asm/pgtable.h
963
((p4d_val(p4d) & P4D_TYPE_MASK) != \
arch/arm64/include/asm/pgtable.h
965
#define p4d_present(p4d) (!p4d_none(p4d))
arch/arm64/include/asm/pgtable.h
967
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
arch/arm64/include/asm/pgtable.h
970
set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
arch/arm64/include/asm/pgtable.h
974
WRITE_ONCE(*p4dp, p4d);
arch/arm64/include/asm/pgtable.h
984
static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
arch/arm64/include/asm/pgtable.h
986
return __p4d_to_phys(p4d);
arch/arm64/include/asm/pgtable.h
999
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/arm64/include/asm/ptdump.h
68
void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
arch/arm64/include/asm/ptdump.h
84
static inline void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { }
arch/arm64/mm/fault.c
159
p4d_t *p4dp, p4d;
arch/arm64/mm/fault.c
168
p4d = READ_ONCE(*p4dp);
arch/arm64/mm/fault.c
169
pr_cont(", p4d=%016llx", p4d_val(p4d));
arch/arm64/mm/fault.c
170
if (p4d_none(p4d) || p4d_bad(p4d))
arch/arm64/mm/fixmap.c
77
p4d_t p4d = READ_ONCE(*p4dp);
arch/arm64/mm/fixmap.c
80
if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
arch/arm64/mm/fixmap.c
81
p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
arch/arm64/mm/fixmap.c
90
if (p4d_none(p4d))
arch/arm64/mm/mmu.c
1539
p4d_t *p4dp, p4d;
arch/arm64/mm/mmu.c
1544
p4d = READ_ONCE(*p4dp);
arch/arm64/mm/mmu.c
1545
if (p4d_none(p4d))
arch/arm64/mm/mmu.c
1548
WARN_ON(!p4d_present(p4d));
arch/arm64/mm/mmu.c
1700
p4d_t *p4dp, p4d;
arch/arm64/mm/mmu.c
1706
p4d = READ_ONCE(*p4dp);
arch/arm64/mm/mmu.c
1707
if (p4d_none(p4d))
arch/arm64/mm/mmu.c
1710
WARN_ON(!p4d_present(p4d));
arch/arm64/mm/mmu.c
357
p4d_t p4d = READ_ONCE(*p4dp);
arch/arm64/mm/mmu.c
360
if (p4d_none(p4d)) {
arch/arm64/mm/mmu.c
375
BUG_ON(p4d_bad(p4d));
arch/arm64/mm/mmu.c
670
p4d_t *p4dp, p4d;
arch/arm64/mm/mmu.c
694
p4d = p4dp_get(p4dp);
arch/arm64/mm/mmu.c
695
if (!p4d_present(p4d))
arch/arm64/mm/ptdump.c
269
void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
arch/arm64/mm/ptdump.c
271
note_page(pt_st, addr, 1, p4d_val(p4d));
arch/loongarch/include/asm/pgalloc.h
37
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/loongarch/include/asm/pgalloc.h
39
set_p4d(p4d, __p4d((unsigned long)pud));
arch/loongarch/include/asm/pgtable.h
153
static inline int p4d_none(p4d_t p4d)
arch/loongarch/include/asm/pgtable.h
155
return p4d_val(p4d) == (unsigned long)invalid_pud_table;
arch/loongarch/include/asm/pgtable.h
158
static inline int p4d_bad(p4d_t p4d)
arch/loongarch/include/asm/pgtable.h
160
return p4d_val(p4d) & ~PAGE_MASK;
arch/loongarch/include/asm/pgtable.h
163
static inline int p4d_present(p4d_t p4d)
arch/loongarch/include/asm/pgtable.h
165
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
arch/loongarch/include/asm/pgtable.h
168
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/loongarch/include/asm/pgtable.h
170
return (pud_t *)p4d_val(p4d);
arch/loongarch/include/asm/pgtable.h
173
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
arch/loongarch/include/asm/pgtable.h
175
WRITE_ONCE(*p4d, p4dval);
arch/loongarch/include/asm/pgtable.h
183
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
arch/loongarch/include/asm/pgtable.h
184
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
arch/loongarch/kvm/mmu.c
673
p4d_t p4d;
arch/loongarch/kvm/mmu.c
705
p4d = p4dp_get(p4d_offset(&pgd, hva));
arch/loongarch/kvm/mmu.c
706
if (p4d_none(p4d) || !p4d_present(p4d))
arch/loongarch/kvm/mmu.c
709
pud = pudp_get(pud_offset(&p4d, hva));
arch/loongarch/mm/fault.c
37
p4d_t *p4d;
arch/loongarch/mm/fault.c
49
p4d = p4d_offset(pgd, address);
arch/loongarch/mm/fault.c
50
if (!p4d_present(p4dp_get(p4d)))
arch/loongarch/mm/fault.c
53
pud = pud_offset(p4d, address);
arch/loongarch/mm/hugetlbpage.c
20
p4d_t *p4d;
arch/loongarch/mm/hugetlbpage.c
25
p4d = p4d_alloc(mm, pgd, addr);
arch/loongarch/mm/hugetlbpage.c
26
pud = pud_alloc(mm, p4d, addr);
arch/loongarch/mm/hugetlbpage.c
37
p4d_t *p4d;
arch/loongarch/mm/hugetlbpage.c
43
p4d = p4d_offset(pgd, addr);
arch/loongarch/mm/hugetlbpage.c
44
if (p4d_present(p4dp_get(p4d))) {
arch/loongarch/mm/hugetlbpage.c
45
pud = pud_offset(p4d, addr);
arch/loongarch/mm/init.c
147
p4d_t *p4d = p4d_offset(pgd, addr);
arch/loongarch/mm/init.c
151
if (p4d_none(p4dp_get(p4d))) {
arch/loongarch/mm/init.c
153
p4d_populate(&init_mm, p4d, pud);
arch/loongarch/mm/init.c
159
pud = pud_offset(p4d, addr);
arch/loongarch/mm/kasan_init.c
24
#define __p4d_none(early, p4d) (0)
arch/loongarch/mm/kasan_init.c
26
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
arch/loongarch/mm/kasan_init.c
27
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
arch/loongarch/mm/pageattr.c
164
p4d_t *p4d;
arch/loongarch/mm/pageattr.c
179
p4d = p4d_offset(pgd, addr);
arch/loongarch/mm/pageattr.c
180
if (p4d_none(p4dp_get(p4d)))
arch/loongarch/mm/pageattr.c
182
if (p4d_leaf(p4dp_get(p4d)))
arch/loongarch/mm/pageattr.c
185
pud = pud_offset(p4d, addr);
arch/loongarch/mm/pageattr.c
41
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
arch/loongarch/mm/pageattr.c
44
p4d_t val = p4dp_get(p4d);
arch/loongarch/mm/pageattr.c
48
set_p4d(p4d, val);
arch/m68k/include/asm/mmu_context.h
128
p4d = p4d_offset(pgd, mmuar);
arch/m68k/include/asm/mmu_context.h
129
if (p4d_none(*p4d))
arch/m68k/include/asm/mmu_context.h
132
pud = pud_offset(p4d, mmuar);
arch/m68k/include/asm/mmu_context.h
99
p4d_t *p4d;
arch/m68k/kernel/sys_m68k.c
470
p4d_t *p4d;
arch/m68k/kernel/sys_m68k.c
481
p4d = p4d_offset(pgd, (unsigned long)mem);
arch/m68k/kernel/sys_m68k.c
482
if (!p4d_present(*p4d))
arch/m68k/kernel/sys_m68k.c
484
pud = pud_offset(p4d, (unsigned long)mem);
arch/m68k/mm/mcfmmu.c
102
p4d = p4d_offset(pgd, mmuar);
arch/m68k/mm/mcfmmu.c
103
if (p4d_none(*p4d))
arch/m68k/mm/mcfmmu.c
106
pud = pud_offset(p4d, mmuar);
arch/m68k/mm/mcfmmu.c
82
p4d_t *p4d;
arch/m68k/sun3x/dvma.c
82
p4d_t *p4d;
arch/m68k/sun3x/dvma.c
94
p4d = p4d_offset(pgd, vaddr);
arch/m68k/sun3x/dvma.c
95
pud = pud_offset(p4d, vaddr);
arch/microblaze/mm/pgtable.c
139
p4d_t *p4d;
arch/microblaze/mm/pgtable.c
146
p4d = p4d_offset(pgd_offset_k(va), va);
arch/microblaze/mm/pgtable.c
147
pud = pud_offset(p4d, va);
arch/microblaze/mm/pgtable.c
198
p4d_t *p4d;
arch/microblaze/mm/pgtable.c
206
p4d = p4d_offset(pgd, addr & PAGE_MASK);
arch/microblaze/mm/pgtable.c
207
pud = pud_offset(p4d, addr & PAGE_MASK);
arch/mips/include/asm/pgalloc.h
94
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/mips/include/asm/pgalloc.h
96
set_p4d(p4d, __p4d((unsigned long)pud));
arch/mips/include/asm/pgtable-64.h
184
static inline int p4d_none(p4d_t p4d)
arch/mips/include/asm/pgtable-64.h
186
return p4d_val(p4d) == (unsigned long)invalid_pud_table;
arch/mips/include/asm/pgtable-64.h
189
static inline int p4d_bad(p4d_t p4d)
arch/mips/include/asm/pgtable-64.h
191
if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
arch/mips/include/asm/pgtable-64.h
197
static inline int p4d_present(p4d_t p4d)
arch/mips/include/asm/pgtable-64.h
199
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
arch/mips/include/asm/pgtable-64.h
207
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/mips/include/asm/pgtable-64.h
209
return (pud_t *)p4d_val(p4d);
arch/mips/include/asm/pgtable-64.h
212
#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
arch/mips/include/asm/pgtable-64.h
213
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
arch/mips/include/asm/pgtable-64.h
217
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
arch/mips/include/asm/pgtable-64.h
219
*p4d = p4dval;
arch/mips/kvm/mmu.c
107
p4d_t *p4d;
arch/mips/kvm/mmu.c
117
p4d = p4d_offset(pgd, addr);
arch/mips/kvm/mmu.c
118
pud = pud_offset(p4d, addr);
arch/mips/kvm/mmu.c
230
p4d_t *p4d;
arch/mips/kvm/mmu.c
242
p4d = p4d_offset(pgd, 0);
arch/mips/kvm/mmu.c
243
pud = pud_offset(p4d + i, 0);
arch/mips/kvm/mmu.c
352
p4d_t *p4d; \
arch/mips/kvm/mmu.c
363
p4d = p4d_offset(pgd, 0); \
arch/mips/kvm/mmu.c
364
pud = pud_offset(p4d + i, 0); \
arch/mips/mm/fault.c
287
p4d_t *p4d, *p4d_k;
arch/mips/mm/fault.c
299
p4d = p4d_offset(pgd, address);
arch/mips/mm/fault.c
304
pud = pud_offset(p4d, address);
arch/mips/mm/hugetlbpage.c
28
p4d_t *p4d;
arch/mips/mm/hugetlbpage.c
33
p4d = p4d_alloc(mm, pgd, addr);
arch/mips/mm/hugetlbpage.c
34
pud = pud_alloc(mm, p4d, addr);
arch/mips/mm/hugetlbpage.c
45
p4d_t *p4d;
arch/mips/mm/hugetlbpage.c
51
p4d = p4d_offset(pgd, addr);
arch/mips/mm/hugetlbpage.c
52
if (p4d_present(*p4d)) {
arch/mips/mm/hugetlbpage.c
53
pud = pud_offset(p4d, addr);
arch/mips/mm/pgtable-32.c
47
p4d_t *p4d;
arch/mips/mm/pgtable-32.c
73
p4d = p4d_offset(pgd, vaddr);
arch/mips/mm/pgtable-32.c
74
pud = pud_offset(p4d, vaddr);
arch/nios2/mm/fault.c
231
p4d_t *p4d, *p4d_k;
arch/nios2/mm/fault.c
243
p4d = p4d_offset(pgd, address);
arch/nios2/mm/fault.c
247
pud = pud_offset(p4d, address);
arch/nios2/mm/ioremap.c
89
p4d_t *p4d;
arch/nios2/mm/ioremap.c
94
p4d = p4d_alloc(&init_mm, dir, address);
arch/nios2/mm/ioremap.c
95
if (!p4d)
arch/nios2/mm/ioremap.c
97
pud = pud_alloc(&init_mm, p4d, address);
arch/openrisc/mm/fault.c
296
p4d_t *p4d, *p4d_k;
arch/openrisc/mm/fault.c
323
p4d = p4d_offset(pgd, address);
arch/openrisc/mm/fault.c
328
pud = pud_offset(p4d, address);
arch/openrisc/mm/init.c
201
p4d_t *p4d;
arch/openrisc/mm/init.c
206
p4d = p4d_offset(pgd_offset_k(va), va);
arch/openrisc/mm/init.c
207
pud = pud_offset(p4d, va);
arch/parisc/kernel/cache.c
414
p4d_t *p4d;
arch/parisc/kernel/cache.c
419
p4d = p4d_offset(pgd, addr);
arch/parisc/kernel/cache.c
420
if (!p4d_none(*p4d)) {
arch/parisc/kernel/cache.c
421
pud = pud_offset(p4d, addr);
arch/parisc/kernel/pci-dma.c
135
p4d_t *p4d;
arch/parisc/kernel/pci-dma.c
139
p4d = p4d_offset(dir, vaddr);
arch/parisc/kernel/pci-dma.c
140
pud = pud_offset(p4d, vaddr);
arch/parisc/mm/fixmap.c
17
p4d_t *p4d = p4d_offset(pgd, vaddr);
arch/parisc/mm/fixmap.c
18
pud_t *pud = pud_offset(p4d, vaddr);
arch/parisc/mm/hugetlbpage.c
30
p4d_t *p4d;
arch/parisc/mm/hugetlbpage.c
43
p4d = p4d_offset(pgd, addr);
arch/parisc/mm/hugetlbpage.c
44
pud = pud_alloc(mm, p4d, addr);
arch/parisc/mm/hugetlbpage.c
57
p4d_t *p4d;
arch/parisc/mm/hugetlbpage.c
66
p4d = p4d_offset(pgd, addr);
arch/parisc/mm/hugetlbpage.c
67
if (!p4d_none(*p4d)) {
arch/parisc/mm/hugetlbpage.c
68
pud = pud_offset(p4d, addr);
arch/parisc/mm/init.c
375
p4d_t *p4d = p4d_offset(pgd, vaddr);
arch/parisc/mm/init.c
376
pud_t *pud = pud_offset(p4d, vaddr);
arch/parisc/mm/init.c
672
p4d_t *p4d = p4d_offset(pgd, addr);
arch/parisc/mm/init.c
673
pud_t *pud = pud_offset(p4d, addr);
arch/powerpc/include/asm/book3s/64/hash.h
152
static inline int hash__p4d_bad(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/hash.h
154
return (p4d_val(p4d) == 0);
arch/powerpc/include/asm/book3s/64/pgtable.h
939
#define p4d_write(p4d) pte_write(p4d_pte(p4d))
arch/powerpc/include/asm/book3s/64/pgtable.h
946
static inline int p4d_none(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/pgtable.h
948
return !p4d_raw(p4d);
arch/powerpc/include/asm/book3s/64/pgtable.h
951
static inline int p4d_present(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/pgtable.h
953
return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
arch/powerpc/include/asm/book3s/64/pgtable.h
956
static inline pte_t p4d_pte(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/pgtable.h
958
return __pte_raw(p4d_raw(p4d));
arch/powerpc/include/asm/book3s/64/pgtable.h
966
static inline int p4d_bad(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/pgtable.h
969
return radix__p4d_bad(p4d);
arch/powerpc/include/asm/book3s/64/pgtable.h
970
return hash__p4d_bad(p4d);
arch/powerpc/include/asm/book3s/64/pgtable.h
974
static inline bool p4d_access_permitted(p4d_t p4d, bool write)
arch/powerpc/include/asm/book3s/64/pgtable.h
976
return pte_access_permitted(p4d_pte(p4d), write);
arch/powerpc/include/asm/book3s/64/pgtable.h
979
extern struct page *p4d_page(p4d_t p4d);
arch/powerpc/include/asm/book3s/64/pgtable.h
984
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/pgtable.h
986
return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
arch/powerpc/include/asm/book3s/64/radix.h
258
static inline int radix__p4d_bad(p4d_t p4d)
arch/powerpc/include/asm/book3s/64/radix.h
260
return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
arch/powerpc/include/asm/nohash/64/pgalloc.h
18
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/powerpc/include/asm/nohash/64/pgalloc.h
20
p4d_set(p4d, (unsigned long)pud);
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
56
#define p4d_none(p4d) (!p4d_val(p4d))
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
57
#define p4d_bad(p4d) (p4d_val(p4d) == 0)
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
58
#define p4d_present(p4d) (p4d_val(p4d) != 0)
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
62
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
64
return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS);
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
72
static inline pte_t p4d_pte(p4d_t p4d)
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
74
return __pte(p4d_val(p4d));
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
81
extern struct page *p4d_page(p4d_t p4d);
arch/powerpc/include/asm/nohash/64/pgtable.h
141
#define p4d_write(p4d)		pte_write(p4d_pte(p4d))
arch/powerpc/kvm/book3s_64_mmu_radix.c
1291
p4d_t p4d, *p4dp;
arch/powerpc/kvm/book3s_64_mmu_radix.c
1365
p4d = READ_ONCE(*p4dp);
arch/powerpc/kvm/book3s_64_mmu_radix.c
1366
if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
arch/powerpc/kvm/book3s_64_mmu_radix.c
1371
pudp = pud_offset(&p4d, gpa);
arch/powerpc/kvm/book3s_64_mmu_radix.c
553
p4d_t *p4d = p4d_offset(pgd, 0);
arch/powerpc/kvm/book3s_64_mmu_radix.c
556
if (!p4d_present(*p4d))
arch/powerpc/kvm/book3s_64_mmu_radix.c
558
pud = pud_offset(p4d, 0);
arch/powerpc/kvm/book3s_64_mmu_radix.c
560
p4d_clear(p4d);
arch/powerpc/kvm/book3s_64_mmu_radix.c
621
p4d_t *p4d;
arch/powerpc/kvm/book3s_64_mmu_radix.c
629
p4d = p4d_offset(pgd, gpa);
arch/powerpc/kvm/book3s_64_mmu_radix.c
632
if (p4d_present(*p4d))
arch/powerpc/kvm/book3s_64_mmu_radix.c
633
pud = pud_offset(p4d, gpa);
arch/powerpc/kvm/book3s_64_mmu_radix.c
654
if (p4d_none(*p4d)) {
arch/powerpc/kvm/book3s_64_mmu_radix.c
657
p4d_populate(kvm->mm, p4d, new_pud);
arch/powerpc/kvm/book3s_64_mmu_radix.c
660
pud = pud_offset(p4d, gpa);
arch/powerpc/mm/book3s64/radix_pgtable.c
1120
p4d_t *p4d;
arch/powerpc/mm/book3s64/radix_pgtable.c
1149
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1150
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1225
p4d_t *p4d;
arch/powerpc/mm/book3s64/radix_pgtable.c
1231
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1232
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1257
p4d_t *p4d;
arch/powerpc/mm/book3s64/radix_pgtable.c
1266
p4d = p4d_offset(pgd, map_addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1267
pud = vmemmap_pud_alloc(p4d, node, map_addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1320
p4d_t *p4d;
arch/powerpc/mm/book3s64/radix_pgtable.c
1328
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
1329
pud = vmemmap_pud_alloc(p4d, node, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
723
static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
arch/powerpc/mm/book3s64/radix_pgtable.c
735
p4d_clear(p4d);
arch/powerpc/mm/book3s64/radix_pgtable.c
901
p4d_t *p4d;
arch/powerpc/mm/book3s64/radix_pgtable.c
909
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/book3s64/radix_pgtable.c
910
if (!p4d_present(*p4d))
arch/powerpc/mm/book3s64/radix_pgtable.c
913
if (p4d_leaf(*p4d)) {
arch/powerpc/mm/book3s64/radix_pgtable.c
924
pud_base = p4d_pgtable(*p4d);
arch/powerpc/mm/book3s64/radix_pgtable.c
926
free_pud_table(pud_base, p4d);
arch/powerpc/mm/book3s64/subpage_prot.c
57
p4d_t *p4d;
arch/powerpc/mm/book3s64/subpage_prot.c
64
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/book3s64/subpage_prot.c
65
if (p4d_none(*p4d))
arch/powerpc/mm/book3s64/subpage_prot.c
67
pud = pud_offset(p4d, addr);
arch/powerpc/mm/hugetlbpage.c
46
p4d_t *p4d;
arch/powerpc/mm/hugetlbpage.c
52
p4d = p4d_offset(pgd_offset(mm, addr), addr);
arch/powerpc/mm/hugetlbpage.c
54
return (pte_t *)p4d;
arch/powerpc/mm/hugetlbpage.c
56
pud = pud_alloc(mm, p4d, addr);
arch/powerpc/mm/kasan/init_book3e_64.c
17
static inline bool kasan_pud_table(p4d_t p4d)
arch/powerpc/mm/kasan/init_book3e_64.c
19
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
arch/powerpc/mm/pgtable.c
390
p4d_t *p4d;
arch/powerpc/mm/pgtable.c
400
p4d = p4d_offset(pgd, addr);
arch/powerpc/mm/pgtable.c
401
BUG_ON(p4d_none(*p4d));
arch/powerpc/mm/pgtable.c
402
pud = pud_offset(p4d, addr);
arch/powerpc/mm/pgtable.c
445
p4d_t p4d, *p4dp;
arch/powerpc/mm/pgtable.c
471
p4d = READ_ONCE(*p4dp);
arch/powerpc/mm/pgtable.c
474
if (p4d_none(p4d))
arch/powerpc/mm/pgtable.c
477
if (p4d_leaf(p4d)) {
arch/powerpc/mm/pgtable.c
488
pudp = pud_offset(&p4d, ea);
arch/powerpc/mm/pgtable_64.c
101
struct page *p4d_page(p4d_t p4d)
arch/powerpc/mm/pgtable_64.c
103
if (p4d_leaf(p4d)) {
arch/powerpc/mm/pgtable_64.c
105
VM_WARN_ON(!p4d_leaf(p4d));
arch/powerpc/mm/pgtable_64.c
106
return pte_page(p4d_pte(p4d));
arch/powerpc/mm/pgtable_64.c
108
return virt_to_page(p4d_pgtable(p4d));
arch/powerpc/mm/ptdump/hashpagetable.c
428
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
arch/powerpc/mm/ptdump/hashpagetable.c
430
pud_t *pud = pud_offset(p4d, 0);
arch/powerpc/mm/ptdump/hashpagetable.c
444
p4d_t *p4d = p4d_offset(pgd, 0);
arch/powerpc/mm/ptdump/hashpagetable.c
448
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
arch/powerpc/mm/ptdump/hashpagetable.c
450
if (!p4d_none(*p4d))
arch/powerpc/mm/ptdump/hashpagetable.c
452
walk_pud(st, p4d, addr);
arch/powerpc/mm/ptdump/ptdump.c
317
static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
arch/powerpc/mm/ptdump/ptdump.c
319
note_page(pt_st, addr, 1, p4d_val(p4d));
arch/riscv/include/asm/pgalloc.h
100
tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
arch/riscv/include/asm/pgalloc.h
42
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/riscv/include/asm/pgalloc.h
47
set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
arch/riscv/include/asm/pgalloc.h
51
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
arch/riscv/include/asm/pgalloc.h
57
set_p4d_safe(p4d,
arch/riscv/include/asm/pgalloc.h
62
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
arch/riscv/include/asm/pgalloc.h
65
unsigned long pfn = virt_to_pfn(p4d);
arch/riscv/include/asm/pgalloc.h
72
p4d_t *p4d)
arch/riscv/include/asm/pgalloc.h
75
unsigned long pfn = virt_to_pfn(p4d);
arch/riscv/include/asm/pgalloc.h
96
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
arch/riscv/include/asm/pgtable-64.h
276
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
279
WRITE_ONCE(*p4dp, p4d);
arch/riscv/include/asm/pgtable-64.h
281
set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
arch/riscv/include/asm/pgtable-64.h
284
static inline int p4d_none(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
287
return (p4d_val(p4d) == 0);
arch/riscv/include/asm/pgtable-64.h
292
static inline int p4d_present(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
295
return (p4d_val(p4d) & _PAGE_PRESENT);
arch/riscv/include/asm/pgtable-64.h
300
static inline int p4d_bad(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
303
return !p4d_present(p4d);
arch/riscv/include/asm/pgtable-64.h
308
static inline void p4d_clear(p4d_t *p4d)
arch/riscv/include/asm/pgtable-64.h
311
set_p4d(p4d, __p4d(0));
arch/riscv/include/asm/pgtable-64.h
319
static inline unsigned long _p4d_pfn(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
321
return __page_val_to_pfn(p4d_val(p4d));
arch/riscv/include/asm/pgtable-64.h
324
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
327
return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
arch/riscv/include/asm/pgtable-64.h
329
return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
arch/riscv/include/asm/pgtable-64.h
331
#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))
arch/riscv/include/asm/pgtable-64.h
333
static inline struct page *p4d_page(p4d_t p4d)
arch/riscv/include/asm/pgtable-64.h
335
return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
arch/riscv/include/asm/pgtable-64.h
341
pud_t *pud_offset(p4d_t *p4d, unsigned long address);
arch/riscv/include/asm/pgtable-64.h
46
unsigned long p4d;
arch/riscv/include/asm/pgtable-64.h
49
#define p4d_val(x) ((x).p4d)
arch/riscv/include/asm/pgtable.h
1302
#define set_p4d_safe(p4dp, p4d) \
arch/riscv/include/asm/pgtable.h
1304
WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
arch/riscv/include/asm/pgtable.h
1305
set_p4d(p4dp, p4d); \
arch/riscv/kernel/hibernate.c
293
p4d_t p4d = READ_ONCE(*src_p4dp);
arch/riscv/kernel/hibernate.c
297
if (p4d_none(p4d))
arch/riscv/kernel/hibernate.c
300
if (p4d_leaf(p4d)) {
arch/riscv/kernel/hibernate.c
301
set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot)));
arch/riscv/kvm/mmu.c
372
p4d_t p4d;
arch/riscv/kvm/mmu.c
394
p4d = p4dp_get(p4d_offset(&pgd, hva));
arch/riscv/kvm/mmu.c
395
if (p4d_none(p4d) || !p4d_present(p4d))
arch/riscv/kvm/mmu.c
398
pud = pudp_get(pud_offset(&p4d, hva));
arch/riscv/mm/fault.c
31
p4d_t *p4dp, p4d;
arch/riscv/mm/fault.c
51
p4d = p4dp_get(p4dp);
arch/riscv/mm/fault.c
52
pr_cont(", p4d=%016lx", p4d_val(p4d));
arch/riscv/mm/fault.c
53
if (p4d_none(p4d) || p4d_bad(p4d) || p4d_leaf(p4d))
arch/riscv/mm/hugetlbpage.c
100
if (!p4d_present(p4dp_get(p4d)))
arch/riscv/mm/hugetlbpage.c
103
pud = pud_offset(p4d, addr);
arch/riscv/mm/hugetlbpage.c
38
p4d_t *p4d;
arch/riscv/mm/hugetlbpage.c
43
p4d = p4d_alloc(mm, pgd, addr);
arch/riscv/mm/hugetlbpage.c
44
if (!p4d)
arch/riscv/mm/hugetlbpage.c
47
pud = pud_alloc(mm, p4d, addr);
arch/riscv/mm/hugetlbpage.c
91
p4d_t *p4d;
arch/riscv/mm/hugetlbpage.c
99
p4d = p4d_offset(pgd, addr);
arch/riscv/mm/init.c
1481
p4d_t *p4d;
arch/riscv/mm/init.c
1486
p4d = p4d_alloc(&init_mm, pgd, addr);
arch/riscv/mm/init.c
1487
if (!p4d)
arch/riscv/mm/init.c
1494
pud = pud_alloc(&init_mm, p4d, addr);
arch/riscv/mm/init.c
1610
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
arch/riscv/mm/init.c
1612
struct page *page = p4d_page(*p4d);
arch/riscv/mm/init.c
1626
p4d_clear(p4d);
arch/riscv/mm/init.c
1734
p4d_t *p4dp, p4d;
arch/riscv/mm/init.c
1740
p4d = p4dp_get(p4dp);
arch/riscv/mm/init.c
1741
if (!p4d_present(p4d))
arch/riscv/mm/init.c
1744
if (p4d_leaf(p4d)) {
arch/riscv/mm/init.c
1748
free_vmemmap_storage(p4d_page(p4d), P4D_SIZE, altmap);
arch/riscv/mm/kasan_init.c
377
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
arch/riscv/mm/kasan_init.c
382
pud_t *pud_k = pud_offset(p4d, vaddr);
arch/riscv/mm/kasan_init.c
80
static void __init kasan_populate_pud(p4d_t *p4d,
arch/riscv/mm/kasan_init.c
87
if (p4d_none(p4dp_get(p4d))) {
arch/riscv/mm/kasan_init.c
89
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
arch/riscv/mm/kasan_init.c
92
pudp = pud_offset(p4d, vaddr);
arch/riscv/mm/pageattr.c
29
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
arch/riscv/mm/pageattr.c
32
p4d_t val = p4dp_get(p4d);
arch/riscv/mm/pageattr.c
36
set_p4d(p4d, val);
arch/riscv/mm/pageattr.c
440
p4d_t *p4d;
arch/riscv/mm/pageattr.c
450
p4d = p4d_offset(pgd, addr);
arch/riscv/mm/pageattr.c
451
if (!p4d_present(p4dp_get(p4d)))
arch/riscv/mm/pageattr.c
453
if (p4d_leaf(p4dp_get(p4d)))
arch/riscv/mm/pageattr.c
456
pud = pud_offset(p4d, addr);
arch/riscv/mm/pgtable.c
43
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
arch/riscv/mm/pgtable.c
46
return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
arch/riscv/mm/pgtable.c
48
return (pud_t *)p4d;
arch/riscv/mm/pgtable.c
63
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
arch/riscv/mm/pgtable.c
68
void p4d_clear_huge(p4d_t *p4d)
arch/riscv/mm/ptdump.c
335
static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
arch/riscv/mm/ptdump.c
337
note_page(pt_st, addr, 1, p4d_val(p4d));
arch/s390/boot/vmem.c
148
static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
arch/s390/boot/vmem.c
153
p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
arch/s390/boot/vmem.c
201
static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
arch/s390/boot/vmem.c
386
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
arch/s390/boot/vmem.c
393
pud = pud_offset(p4d, addr);
arch/s390/boot/vmem.c
422
p4d_t *p4d;
arch/s390/boot/vmem.c
425
p4d = p4d_offset(pgd, addr);
arch/s390/boot/vmem.c
426
for (; addr < end; addr = next, p4d++) {
arch/s390/boot/vmem.c
428
if (p4d_none(*p4d)) {
arch/s390/boot/vmem.c
429
if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
arch/s390/boot/vmem.c
432
p4d_populate(&init_mm, p4d, pud);
arch/s390/boot/vmem.c
434
pgtable_pud_populate(p4d, addr, next, mode);
arch/s390/boot/vmem.c
442
p4d_t *p4d;
arch/s390/boot/vmem.c
457
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
arch/s390/boot/vmem.c
458
pgd_populate(&init_mm, pgd, p4d);
arch/s390/include/asm/page.h
113
DEFINE_PGVAL_FUNC(p4d)
arch/s390/include/asm/page.h
83
typedef struct { unsigned long p4d; } p4d_t;
arch/s390/include/asm/pgalloc.h
118
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
arch/s390/include/asm/pgalloc.h
120
set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
arch/s390/include/asm/pgalloc.h
123
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/s390/include/asm/pgalloc.h
125
set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
arch/s390/include/asm/pgalloc.h
64
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
arch/s390/include/asm/pgalloc.h
69
pagetable_dtor(virt_to_ptdesc(p4d));
arch/s390/include/asm/pgalloc.h
70
crst_table_free(mm, (unsigned long *) p4d);
arch/s390/include/asm/pgtable.h
1429
static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
arch/s390/include/asm/pgtable.h
1431
if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
arch/s390/include/asm/pgtable.h
1432
return (pud_t *) p4d_deref(p4d) + pud_index(address);
arch/s390/include/asm/pgtable.h
1474
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
arch/s390/include/asm/pgtable.h
690
static inline int p4d_folded(p4d_t p4d)
arch/s390/include/asm/pgtable.h
692
return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
arch/s390/include/asm/pgtable.h
695
static inline int p4d_present(p4d_t p4d)
arch/s390/include/asm/pgtable.h
697
if (p4d_folded(p4d))
arch/s390/include/asm/pgtable.h
699
return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
arch/s390/include/asm/pgtable.h
702
static inline int p4d_none(p4d_t p4d)
arch/s390/include/asm/pgtable.h
704
if (p4d_folded(p4d))
arch/s390/include/asm/pgtable.h
706
return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
arch/s390/include/asm/pgtable.h
709
static inline unsigned long p4d_pfn(p4d_t p4d)
arch/s390/include/asm/pgtable.h
714
return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
arch/s390/include/asm/pgtable.h
773
static inline int p4d_bad(p4d_t p4d)
arch/s390/include/asm/pgtable.h
775
unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
arch/s390/include/asm/pgtable.h
781
return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
arch/s390/include/asm/pgtable.h
964
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
arch/s390/include/asm/pgtable.h
966
WRITE_ONCE(*p4dp, p4d);
arch/s390/include/asm/pgtable.h
990
static inline void p4d_clear(p4d_t *p4d)
arch/s390/include/asm/pgtable.h
992
if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
arch/s390/include/asm/pgtable.h
993
set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
arch/s390/include/asm/tlb.h
114
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
arch/s390/include/asm/tlb.h
122
tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
arch/s390/kvm/dat.c
433
if (entry.p4d.h.i) {
arch/s390/kvm/dat.c
441
table = dereference_crste(entry.p4d);
arch/s390/kvm/dat.h
288
union p4d p4d;
arch/s390/kvm/dat.h
307
static_assert(sizeof(union p4d) == sizeof(unsigned long));
arch/s390/kvm/dat.h
321
union p4d p4ds[_CRST_ENTRIES];
arch/s390/kvm/dat.h
645
union p4d : (x).val, \
arch/s390/kvm/dat.h
652
union p4d : (x), \
arch/s390/kvm/dat.h
720
union p4d : (x).val & _REGION_ENTRY_ORIGIN, \
arch/s390/kvm/dat.h
811
static inline struct region3_table *dereference_p4d(union p4d p4d)
arch/s390/kvm/dat.h
813
return phys_to_virt(crste_origin(p4d));
arch/s390/kvm/dat.h
830
union p4d : _dereference_crste(_CRSTE(x)), \
arch/s390/kvm/gaccess.c
1351
if (table.p4d.i)
arch/s390/kvm/gaccess.c
1353
if (table.p4d.tt != TABLE_TYPE_REGION2)
arch/s390/kvm/gaccess.c
1355
if (vaddr.rtx01 < table.p4d.tf || vaddr.rtx01 > table.p4d.tl)
arch/s390/kvm/gaccess.c
1358
w->p |= table.p4d.p;
arch/s390/kvm/gaccess.c
1359
ptr = table.p4d.rto * PAGE_SIZE;
arch/s390/kvm/gaccess.c
31
union region2_table_entry p4d;
arch/s390/mm/dump_pagetables.c
165
static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
arch/s390/mm/dump_pagetables.c
167
note_page(pt_st, addr, 1, p4d_val(p4d));
arch/s390/mm/gmap_helpers.c
118
p4d_t *p4dp, p4d;
arch/s390/mm/gmap_helpers.c
129
p4d = p4dp_get(p4dp);
arch/s390/mm/gmap_helpers.c
130
if (p4d_none(p4d) || !p4d_present(p4d))
arch/s390/mm/pageattr.c
251
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
arch/s390/mm/pageattr.c
259
pudp = pud_offset(p4d, addr);
arch/s390/mm/pgalloc.c
111
pagetable_dtor(virt_to_ptdesc(p4d));
arch/s390/mm/pgalloc.c
112
crst_table_free(mm, p4d);
arch/s390/mm/pgalloc.c
58
unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
arch/s390/mm/pgalloc.c
70
p4d = crst_table_alloc(mm);
arch/s390/mm/pgalloc.c
71
if (unlikely(!p4d))
arch/s390/mm/pgalloc.c
73
crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
arch/s390/mm/pgalloc.c
74
pagetable_p4d_ctor(virt_to_ptdesc(p4d));
arch/s390/mm/pgalloc.c
86
if (p4d) {
arch/s390/mm/pgalloc.c
88
p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
arch/s390/mm/pgalloc.c
89
mm->pgd = (pgd_t *) p4d;
arch/s390/mm/vmem.c
315
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
arch/s390/mm/vmem.c
324
pud = pud_offset(p4d, addr);
arch/s390/mm/vmem.c
371
static void try_free_pud_table(p4d_t *p4d, unsigned long start)
arch/s390/mm/vmem.c
376
pud = pud_offset(p4d, start);
arch/s390/mm/vmem.c
381
vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
arch/s390/mm/vmem.c
382
p4d_clear(p4d);
arch/s390/mm/vmem.c
390
p4d_t *p4d;
arch/s390/mm/vmem.c
393
p4d = p4d_offset(pgd, addr);
arch/s390/mm/vmem.c
394
for (; addr < end; addr = next, p4d++) {
arch/s390/mm/vmem.c
397
if (p4d_none(*p4d))
arch/s390/mm/vmem.c
399
} else if (p4d_none(*p4d)) {
arch/s390/mm/vmem.c
403
p4d_populate(&init_mm, p4d, pud);
arch/s390/mm/vmem.c
405
ret = modify_pud_table(p4d, addr, next, add, direct, altmap);
arch/s390/mm/vmem.c
409
try_free_pud_table(p4d, addr & P4D_MASK);
arch/s390/mm/vmem.c
418
p4d_t *p4d;
arch/s390/mm/vmem.c
421
p4d = p4d_offset(pgd, start);
arch/s390/mm/vmem.c
422
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
arch/s390/mm/vmem.c
423
if (!p4d_none(*p4d))
arch/s390/mm/vmem.c
436
p4d_t *p4d;
arch/s390/mm/vmem.c
457
p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
arch/s390/mm/vmem.c
458
if (!p4d)
arch/s390/mm/vmem.c
460
pgd_populate(&init_mm, pgd, p4d);
arch/s390/mm/vmem.c
579
p4d_t *p4d;
arch/s390/mm/vmem.c
588
p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
arch/s390/mm/vmem.c
589
if (!p4d)
arch/s390/mm/vmem.c
591
pgd_populate(&init_mm, pgd, p4d);
arch/s390/mm/vmem.c
593
p4d = p4d_offset(pgd, addr);
arch/s390/mm/vmem.c
594
if (p4d_none(*p4d)) {
arch/s390/mm/vmem.c
600
p4d_populate(&init_mm, p4d, pud);
arch/s390/mm/vmem.c
602
pud = pud_offset(p4d, addr);
arch/sh/mm/fault.c
124
p4d_t *p4d, *p4d_k;
arch/sh/mm/fault.c
134
p4d = p4d_offset(pgd, address);
arch/sh/mm/fault.c
139
pud = pud_offset(p4d, address);
arch/sh/mm/fault.c
56
p4d_t *p4d;
arch/sh/mm/fault.c
69
p4d = p4d_offset(pgd, addr);
arch/sh/mm/fault.c
71
pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
arch/sh/mm/fault.c
72
(u64)p4d_val(*p4d));
arch/sh/mm/fault.c
74
if (p4d_none(*p4d))
arch/sh/mm/fault.c
77
if (p4d_bad(*p4d)) {
arch/sh/mm/fault.c
82
pud = pud_offset(p4d, addr);
arch/sh/mm/hugetlbpage.c
28
p4d_t *p4d;
arch/sh/mm/hugetlbpage.c
35
p4d = p4d_alloc(mm, pgd, addr);
arch/sh/mm/hugetlbpage.c
36
if (p4d) {
arch/sh/mm/hugetlbpage.c
37
pud = pud_alloc(mm, p4d, addr);
arch/sh/mm/hugetlbpage.c
53
p4d_t *p4d;
arch/sh/mm/hugetlbpage.c
60
p4d = p4d_offset(pgd, addr);
arch/sh/mm/hugetlbpage.c
61
if (p4d) {
arch/sh/mm/hugetlbpage.c
62
pud = pud_offset(p4d, addr);
arch/sh/mm/init.c
50
p4d_t *p4d;
arch/sh/mm/init.c
60
p4d = p4d_alloc(NULL, pgd, addr);
arch/sh/mm/init.c
61
if (unlikely(!p4d)) {
arch/sh/mm/init.c
62
p4d_ERROR(*p4d);
arch/sh/mm/init.c
66
pud = pud_alloc(NULL, p4d, addr);
arch/sh/mm/tlbex_32.c
27
p4d_t *p4d;
arch/sh/mm/tlbex_32.c
47
p4d = p4d_offset(pgd, address);
arch/sh/mm/tlbex_32.c
48
if (p4d_none_or_clear_bad(p4d))
arch/sh/mm/tlbex_32.c
50
pud = pud_offset(p4d, address);
arch/sparc/include/asm/pgalloc_64.h
19
static inline void __p4d_populate(p4d_t *p4d, pud_t *pud)
arch/sparc/include/asm/pgalloc_64.h
21
p4d_set(p4d, pud);
arch/sparc/include/asm/pgtable_64.h
812
#define p4d_none(p4d) (!p4d_val(p4d))
arch/sparc/include/asm/pgtable_64.h
814
#define p4d_bad(p4d) (p4d_val(p4d) & ~PAGE_MASK)
arch/sparc/include/asm/pgtable_64.h
861
#define p4d_pgtable(p4d) \
arch/sparc/include/asm/pgtable_64.h
862
((pud_t *) __va(p4d_val(p4d)))
arch/sparc/include/asm/pgtable_64.h
863
#define p4d_present(p4d) (p4d_val(p4d) != 0U)
arch/sparc/include/asm/pgtable_64.h
867
#define p4d_page(p4d) NULL
arch/sparc/mm/fault_32.c
274
p4d_t *p4d, *p4d_k;
arch/sparc/mm/fault_32.c
288
p4d = p4d_offset(pgd, address);
arch/sparc/mm/fault_32.c
289
pud = pud_offset(p4d, address);
arch/sparc/mm/hugetlbpage.c
194
p4d_t *p4d;
arch/sparc/mm/hugetlbpage.c
199
p4d = p4d_offset(pgd, addr);
arch/sparc/mm/hugetlbpage.c
200
pud = pud_alloc(mm, p4d, addr);
arch/sparc/mm/hugetlbpage.c
217
p4d_t *p4d;
arch/sparc/mm/hugetlbpage.c
224
p4d = p4d_offset(pgd, addr);
arch/sparc/mm/hugetlbpage.c
225
if (p4d_none(*p4d))
arch/sparc/mm/hugetlbpage.c
227
pud = pud_offset(p4d, addr);
arch/sparc/mm/init_64.c
1623
p4d_t *p4d;
arch/sparc/mm/init_64.c
1645
p4d = p4d_offset(pgd, addr);
arch/sparc/mm/init_64.c
1646
if (p4d_none(*p4d))
arch/sparc/mm/init_64.c
1649
pud = pud_offset(p4d, addr);
arch/sparc/mm/init_64.c
1774
p4d_t *p4d;
arch/sparc/mm/init_64.c
1790
p4d = p4d_offset(pgd, vstart);
arch/sparc/mm/init_64.c
1791
if (p4d_none(*p4d)) {
arch/sparc/mm/init_64.c
1799
p4d_populate(&init_mm, p4d, new);
arch/sparc/mm/init_64.c
1802
pud = pud_offset(p4d, vstart);
arch/sparc/mm/init_64.c
2590
p4d_t *p4d;
arch/sparc/mm/init_64.c
2597
p4d = vmemmap_p4d_populate(pgd, vstart, node);
arch/sparc/mm/init_64.c
2598
if (!p4d)
arch/sparc/mm/init_64.c
2601
pud = vmemmap_pud_populate(p4d, vstart, node);
arch/sparc/mm/srmmu.c
271
p4d_t *p4d;
arch/sparc/mm/srmmu.c
300
p4d = p4d_offset(pgd, vaddr);
arch/sparc/mm/srmmu.c
301
pud = pud_offset(p4d, vaddr);
arch/sparc/mm/srmmu.c
900
p4d_t *p4d;
arch/sparc/mm/srmmu.c
963
p4d = p4d_offset(pgd, PKMAP_BASE);
arch/sparc/mm/srmmu.c
964
pud = pud_offset(p4d, PKMAP_BASE);
arch/um/include/asm/pgtable-4level.h
69
#define p4d_populate(mm, p4d, pud) \
arch/um/include/asm/pgtable-4level.h
70
set_p4d(p4d, __p4d(_PAGE_TABLE + __pa(pud)))
arch/um/include/asm/pgtable-4level.h
89
static inline void p4d_clear (p4d_t *p4d)
arch/um/include/asm/pgtable-4level.h
91
set_p4d(p4d, __p4d(_PAGE_NEEDSYNC));
arch/um/include/asm/pgtable-4level.h
97
#define p4d_page(p4d) phys_to_page(p4d_val(p4d) & PAGE_MASK)
arch/um/include/asm/pgtable-4level.h
98
#define p4d_pgtable(p4d) ((pud_t *) __va(p4d_val(p4d) & PAGE_MASK))
arch/um/kernel/skas/uaccess.c
20
p4d_t *p4d;
arch/um/kernel/skas/uaccess.c
31
p4d = p4d_offset(pgd, addr);
arch/um/kernel/skas/uaccess.c
32
if (!p4d_present(*p4d))
arch/um/kernel/skas/uaccess.c
35
pud = pud_offset(p4d, addr);
arch/um/kernel/tlb.c
115
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
arch/um/kernel/tlb.c
123
pud = pud_offset(p4d, addr);
arch/um/kernel/tlb.c
142
p4d_t *p4d;
arch/um/kernel/tlb.c
146
p4d = p4d_offset(pgd, addr);
arch/um/kernel/tlb.c
149
if (!p4d_present(*p4d)) {
arch/um/kernel/tlb.c
150
if (p4d_needsync(*p4d)) {
arch/um/kernel/tlb.c
153
p4d_mkuptodate(*p4d);
arch/um/kernel/tlb.c
156
ret = update_pud_range(p4d, addr, next, ops);
arch/um/kernel/tlb.c
157
} while (p4d++, addr = next, ((addr < end) && !ret));
arch/x86/boot/startup/map_kernel.c
130
p4d = (p4dval_t *)rip_rel_ptr(level4_kernel_pgt);
arch/x86/boot/startup/map_kernel.c
131
p4d[MAX_PTRS_PER_P4D - 1] += load_delta;
arch/x86/boot/startup/map_kernel.c
133
pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
arch/x86/boot/startup/map_kernel.c
156
p4d = &early_pgts[next_early_pgt++]->pmd;
arch/x86/boot/startup/map_kernel.c
159
pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
arch/x86/boot/startup/map_kernel.c
160
pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
arch/x86/boot/startup/map_kernel.c
163
p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
arch/x86/boot/startup/map_kernel.c
164
p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
arch/x86/boot/startup/map_kernel.c
96
p4dval_t *p4d;
arch/x86/boot/startup/sme.c
113
p4d_t *p4d;
arch/x86/boot/startup/sme.c
119
p4d = ppd->pgtable_area;
arch/x86/boot/startup/sme.c
120
memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
arch/x86/boot/startup/sme.c
121
ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
arch/x86/boot/startup/sme.c
122
set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
arch/x86/boot/startup/sme.c
125
p4d = p4d_offset(pgd, ppd->vaddr);
arch/x86/boot/startup/sme.c
126
if (p4d_none(*p4d)) {
arch/x86/boot/startup/sme.c
130
set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
arch/x86/boot/startup/sme.c
133
pud = pud_offset(p4d, ppd->vaddr);
arch/x86/entry/vsyscall/vsyscall_64.c
347
p4d_t *p4d;
arch/x86/entry/vsyscall/vsyscall_64.c
353
p4d = p4d_offset(pgd, VSYSCALL_ADDR);
arch/x86/entry/vsyscall/vsyscall_64.c
354
set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
arch/x86/entry/vsyscall/vsyscall_64.c
355
pud = pud_offset(p4d, VSYSCALL_ADDR);
arch/x86/hyperv/hv_crash.c
118
p4d_t *p4d;
arch/x86/hyperv/hv_crash.c
122
p4d = p4d_offset(pgd, trampoline_pa);
arch/x86/hyperv/hv_crash.c
123
native_p4d_clear(p4d);
arch/x86/hyperv/hv_crash.c
229
p4d_t *p4d;
arch/x86/hyperv/hv_crash.c
232
p4d = p4d_offset(pgd, trampoline_pa);
arch/x86/hyperv/hv_crash.c
235
p4d_populate(&init_mm, p4d, (pud_t *)hv_crash_ptpgs[1]);
arch/x86/hyperv/hv_crash.c
236
p4d->p4d = p4d->p4d & ~(_PAGE_NX); /* enable execute */
arch/x86/hyperv/hv_crash.c
509
p4d_t *p4d;
arch/x86/hyperv/hv_crash.c
515
p4d = hv_crash_ptpgs[0] + pgd_index(addr) * sizeof(p4d);
arch/x86/hyperv/hv_crash.c
517
set_p4d(p4d, __p4d(_PAGE_TABLE | pa));
arch/x86/hyperv/hv_crash.c
518
p4d->p4d &= ~(_PAGE_NX); /* enable execute */
arch/x86/include/asm/kexec.h
163
p4d_t *p4d;
arch/x86/include/asm/paravirt.h
425
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
arch/x86/include/asm/paravirt.h
427
p4dval_t val = native_p4d_val(p4d);
arch/x86/include/asm/paravirt.h
440
static inline p4dval_t p4d_val(p4d_t p4d)
arch/x86/include/asm/paravirt.h
442
return PVOP_ALT_CALLEE1(p4dval_t, pv_ops, mmu.p4d_val, p4d.p4d,
arch/x86/include/asm/pgalloc.h
111
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/x86/include/asm/pgalloc.h
114
set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
arch/x86/include/asm/pgalloc.h
117
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
arch/x86/include/asm/pgalloc.h
120
set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
arch/x86/include/asm/pgalloc.h
132
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
arch/x86/include/asm/pgalloc.h
136
paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
arch/x86/include/asm/pgalloc.h
137
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
arch/x86/include/asm/pgalloc.h
140
static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
arch/x86/include/asm/pgalloc.h
144
paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
arch/x86/include/asm/pgalloc.h
145
set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
arch/x86/include/asm/pgalloc.h
148
extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);
arch/x86/include/asm/pgalloc.h
150
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
arch/x86/include/asm/pgalloc.h
154
___p4d_free_tlb(tlb, p4d);
arch/x86/include/asm/pgtable-3level.h
56
pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
arch/x86/include/asm/pgtable.h
1079
static inline int p4d_none(p4d_t p4d)
arch/x86/include/asm/pgtable.h
1081
return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
arch/x86/include/asm/pgtable.h
1084
static inline int p4d_present(p4d_t p4d)
arch/x86/include/asm/pgtable.h
1086
return p4d_flags(p4d) & _PAGE_PRESENT;
arch/x86/include/asm/pgtable.h
1089
static inline pud_t *p4d_pgtable(p4d_t p4d)
arch/x86/include/asm/pgtable.h
1091
return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
arch/x86/include/asm/pgtable.h
1098
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
arch/x86/include/asm/pgtable.h
1100
static inline int p4d_bad(p4d_t p4d)
arch/x86/include/asm/pgtable.h
1107
return (p4d_flags(p4d) & ~ignore_flags) != 0;
arch/x86/include/asm/pgtable.h
1733
#define set_p4d_safe(p4dp, p4d) \
arch/x86/include/asm/pgtable.h
1735
WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
arch/x86/include/asm/pgtable.h
1736
set_p4d(p4dp, p4d); \
arch/x86/include/asm/pgtable.h
286
static inline unsigned long p4d_pfn(p4d_t p4d)
arch/x86/include/asm/pgtable.h
288
return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
arch/x86/include/asm/pgtable.h
81
# define set_p4d(p4dp, p4d) native_set_p4d(p4dp, p4d)
arch/x86/include/asm/pgtable.h
85
#define p4d_clear(p4d) native_p4d_clear(p4d)
arch/x86/include/asm/pgtable_64.h
138
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
arch/x86/include/asm/pgtable_64.h
144
WRITE_ONCE(*p4dp, p4d);
arch/x86/include/asm/pgtable_64.h
148
pgd = native_make_pgd(native_p4d_val(p4d));
arch/x86/include/asm/pgtable_64.h
153
static inline void native_p4d_clear(p4d_t *p4d)
arch/x86/include/asm/pgtable_64.h
155
native_set_p4d(p4d, native_make_p4d(0));
arch/x86/include/asm/pgtable_types.h
342
typedef struct { p4dval_t p4d; } p4d_t;
arch/x86/include/asm/pgtable_types.h
349
static inline p4dval_t native_p4d_val(p4d_t p4d)
arch/x86/include/asm/pgtable_types.h
351
return p4d.p4d;
arch/x86/include/asm/pgtable_types.h
361
static inline p4dval_t native_p4d_val(p4d_t p4d)
arch/x86/include/asm/pgtable_types.h
363
return native_pgd_val(p4d.pgd);
arch/x86/include/asm/pgtable_types.h
384
return (pud_t) { .p4d.pgd = native_make_pgd(val) };
arch/x86/include/asm/pgtable_types.h
389
return native_pgd_val(pud.p4d.pgd);
arch/x86/include/asm/pgtable_types.h
408
return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
arch/x86/include/asm/pgtable_types.h
413
return native_pgd_val(pmd.pud.p4d.pgd);
arch/x86/include/asm/pgtable_types.h
417
static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
arch/x86/include/asm/pgtable_types.h
423
static inline p4dval_t p4d_flags_mask(p4d_t p4d)
arch/x86/include/asm/pgtable_types.h
425
return ~p4d_pfn_mask(p4d);
arch/x86/include/asm/pgtable_types.h
428
static inline p4dval_t p4d_flags(p4d_t p4d)
arch/x86/include/asm/pgtable_types.h
430
return native_p4d_val(p4d) & p4d_flags_mask(p4d);
arch/x86/include/asm/xen/page.h
336
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
arch/x86/include/asm/xen/page.h
345
#define p4d_val_ma(x) ((x).p4d)
arch/x86/kernel/espfix_64.c
107
p4d_t *p4d;
arch/x86/kernel/espfix_64.c
115
p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
arch/x86/kernel/espfix_64.c
116
p4d_populate(&init_mm, p4d, espfix_pud_page);
arch/x86/kernel/head64.c
115
p4d = *p4d_p;
arch/x86/kernel/head64.c
117
if (p4d)
arch/x86/kernel/head64.c
118
pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
arch/x86/kernel/head64.c
83
p4dval_t p4d, *p4d_p;
arch/x86/kernel/ldt.c
217
p4d_t *p4d;
arch/x86/kernel/ldt.c
223
p4d = p4d_offset(pgd, va);
arch/x86/kernel/ldt.c
224
if (p4d_none(*p4d))
arch/x86/kernel/ldt.c
227
pud = pud_offset(p4d, va);
arch/x86/kernel/machine_kexec_32.c
83
p4d_t *p4d;
arch/x86/kernel/machine_kexec_32.c
91
p4d = p4d_offset(pgd, vaddr);
arch/x86/kernel/machine_kexec_32.c
92
pud = pud_offset(p4d, vaddr);
arch/x86/kernel/machine_kexec_64.c
153
free_page((unsigned long)image->arch.p4d);
arch/x86/kernel/machine_kexec_64.c
154
image->arch.p4d = NULL;
arch/x86/kernel/machine_kexec_64.c
169
p4d_t *p4d;
arch/x86/kernel/machine_kexec_64.c
183
p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
arch/x86/kernel/machine_kexec_64.c
184
if (!p4d)
arch/x86/kernel/machine_kexec_64.c
186
image->arch.p4d = p4d;
arch/x86/kernel/machine_kexec_64.c
187
set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
arch/x86/kernel/machine_kexec_64.c
189
p4d = p4d_offset(pgd, vaddr);
arch/x86/kernel/machine_kexec_64.c
190
if (!p4d_present(*p4d)) {
arch/x86/kernel/machine_kexec_64.c
195
set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
arch/x86/kernel/machine_kexec_64.c
197
pud = pud_offset(p4d, vaddr);
arch/x86/kernel/tboot.c
117
p4d_t *p4d;
arch/x86/kernel/tboot.c
123
p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
arch/x86/kernel/tboot.c
124
if (!p4d)
arch/x86/kernel/tboot.c
126
pud = pud_alloc(&tboot_mm, p4d, vaddr);
arch/x86/kvm/mmu/mmu.c
3238
p4d_t p4d;
arch/x86/kvm/mmu/mmu.c
3270
p4d = READ_ONCE(*p4d_offset(&pgd, hva));
arch/x86/kvm/mmu/mmu.c
3271
if (p4d_none(p4d) || !p4d_present(p4d))
arch/x86/kvm/mmu/mmu.c
3274
pud = READ_ONCE(*pud_offset(&p4d, hva));
arch/x86/mm/dump_pagetables.c
284
static void effective_prot_p4d(struct ptdump_state *st, p4d_t p4d)
arch/x86/mm/dump_pagetables.c
286
effective_prot(st, 1, p4d_val(p4d));
arch/x86/mm/dump_pagetables.c
406
static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d)
arch/x86/mm/dump_pagetables.c
408
note_page(pt_st, addr, 1, p4d_val(p4d));
arch/x86/mm/fault.c
1006
p4d = p4d_offset(pgd, address);
arch/x86/mm/fault.c
1007
if (!p4d_present(*p4d))
arch/x86/mm/fault.c
1010
if (p4d_leaf(*p4d))
arch/x86/mm/fault.c
1011
return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
arch/x86/mm/fault.c
1013
pud = pud_offset(p4d, address);
arch/x86/mm/fault.c
179
p4d_t *p4d, *p4d_k;
arch/x86/mm/fault.c
194
p4d = p4d_offset(pgd, address);
arch/x86/mm/fault.c
199
pud = pud_offset(p4d, address);
arch/x86/mm/fault.c
298
p4d_t *p4d;
arch/x86/mm/fault.c
311
p4d = p4d_offset(pgd, address);
arch/x86/mm/fault.c
312
pud = pud_offset(p4d, address);
arch/x86/mm/fault.c
354
p4d_t *p4d;
arch/x86/mm/fault.c
367
p4d = p4d_offset(pgd, address);
arch/x86/mm/fault.c
368
if (bad_address(p4d))
arch/x86/mm/fault.c
371
pr_cont("P4D %lx ", p4d_val(*p4d));
arch/x86/mm/fault.c
372
if (!p4d_present(*p4d) || p4d_leaf(*p4d))
arch/x86/mm/fault.c
375
pud = pud_offset(p4d, address);
arch/x86/mm/fault.c
983
p4d_t *p4d;
arch/x86/mm/ident_map.c
152
p4d_t *p4d = p4d_page + p4d_index(addr);
arch/x86/mm/ident_map.c
156
if (p4d_present(*p4d)) {
arch/x86/mm/ident_map.c
157
pud = pud_offset(p4d, 0);
arch/x86/mm/ident_map.c
172
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
arch/x86/mm/ident_map.c
195
p4d_t *p4d;
arch/x86/mm/ident_map.c
199
p4d = p4d_offset(pgd, 0);
arch/x86/mm/ident_map.c
200
result = ident_p4d_init(info, p4d, addr, next);
arch/x86/mm/ident_map.c
206
p4d = (p4d_t *)info->alloc_pgt_page(info->context);
arch/x86/mm/ident_map.c
207
if (!p4d)
arch/x86/mm/ident_map.c
209
result = ident_p4d_init(info, p4d, addr, next);
arch/x86/mm/ident_map.c
213
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
arch/x86/mm/ident_map.c
219
pud_t *pud = pud_offset(p4d, 0);
arch/x86/mm/ident_map.c
32
static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
arch/x86/mm/ident_map.c
34
pud_t *pud = pud_offset(p4d, 0);
arch/x86/mm/ident_map.c
52
p4d_t *p4d = p4d_offset(pgd, 0);
arch/x86/mm/ident_map.c
56
if (!p4d_present(p4d[i]))
arch/x86/mm/ident_map.c
59
free_pud(info, &p4d[i]);
arch/x86/mm/ident_map.c
63
info->free_pgt_page(p4d, info->context);
arch/x86/mm/init_32.c
422
p4d_t *p4d;
arch/x86/mm/init_32.c
442
p4d = p4d_offset(pgd, va);
arch/x86/mm/init_32.c
443
pud = pud_offset(p4d, va);
arch/x86/mm/init_32.c
69
p4d_t *p4d;
arch/x86/mm/init_32.c
77
p4d = p4d_offset(pgd, 0);
arch/x86/mm/init_32.c
78
pud = pud_offset(p4d, 0);
arch/x86/mm/init_32.c
84
p4d = p4d_offset(pgd, 0);
arch/x86/mm/init_32.c
85
pud = pud_offset(p4d, 0);
arch/x86/mm/init_64.c
1076
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
arch/x86/mm/init_64.c
1088
free_pagetable(p4d_page(*p4d), 0);
arch/x86/mm/init_64.c
1090
p4d_clear(p4d);
arch/x86/mm/init_64.c
1223
p4d_t *p4d;
arch/x86/mm/init_64.c
1225
p4d = p4d_start + p4d_index(addr);
arch/x86/mm/init_64.c
1226
for (; addr < end; addr = next, p4d++) {
arch/x86/mm/init_64.c
1229
if (!p4d_present(*p4d))
arch/x86/mm/init_64.c
1232
BUILD_BUG_ON(p4d_leaf(*p4d));
arch/x86/mm/init_64.c
1234
pud_base = pud_offset(p4d, 0);
arch/x86/mm/init_64.c
1242
free_pud_table(pud_base, p4d);
arch/x86/mm/init_64.c
1257
p4d_t *p4d;
arch/x86/mm/init_64.c
1266
p4d = p4d_offset(pgd, 0);
arch/x86/mm/init_64.c
1267
remove_p4d_table(p4d, addr, next, altmap, direct);
arch/x86/mm/init_64.c
1325
p4d_t *p4d;
arch/x86/mm/init_64.c
1329
p4d = p4d_alloc(&init_mm, pgd, addr);
arch/x86/mm/init_64.c
1330
if (!p4d)
arch/x86/mm/init_64.c
1347
pud = pud_alloc(&init_mm, p4d, addr);
arch/x86/mm/init_64.c
1589
p4d_t *p4d;
arch/x86/mm/init_64.c
1605
p4d = p4d_offset(pgd, addr);
arch/x86/mm/init_64.c
1606
if (p4d_none(*p4d)) {
arch/x86/mm/init_64.c
1610
get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);
arch/x86/mm/init_64.c
1612
pud = pud_offset(p4d, addr);
arch/x86/mm/init_64.c
192
p4d_t *p4d;
arch/x86/mm/init_64.c
196
p4d = p4d_offset(pgd, addr);
arch/x86/mm/init_64.c
201
if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
arch/x86/mm/init_64.c
202
BUG_ON(p4d_pgtable(*p4d)
arch/x86/mm/init_64.c
205
if (p4d_none(*p4d))
arch/x86/mm/init_64.c
206
set_p4d(p4d, *p4d_ref);
arch/x86/mm/init_64.c
270
p4d_t *p4d = (p4d_t *)spp_getpage();
arch/x86/mm/init_64.c
271
pgd_populate(&init_mm, pgd, p4d);
arch/x86/mm/init_64.c
272
if (p4d != p4d_offset(pgd, 0))
arch/x86/mm/init_64.c
274
p4d, p4d_offset(pgd, 0));
arch/x86/mm/init_64.c
279
static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
arch/x86/mm/init_64.c
281
if (p4d_none(*p4d)) {
arch/x86/mm/init_64.c
283
p4d_populate(&init_mm, p4d, pud);
arch/x86/mm/init_64.c
284
if (pud != pud_offset(p4d, 0))
arch/x86/mm/init_64.c
286
pud, pud_offset(p4d, 0));
arch/x86/mm/init_64.c
288
return pud_offset(p4d, vaddr);
arch/x86/mm/init_64.c
330
p4d_t *p4d = p4d_page + p4d_index(vaddr);
arch/x86/mm/init_64.c
331
pud_t *pud = fill_pud(p4d, vaddr);
arch/x86/mm/init_64.c
364
p4d_t *p4d;
arch/x86/mm/init_64.c
368
p4d = fill_p4d(pgd, vaddr);
arch/x86/mm/init_64.c
369
pud = fill_pud(p4d, vaddr);
arch/x86/mm/init_64.c
388
p4d_t *p4d;
arch/x86/mm/init_64.c
399
p4d = (p4d_t *) spp_getpage();
arch/x86/mm/init_64.c
400
set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
arch/x86/mm/init_64.c
403
p4d = p4d_offset(pgd, (unsigned long)__va(phys));
arch/x86/mm/init_64.c
404
if (p4d_none(*p4d)) {
arch/x86/mm/init_64.c
406
set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
arch/x86/mm/init_64.c
409
pud = pud_offset(p4d, (unsigned long)__va(phys));
arch/x86/mm/init_64.c
704
p4d_t *p4d = p4d_page + p4d_index(vaddr);
arch/x86/mm/init_64.c
717
set_p4d_init(p4d, __p4d(0), init);
arch/x86/mm/init_64.c
721
if (!p4d_none(*p4d)) {
arch/x86/mm/init_64.c
722
pud = pud_offset(p4d, 0);
arch/x86/mm/init_64.c
73
DEFINE_POPULATE(p4d_populate, p4d, pud, init)
arch/x86/mm/init_64.c
733
p4d_populate_init(&init_mm, p4d, pud, init);
arch/x86/mm/init_64.c
74
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
arch/x86/mm/init_64.c
756
p4d_t *p4d;
arch/x86/mm/init_64.c
761
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
arch/x86/mm/init_64.c
762
paddr_last = phys_p4d_init(p4d, __pa(vaddr),
arch/x86/mm/init_64.c
769
p4d = alloc_low_page();
arch/x86/mm/init_64.c
770
paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
arch/x86/mm/init_64.c
775
pgd_populate_init(&init_mm, pgd, p4d, init);
arch/x86/mm/init_64.c
778
(pud_t *) p4d, init);
arch/x86/mm/init_64.c
88
DEFINE_ENTRY(p4d, p4d, init)
arch/x86/mm/ioremap.c
831
p4d_t *p4d = p4d_offset(pgd, addr);
arch/x86/mm/ioremap.c
832
pud_t *pud = pud_offset(p4d, addr);
arch/x86/mm/kasan_init_64.c
102
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
arch/x86/mm/kasan_init_64.c
108
if (p4d_none(*p4d)) {
arch/x86/mm/kasan_init_64.c
111
p4d_populate(&init_mm, p4d, p);
arch/x86/mm/kasan_init_64.c
114
pud = pud_offset(p4d, addr);
arch/x86/mm/kasan_init_64.c
126
p4d_t *p4d;
arch/x86/mm/kasan_init_64.c
134
p4d = p4d_offset(pgd, addr);
arch/x86/mm/kasan_init_64.c
137
kasan_populate_p4d(p4d, addr, next, nid);
arch/x86/mm/kasan_init_64.c
138
} while (p4d++, addr = next, addr != end);
arch/x86/mm/kasan_init_64.c
193
unsigned long p4d;
arch/x86/mm/kasan_init_64.c
198
p4d = pgd_val(*pgd) & PTE_PFN_MASK;
arch/x86/mm/kasan_init_64.c
199
p4d += __START_KERNEL_map - phys_base;
arch/x86/mm/kasan_init_64.c
200
return (p4d_t *)p4d + p4d_index(addr);
arch/x86/mm/kasan_init_64.c
208
p4d_t *p4d, p4d_entry;
arch/x86/mm/kasan_init_64.c
217
p4d = early_p4d_offset(pgd, addr);
arch/x86/mm/kasan_init_64.c
221
if (!p4d_none(*p4d))
arch/x86/mm/kasan_init_64.c
226
set_p4d(p4d, p4d_entry);
arch/x86/mm/kasan_init_64.c
227
} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
arch/x86/mm/kasan_init_64.c
248
p4d_t *p4d;
arch/x86/mm/kasan_init_64.c
252
p4d = p4d_offset(pgd, addr);
arch/x86/mm/kasan_init_64.c
256
if (p4d_none(*p4d)) {
arch/x86/mm/kasan_init_64.c
258
p4d_populate(&init_mm, p4d, p);
arch/x86/mm/kasan_init_64.c
260
} while (p4d++, addr = next, addr != end);
arch/x86/mm/kaslr.c
174
p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
arch/x86/mm/kaslr.c
191
p4d = p4d_offset(pgd, vaddr);
arch/x86/mm/kaslr.c
192
pud = pud_offset(p4d, vaddr);
arch/x86/mm/pat/set_memory.c
1299
p4d_t *p4d = p4d_offset(pgd, addr);
arch/x86/mm/pat/set_memory.c
1300
pud_t *pud = pud_offset(p4d, addr);
arch/x86/mm/pat/set_memory.c
1374
p4d_t *p4d;
arch/x86/mm/pat/set_memory.c
1384
p4d = p4d_offset(pgd, addr);
arch/x86/mm/pat/set_memory.c
1385
if (p4d_none(*p4d))
arch/x86/mm/pat/set_memory.c
1387
pud = pud_offset(p4d, addr);
arch/x86/mm/pat/set_memory.c
1497
static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
arch/x86/mm/pat/set_memory.c
1499
pud_t *pud = pud_offset(p4d, start);
arch/x86/mm/pat/set_memory.c
1653
static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
arch/x86/mm/pat/set_memory.c
1675
pud = pud_offset(p4d, start);
arch/x86/mm/pat/set_memory.c
1696
pud = pud_offset(p4d, start);
arch/x86/mm/pat/set_memory.c
1716
pud = pud_offset(p4d, start);
arch/x86/mm/pat/set_memory.c
1739
p4d_t *p4d;
arch/x86/mm/pat/set_memory.c
1746
p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
arch/x86/mm/pat/set_memory.c
1747
if (!p4d)
arch/x86/mm/pat/set_memory.c
1750
set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
arch/x86/mm/pat/set_memory.c
1756
p4d = p4d_offset(pgd_entry, addr);
arch/x86/mm/pat/set_memory.c
1757
if (p4d_none(*p4d)) {
arch/x86/mm/pat/set_memory.c
1762
set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
arch/x86/mm/pat/set_memory.c
1768
ret = populate_pud(cpa, addr, p4d, pgprot);
arch/x86/mm/pat/set_memory.c
1775
unmap_pud_range(p4d, addr,
arch/x86/mm/pat/set_memory.c
715
p4d_t *p4d;
arch/x86/mm/pat/set_memory.c
730
p4d = p4d_offset(pgd, address);
arch/x86/mm/pat/set_memory.c
731
if (p4d_none(*p4d))
arch/x86/mm/pat/set_memory.c
734
if (p4d_leaf(*p4d) || !p4d_present(*p4d))
arch/x86/mm/pat/set_memory.c
735
return (pte_t *)p4d;
arch/x86/mm/pat/set_memory.c
738
*nx |= p4d_flags(*p4d) & _PAGE_NX;
arch/x86/mm/pat/set_memory.c
739
*rw &= p4d_flags(*p4d) & _PAGE_RW;
arch/x86/mm/pat/set_memory.c
741
pud = pud_offset(p4d, address);
arch/x86/mm/pat/set_memory.c
811
p4d_t *p4d;
arch/x86/mm/pat/set_memory.c
818
p4d = p4d_offset(pgd, address);
arch/x86/mm/pat/set_memory.c
819
if (p4d_none(*p4d) || p4d_leaf(*p4d) || !p4d_present(*p4d))
arch/x86/mm/pat/set_memory.c
822
pud = pud_offset(p4d, address);
arch/x86/mm/pat/set_memory.c
891
p4d_t *p4d;
arch/x86/mm/pat/set_memory.c
896
p4d = p4d_offset(pgd, address);
arch/x86/mm/pat/set_memory.c
897
pud = pud_offset(p4d, address);
arch/x86/mm/pgtable.c
255
p4d_t *p4d;
arch/x86/mm/pgtable.c
259
p4d = p4d_offset(pgd, 0);
arch/x86/mm/pgtable.c
260
pud = pud_offset(p4d, 0);
arch/x86/mm/pgtable.c
49
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
arch/x86/mm/pgtable.c
51
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
arch/x86/mm/pgtable.c
52
tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
arch/x86/mm/pgtable.c
604
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
arch/x86/mm/pgtable.c
615
void p4d_clear_huge(p4d_t *p4d)
arch/x86/mm/pgtable_32.c
30
p4d_t *p4d;
arch/x86/mm/pgtable_32.c
40
p4d = p4d_offset(pgd, vaddr);
arch/x86/mm/pgtable_32.c
41
if (p4d_none(*p4d)) {
arch/x86/mm/pgtable_32.c
45
pud = pud_offset(p4d, vaddr);
arch/x86/mm/pti.c
209
p4d_t *p4d;
arch/x86/mm/pti.c
212
p4d = pti_user_pagetable_walk_p4d(address);
arch/x86/mm/pti.c
213
if (!p4d)
arch/x86/mm/pti.c
216
BUILD_BUG_ON(p4d_leaf(*p4d));
arch/x86/mm/pti.c
217
if (p4d_none(*p4d)) {
arch/x86/mm/pti.c
222
set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
arch/x86/mm/pti.c
225
pud = pud_offset(p4d, address);
arch/x86/mm/pti.c
328
p4d_t *p4d;
arch/x86/mm/pti.c
338
p4d = p4d_offset(pgd, addr);
arch/x86/mm/pti.c
339
if (WARN_ON(p4d_none(*p4d)))
arch/x86/mm/pti.c
342
pud = pud_offset(p4d, addr);
arch/x86/platform/efi/efi_64.c
71
p4d_t *p4d;
arch/x86/platform/efi/efi_64.c
81
p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
arch/x86/platform/efi/efi_64.c
82
if (!p4d)
arch/x86/platform/efi/efi_64.c
85
pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
arch/x86/power/hibernate.c
157
p4d_t *p4d;
arch/x86/power/hibernate.c
171
p4d = p4d_offset(pgd, relocated_restore_code);
arch/x86/power/hibernate.c
172
if (p4d_leaf(*p4d)) {
arch/x86/power/hibernate.c
173
set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
arch/x86/power/hibernate.c
176
pud = pud_offset(p4d, relocated_restore_code);
arch/x86/power/hibernate_32.c
32
p4d_t *p4d;
arch/x86/power/hibernate_32.c
42
p4d = p4d_offset(pgd, 0);
arch/x86/power/hibernate_32.c
43
pud = pud_offset(p4d, 0);
arch/x86/power/hibernate_32.c
47
p4d = p4d_offset(pgd, 0);
arch/x86/power/hibernate_32.c
48
pud = pud_offset(p4d, 0);
arch/x86/power/hibernate_64.c
32
p4d_t *p4d = NULL;
arch/x86/power/hibernate_64.c
55
p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
arch/x86/power/hibernate_64.c
56
if (!p4d)
arch/x86/power/hibernate_64.c
72
if (p4d) {
arch/x86/power/hibernate_64.c
74
pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
arch/x86/power/hibernate_64.c
76
set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
arch/x86/xen/mmu_pv.c
100
p4d_t xen_make_p4d(p4dval_t p4d);
arch/x86/xen/mmu_pv.c
1157
static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
arch/x86/xen/mmu_pv.c
1163
if (p4d_leaf(*p4d)) {
arch/x86/xen/mmu_pv.c
1164
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
arch/x86/xen/mmu_pv.c
1169
pud_tbl = pud_offset(p4d, 0);
arch/x86/xen/mmu_pv.c
1175
set_p4d(p4d, __p4d(0));
arch/x86/xen/mmu_pv.c
1186
p4d_t *p4d;
arch/x86/xen/mmu_pv.c
1192
p4d = p4d_offset(pgd, 0);
arch/x86/xen/mmu_pv.c
1193
if (!p4d_none(*p4d))
arch/x86/xen/mmu_pv.c
1194
xen_cleanmfnmap_p4d(p4d, unpin);
arch/x86/xen/mmu_pv.c
587
__visible p4dval_t xen_p4d_val(p4d_t p4d)
arch/x86/xen/mmu_pv.c
589
return pte_mfn_to_pfn(p4d.p4d);
arch/x86/xen/mmu_pv.c
593
__visible p4d_t xen_make_p4d(p4dval_t p4d)
arch/x86/xen/mmu_pv.c
595
p4d = pte_pfn_to_mfn(p4d);
arch/x86/xen/mmu_pv.c
597
return native_make_p4d(p4d);
arch/x86/xen/mmu_pv.c
636
static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
arch/x86/xen/mmu_pv.c
644
if (p4d_none(*p4d))
arch/x86/xen/mmu_pv.c
647
pud = pud_offset(p4d, 0);
arch/x86/xen/mmu_pv.c
686
p4d_t *p4d;
arch/x86/xen/mmu_pv.c
694
p4d = p4d_offset(&pgd[i], 0);
arch/x86/xen/mmu_pv.c
695
xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
arch/x86/xen/mmu_pv.c
95
p4dval_t xen_p4d_val(p4d_t p4d);
arch/xtensa/mm/fault.c
36
p4d_t *p4d, *p4d_k;
arch/xtensa/mm/fault.c
52
p4d = p4d_offset(pgd, address);
arch/xtensa/mm/fault.c
54
if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
arch/xtensa/mm/fault.c
57
pud = pud_offset(p4d, address);
arch/xtensa/mm/tlb.c
179
p4d_t *p4d;
arch/xtensa/mm/tlb.c
190
p4d = p4d_offset(pgd, vaddr);
arch/xtensa/mm/tlb.c
191
if (p4d_none_or_clear_bad(p4d))
arch/xtensa/mm/tlb.c
193
pud = pud_offset(p4d, vaddr);
fs/userfaultfd.c
290
p4d_t *p4d;
fs/userfaultfd.c
302
p4d = p4d_offset(pgd, address);
fs/userfaultfd.c
303
if (!p4d_present(*p4d))
fs/userfaultfd.c
305
pud = pud_offset(p4d, address);
include/asm-generic/pgalloc.h
259
static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
include/asm-generic/pgalloc.h
261
struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
include/asm-generic/pgalloc.h
263
BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
include/asm-generic/pgalloc.h
268
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
include/asm-generic/pgalloc.h
271
__p4d_free(mm, p4d);
include/asm-generic/pgtable-nop4d.h
25
#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
include/asm-generic/pgtable-nop4d.h
27
#define pgd_populate(mm, pgd, p4d) do { } while (0)
include/asm-generic/pgtable-nop4d.h
28
#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
include/asm-generic/pgtable-nopud.h
16
typedef struct { p4d_t p4d; } pud_t;
include/asm-generic/pgtable-nopud.h
28
static inline int p4d_none(p4d_t p4d) { return 0; }
include/asm-generic/pgtable-nopud.h
29
static inline int p4d_bad(p4d_t p4d) { return 0; }
include/asm-generic/pgtable-nopud.h
30
static inline int p4d_present(p4d_t p4d) { return 1; }
include/asm-generic/pgtable-nopud.h
31
static inline void p4d_clear(p4d_t *p4d) { }
include/asm-generic/pgtable-nopud.h
32
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
include/asm-generic/pgtable-nopud.h
34
#define p4d_populate(mm, p4d, pud) do { } while (0)
include/asm-generic/pgtable-nopud.h
35
#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
include/asm-generic/pgtable-nopud.h
42
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
include/asm-generic/pgtable-nopud.h
44
return (pud_t *)p4d;
include/asm-generic/pgtable-nopud.h
48
#define pud_val(x) (p4d_val((x).p4d))
include/asm-generic/pgtable-nopud.h
51
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
include/asm-generic/pgtable-nopud.h
52
#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
include/linux/mm.h
3213
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
include/linux/mm.h
3222
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
include/linux/mm.h
3311
static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
include/linux/mm.h
3314
return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
include/linux/mm.h
3315
NULL : pud_offset(p4d, address);
include/linux/mm.h
4491
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
include/linux/pagewalk.h
73
int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
include/linux/pgalloc.h
15
#define pgd_populate_kernel(addr, pgd, p4d) \
include/linux/pgalloc.h
17
pgd_populate(&init_mm, pgd, p4d); \
include/linux/pgalloc.h
22
#define p4d_populate_kernel(addr, p4d, pud) \
include/linux/pgalloc.h
24
p4d_populate(&init_mm, p4d, pud); \
include/linux/pgtable.h
1263
#define p4d_access_permitted(p4d, write) \
include/linux/pgtable.h
1264
(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
include/linux/pgtable.h
133
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
include/linux/pgtable.h
135
return p4d_pgtable(*p4d) + pud_index(address);
include/linux/pgtable.h
1437
#define p4d_clear_bad(p4d) do { } while (0)
include/linux/pgtable.h
1443
#define pud_clear_bad(p4d) do { } while (0)
include/linux/pgtable.h
1459
static inline int p4d_none_or_clear_bad(p4d_t *p4d)
include/linux/pgtable.h
1461
if (p4d_none(*p4d))
include/linux/pgtable.h
1463
if (unlikely(p4d_bad(*p4d))) {
include/linux/pgtable.h
1464
p4d_clear_bad(p4d);
include/linux/pgtable.h
2033
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
include/linux/pgtable.h
2034
void p4d_clear_huge(p4d_t *p4d);
include/linux/pgtable.h
2036
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
include/linux/pgtable.h
2040
static inline void p4d_clear_huge(p4d_t *p4d) { }
include/linux/pgtable.h
2047
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
include/linux/pgtable.h
2051
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
include/linux/pgtable.h
2063
static inline void p4d_clear_huge(p4d_t *p4d) { }
include/linux/pgtable.h
2072
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
include/linux/pgtable.h
2237
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
include/linux/ptdump.h
17
void (*note_page_p4d)(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
include/linux/ptdump.h
23
void (*effective_prot_p4d)(struct ptdump_state *st, p4d_t p4d);
kernel/events/core.c
8417
p4d_t *p4dp, p4d;
kernel/events/core.c
8431
p4d = READ_ONCE(*p4dp);
kernel/events/core.c
8432
if (!p4d_present(p4d))
kernel/events/core.c
8435
if (p4d_leaf(p4d))
kernel/events/core.c
8436
return p4d_leaf_size(p4d);
kernel/events/core.c
8438
pudp = pud_offset_lockless(p4dp, p4d, addr);
mm/debug_vm_pgtable.c
481
p4d_t p4d;
mm/debug_vm_pgtable.c
484
memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
mm/debug_vm_pgtable.c
485
WARN_ON(!p4d_same(p4d, p4d));
mm/debug_vm_pgtable.c
536
p4d_t p4d = p4dp_get(args->p4dp);
mm/debug_vm_pgtable.c
542
WARN_ON(p4d_none(p4d));
mm/debug_vm_pgtable.c
544
p4d = p4dp_get(args->p4dp);
mm/debug_vm_pgtable.c
545
WARN_ON(!p4d_none(p4d));
mm/debug_vm_pgtable.c
550
p4d_t p4d;
mm/debug_vm_pgtable.c
563
p4d = p4dp_get(args->p4dp);
mm/debug_vm_pgtable.c
564
WARN_ON(p4d_bad(p4d));
mm/gup.c
1035
p4d_t *p4d;
mm/gup.c
1048
p4d = p4d_offset(pgd, address);
mm/gup.c
1049
if (p4d_none(*p4d))
mm/gup.c
1051
pud = pud_offset(p4d, address);
mm/gup.c
3043
static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
mm/gup.c
3050
pudp = pud_offset_lockless(p4dp, p4d, addr);
mm/gup.c
3078
p4d_t p4d = p4dp_get(p4dp);
mm/gup.c
3081
if (!p4d_present(p4d))
mm/gup.c
3083
BUILD_BUG_ON(p4d_leaf(p4d));
mm/gup.c
3084
if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
mm/gup.c
975
p4d_t *p4dp, p4d;
mm/gup.c
978
p4d = p4dp_get(p4dp);
mm/gup.c
979
BUILD_BUG_ON(p4d_leaf(p4d));
mm/gup.c
981
if (!p4d_present(p4d) || p4d_bad(p4d))
mm/hugetlb.c
6945
p4d_t *p4d = p4d_offset(pgd, addr);
mm/hugetlb.c
6946
pud_t *pud = pud_offset(p4d, addr);
mm/hugetlb.c
7019
p4d_t *p4d;
mm/hugetlb.c
7024
p4d = p4d_alloc(mm, pgd, addr);
mm/hugetlb.c
7025
if (!p4d)
mm/hugetlb.c
7027
pud = pud_alloc(mm, p4d, addr);
mm/hugetlb.c
7062
p4d_t *p4d;
mm/hugetlb.c
7069
p4d = p4d_offset(pgd, addr);
mm/hugetlb.c
7070
if (!p4d_present(*p4d))
mm/hugetlb.c
7073
pud = pud_offset(p4d, addr);
mm/kasan/init.c
144
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
mm/kasan/init.c
147
pud_t *pud = pud_offset(p4d, addr);
mm/kasan/init.c
185
p4d_t *p4d = p4d_offset(pgd, addr);
mm/kasan/init.c
194
p4d_populate_kernel(addr, p4d,
mm/kasan/init.c
196
pud = pud_offset(p4d, addr);
mm/kasan/init.c
205
if (p4d_none(*p4d)) {
mm/kasan/init.c
209
p = pud_alloc(&init_mm, p4d, addr);
mm/kasan/init.c
215
p4d_populate_kernel(addr, p4d, p);
mm/kasan/init.c
218
zero_pud_populate(p4d, addr, next);
mm/kasan/init.c
219
} while (p4d++, addr = next, addr != end);
mm/kasan/init.c
242
p4d_t *p4d;
mm/kasan/init.c
256
p4d = p4d_offset(pgd, addr);
mm/kasan/init.c
257
p4d_populate_kernel(addr, p4d,
mm/kasan/init.c
259
pud = pud_offset(p4d, addr);
mm/kasan/init.c
314
static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
mm/kasan/init.c
325
pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
mm/kasan/init.c
326
p4d_clear(p4d);
mm/kasan/init.c
331
p4d_t *p4d;
mm/kasan/init.c
335
p4d = p4d_start + i;
mm/kasan/init.c
336
if (!p4d_none(*p4d))
mm/kasan/init.c
419
static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
mm/kasan/init.c
424
for (; addr < end; addr = next, p4d++) {
mm/kasan/init.c
429
if (!p4d_present(*p4d))
mm/kasan/init.c
432
if (kasan_pud_table(*p4d)) {
mm/kasan/init.c
435
p4d_clear(p4d);
mm/kasan/init.c
439
pud = pud_offset(p4d, addr);
mm/kasan/init.c
441
kasan_free_pud(pud_offset(p4d, 0), p4d);
mm/kasan/init.c
45
static inline bool kasan_pud_table(p4d_t p4d)
mm/kasan/init.c
458
p4d_t *p4d;
mm/kasan/init.c
47
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
mm/kasan/init.c
474
p4d = p4d_offset(pgd, addr);
mm/kasan/init.c
475
kasan_remove_p4d_table(p4d, addr, next);
mm/kasan/init.c
50
static inline bool kasan_pud_table(p4d_t p4d)
mm/kasan/shadow.c
189
p4d_t *p4d;
mm/kasan/shadow.c
196
p4d = p4d_offset(pgd, addr);
mm/kasan/shadow.c
197
if (p4d_none(*p4d))
mm/kasan/shadow.c
199
pud = pud_offset(p4d, addr);
mm/memory-failure.c
342
p4d_t *p4d;
mm/memory-failure.c
352
p4d = p4d_offset(pgd, address);
mm/memory-failure.c
353
if (!p4d_present(*p4d))
mm/memory-failure.c
355
pud = pud_offset(p4d, address);
mm/memory.c
2030
struct vm_area_struct *vma, p4d_t *p4d,
mm/memory.c
2037
pud = pud_offset(p4d, addr);
mm/memory.c
2062
p4d_t *p4d;
mm/memory.c
2065
p4d = p4d_offset(pgd, addr);
mm/memory.c
2068
if (p4d_none_or_clear_bad(p4d))
mm/memory.c
2070
next = zap_pud_range(tlb, vma, p4d, addr, next, details);
mm/memory.c
2071
} while (p4d++, addr = next, addr != end);
mm/memory.c
2264
p4d_t *p4d;
mm/memory.c
2269
p4d = p4d_alloc(mm, pgd, addr);
mm/memory.c
2270
if (!p4d)
mm/memory.c
2272
pud = pud_alloc(mm, p4d, addr);
mm/memory.c
232
static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
mm/memory.c
241
pud = pud_offset(p4d, addr);
mm/memory.c
260
pud = pud_offset(p4d, start);
mm/memory.c
261
p4d_clear(p4d);
mm/memory.c
270
p4d_t *p4d;
mm/memory.c
275
p4d = p4d_offset(pgd, addr);
mm/memory.c
278
if (p4d_none_or_clear_bad(p4d))
mm/memory.c
280
free_pud_range(tlb, p4d, addr, next, floor, ceiling);
mm/memory.c
281
} while (p4d++, addr = next, addr != end);
mm/memory.c
2916
static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
mm/memory.c
2925
pud = pud_alloc(mm, p4d, addr);
mm/memory.c
294
p4d = p4d_offset(pgd, start);
mm/memory.c
2942
p4d_t *p4d;
mm/memory.c
2947
p4d = p4d_alloc(mm, pgd, addr);
mm/memory.c
2948
if (!p4d)
mm/memory.c
2952
err = remap_pud_range(mm, p4d, addr, next,
mm/memory.c
2956
} while (p4d++, addr = next, addr != end);
mm/memory.c
296
p4d_free_tlb(tlb, p4d, start);
mm/memory.c
3296
static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
mm/memory.c
3306
pud = pud_alloc_track(mm, p4d, addr, mask);
mm/memory.c
3310
pud = pud_offset(p4d, addr);
mm/memory.c
3337
p4d_t *p4d;
mm/memory.c
3342
p4d = p4d_alloc_track(mm, pgd, addr, mask);
mm/memory.c
3343
if (!p4d)
mm/memory.c
3346
p4d = p4d_offset(pgd, addr);
mm/memory.c
3350
if (p4d_none(*p4d) && !create)
mm/memory.c
3352
if (WARN_ON_ONCE(p4d_leaf(*p4d)))
mm/memory.c
3354
if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
mm/memory.c
3357
p4d_clear_bad(p4d);
mm/memory.c
3359
err = apply_to_pud_range(mm, p4d, addr, next,
mm/memory.c
3363
} while (p4d++, addr = next, addr != end);
mm/memory.c
538
p4d_t p4d, *p4dp;
mm/memory.c
556
p4d = p4dp_get(p4dp);
mm/memory.c
557
p4dv = p4d_val(p4d);
mm/memory.c
559
if (!p4d_present(p4d) || p4d_leaf(p4d)) {
mm/memory.c
6369
p4d_t *p4d;
mm/memory.c
6373
p4d = p4d_alloc(mm, pgd, address);
mm/memory.c
6374
if (!p4d)
mm/memory.c
6377
vmf.pud = pud_alloc(mm, p4d, address);
mm/memory.c
6684
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
mm/memory.c
6691
if (!p4d_present(*p4d)) {
mm/memory.c
6694
p4d_populate(mm, p4d, new);
mm/memory.c
6794
p4d_t *p4dp, p4d;
mm/memory.c
6812
p4d = p4dp_get(p4dp);
mm/memory.c
6813
if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
mm/mprotect.c
522
struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
mm/mprotect.c
532
pudp = pud_offset(p4d, addr);
mm/mprotect.c
584
p4d_t *p4d;
mm/mprotect.c
588
p4d = p4d_offset(pgd, addr);
mm/mprotect.c
591
ret = change_prepare(vma, p4d, pud, addr, cp_flags);
mm/mprotect.c
594
if (p4d_none_or_clear_bad(p4d))
mm/mprotect.c
596
pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
mm/mprotect.c
598
} while (p4d++, addr = next, addr != end);
mm/mprotect.c
617
ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
mm/mremap.c
115
p4d_t *p4d;
mm/mremap.c
118
p4d = p4d_alloc(mm, pgd, addr);
mm/mremap.c
119
if (!p4d)
mm/mremap.c
122
return pud_alloc(mm, p4d, addr);
mm/mremap.c
78
p4d_t *p4d;
mm/mremap.c
85
p4d = p4d_offset(pgd, addr);
mm/mremap.c
86
if (p4d_none_or_clear_bad(p4d))
mm/mremap.c
89
pud = pud_offset(p4d, addr);
mm/page_vma_mapped.c
187
p4d_t *p4d;
mm/page_vma_mapped.c
226
p4d = p4d_offset(pgd, pvmw->address);
mm/page_vma_mapped.c
227
if (!p4d_present(*p4d)) {
mm/page_vma_mapped.c
231
pud = pud_offset(p4d, pvmw->address);
mm/pagewalk.c
186
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
mm/pagewalk.c
197
pud = pud_offset(p4d, addr);
mm/pagewalk.c
254
p4d_t *p4d;
mm/pagewalk.c
262
p4d = p4d_offset(pgd, addr);
mm/pagewalk.c
265
if (p4d_none_or_clear_bad(p4d)) {
mm/pagewalk.c
267
err = __pud_alloc(walk->mm, p4d, addr);
mm/pagewalk.c
276
err = ops->p4d_entry(p4d, addr, next, walk);
mm/pagewalk.c
281
err = walk_pud_range(p4d, addr, next, walk);
mm/pagewalk.c
284
} while (p4d++, addr = next, addr != end);
mm/percpu.c
3148
p4d_t *p4d;
mm/percpu.c
3153
p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
mm/percpu.c
3154
pgd_populate_kernel(addr, pgd, p4d);
mm/percpu.c
3157
p4d = p4d_offset(pgd, addr);
mm/percpu.c
3158
if (p4d_none(*p4d)) {
mm/percpu.c
3160
p4d_populate_kernel(addr, p4d, pud);
mm/percpu.c
3163
pud = pud_offset(p4d, addr);
mm/pgalloc-track.h
19
static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
mm/pgalloc-track.h
23
if (unlikely(p4d_none(*p4d))) {
mm/pgalloc-track.h
24
if (__pud_alloc(mm, p4d, address))
mm/pgalloc-track.h
29
return pud_offset(p4d, address);
mm/pgtable-generic.c
34
void p4d_clear_bad(p4d_t *p4d)
mm/pgtable-generic.c
36
p4d_ERROR(*p4d);
mm/pgtable-generic.c
37
p4d_clear(p4d);
mm/ptdump.c
53
static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
mm/ptdump.c
57
p4d_t val = p4dp_get(p4d);
mm/rmap.c
886
p4d_t *p4d;
mm/rmap.c
894
p4d = p4d_offset(pgd, address);
mm/rmap.c
895
if (!p4d_present(*p4d))
mm/rmap.c
898
pud = pud_offset(p4d, address);
mm/sparse-vmemmap.c
211
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
mm/sparse-vmemmap.c
213
pud_t *pud = pud_offset(p4d, addr);
mm/sparse-vmemmap.c
226
p4d_t *p4d = p4d_offset(pgd, addr);
mm/sparse-vmemmap.c
227
if (p4d_none(*p4d)) {
mm/sparse-vmemmap.c
232
p4d_populate_kernel(addr, p4d, p);
mm/sparse-vmemmap.c
234
return p4d;
mm/sparse-vmemmap.c
255
p4d_t *p4d;
mm/sparse-vmemmap.c
263
p4d = vmemmap_p4d_populate(pgd, addr, node);
mm/sparse-vmemmap.c
264
if (!p4d)
mm/sparse-vmemmap.c
266
pud = vmemmap_pud_populate(p4d, addr, node);
mm/sparse-vmemmap.c
422
p4d_t *p4d;
mm/sparse-vmemmap.c
433
p4d = vmemmap_p4d_populate(pgd, addr, node);
mm/sparse-vmemmap.c
434
if (!p4d)
mm/sparse-vmemmap.c
437
pud = vmemmap_pud_populate(p4d, addr, node);
mm/swapfile.c
2279
static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
mm/swapfile.c
2287
pud = pud_offset(p4d, addr);
mm/swapfile.c
2303
p4d_t *p4d;
mm/swapfile.c
2307
p4d = p4d_offset(pgd, addr);
mm/swapfile.c
2310
if (p4d_none_or_clear_bad(p4d))
mm/swapfile.c
2312
ret = unuse_pud_range(vma, p4d, addr, next, type);
mm/swapfile.c
2315
} while (p4d++, addr = next, addr != end);
mm/userfaultfd.c
468
p4d_t *p4d;
mm/userfaultfd.c
472
p4d = p4d_alloc(mm, pgd, address);
mm/userfaultfd.c
473
if (!p4d)
mm/userfaultfd.c
475
pud = pud_alloc(mm, p4d, address);
mm/vmalloc.c
219
static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
mm/vmalloc.c
227
pud = pud_alloc_track(&init_mm, p4d, addr, mask);
mm/vmalloc.c
246
static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
mm/vmalloc.c
265
if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
mm/vmalloc.c
268
return p4d_set_huge(p4d, phys_addr, prot);
mm/vmalloc.c
275
p4d_t *p4d;
mm/vmalloc.c
279
p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
mm/vmalloc.c
280
if (!p4d)
mm/vmalloc.c
285
if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
mm/vmalloc.c
291
err = vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask);
mm/vmalloc.c
294
} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
mm/vmalloc.c
424
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
mm/vmalloc.c
431
pud = pud_offset(p4d, addr);
mm/vmalloc.c
452
p4d_t *p4d;
mm/vmalloc.c
455
p4d = p4d_offset(pgd, addr);
mm/vmalloc.c
459
p4d_clear_huge(p4d);
mm/vmalloc.c
460
if (p4d_bad(*p4d))
mm/vmalloc.c
463
if (p4d_none_or_clear_bad(p4d))
mm/vmalloc.c
465
vunmap_pud_range(p4d, addr, next, mask);
mm/vmalloc.c
466
} while (p4d++, addr = next, addr != end);
mm/vmalloc.c
587
static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
mm/vmalloc.c
594
pud = pud_alloc_track(&init_mm, p4d, addr, mask);
mm/vmalloc.c
609
p4d_t *p4d;
mm/vmalloc.c
612
p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
mm/vmalloc.c
613
if (!p4d)
mm/vmalloc.c
617
if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
mm/vmalloc.c
619
} while (p4d++, addr = next, addr != end);
mm/vmalloc.c
804
p4d_t *p4d;
mm/vmalloc.c
822
p4d = p4d_offset(pgd, addr);
mm/vmalloc.c
823
if (p4d_none(*p4d))
mm/vmalloc.c
825
if (p4d_leaf(*p4d))
mm/vmalloc.c
826
return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
mm/vmalloc.c
827
if (WARN_ON_ONCE(p4d_bad(*p4d)))
mm/vmalloc.c
830
pud = pud_offset(p4d, addr);
mm/vmscan.c
3733
static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
mm/vmscan.c
3742
VM_WARN_ON_ONCE(p4d_leaf(*p4d));
mm/vmscan.c
3744
pud = pud_offset(p4d, start & P4D_MASK);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
170
printf("P4D: 0x%016lx\n", mapping.p4d);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
20
u64 p4d;
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
69
parse_next_value(&rest, &mapping->p4d);