PTRS_PER_PUD
BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
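/* Hedged aside (not from the listing): the BUILD_BUG_ON above pins down
 * the size invariant of a PUD table. A self-contained illustration with
 * assumed values (4 KiB pages, 8-byte entries, as on x86-64 with
 * 4-level paging, where 512 * 8 == 4096 makes the bound exactly tight): */
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096UL	/* assumption, not the kernel macro */
#define EX_PTRS_PER_PUD	512UL	/* assumption: 9 index bits per level */

static_assert(EX_PTRS_PER_PUD * sizeof(uint64_t) <= EX_PAGE_SIZE,
	      "a PUD table must fit in one page");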
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
if (PTRS_PER_PUD != 1)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
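/* Worked example of the pud_index() arithmetic above, assuming x86-64
 * 4-level values (PUD_SHIFT = 30, PTRS_PER_PUD = 512, i.e. the index is
 * VA bits 38:30):
 *
 *   addr            = 0xffff888123456789
 *   addr >> 30      = 0x3fffe2204
 *   ... & (512 - 1) = 0x3fffe2204 & 0x1ff = 4
 *
 * so this address selects slot 4 of its PUD table. */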
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
static pud_t pud[2][PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
for (i = 0; i < PTRS_PER_PUD; i++) {
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
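/* Reading the two VMALLOC_END-style lines above: the product
 * PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE
 * is the largest span a fully populated four-level table tree can map,
 * and min() clamps it to the CPU's actual virtual-address reach
 * (1UL << cpu_vabits; the second variant keeps only half of it).
 * Hedged arithmetic with assumed 512-entry levels and 4 KiB pages:
 *   512 * 512 * 512 * 512 * 4096 = 2^36 * 2^12 = 2^48 = 256 TiB,
 * so a 48-bit VA machine can just cover the whole tree. */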
extern pud_t invalid_pud_table[PTRS_PER_PUD];
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
end = p + PTRS_PER_PUD;
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
extern pud_t invalid_pud_table[PTRS_PER_PUD];
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
end = p + PTRS_PER_PUD;
uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
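/* The two uasm_i_andi() lines emit the same mask: (PTRS_PER_PUD - 1)
 * selects the PUD index and "<< 3" pre-scales it by sizeof(pud_t) == 8,
 * yielding a byte offset directly usable against the table base. A C
 * sketch of that arithmetic (the helper name and fallback constants are
 * assumptions for illustration, not kernel API): */
#ifndef PUD_SHIFT
#define PUD_SHIFT	30	/* assumption: 4-level, 4 KiB pages */
#endif
#ifndef PTRS_PER_PUD
#define PTRS_PER_PUD	512	/* assumption */
#endif

static unsigned long ex_pud_entry_offset(unsigned long vaddr)
{
	/* shift so the index lands pre-multiplied by 8, then mask */
	return (vaddr >> (PUD_SHIFT - 3)) & ((PTRS_PER_PUD - 1) << 3);
}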
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
offset = PTRS_PER_PUD;
for (i = 0; i < PTRS_PER_PUD; i++) {
for (i = 0; i < PTRS_PER_PUD; i++)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
for (i = 0; i < PTRS_PER_PUD; i++) {
static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
for (i = 0; i < PTRS_PER_PUD; ++i)
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
if (PTRS_PER_PUD != 1)
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
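/* The "% PTRS_PER_PUD" above keeps both slots inside the one-page PUD
 * table when the first index is the last slot. A minimal sketch of the
 * wrap-around (values assumed for illustration): */
static void ex_wrap_demo(void)
{
	unsigned long i = 511;			/* image starts in the last slot */
	unsigned long lo = (i + 0) % 512;	/* 511 */
	unsigned long hi = (i + 1) % 512;	/* 0, not an out-of-bounds 512 */
	(void)lo; (void)hi;
}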
memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
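/* Reading the two estimate lines above: DIV_ROUND_UP(len, P4D_SIZE)
 * counts the P4D-sized chunks the range touches (one full PUD table
 * per chunk) and "+ 1" allows for a range that straddles a chunk
 * boundary; the second line then sizes the tables needed to map those
 * new tables themselves. Hedged arithmetic with assumed x86-64 values
 * (P4D_SIZE = 512 GiB, PTRS_PER_PUD = 512, sizeof(pud_t) = 8):
 *   len = 16 MiB  ->  (1 + 1) * 512 * 8 = 8 KiB of PUD entries. */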
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
for (i = 0; i < PTRS_PER_PUD; i++) {
for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
for (i = 0; i < PTRS_PER_PUD; i++)
num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
static pud_t level3_ident_pgt[PTRS_PER_PUD] __page_aligned_bss;
for (i = 0; i < PTRS_PER_PUD; i++) {
for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
n_pmd -= PTRS_PER_PUD;
nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
if (PTRS_PER_PUD > 1)
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
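/* The add/sub pair above is the per-mm page-table accounting charged
 * when a PUD table is allocated or freed: one full table's worth of
 * bytes, PTRS_PER_PUD * sizeof(pud_t). A reduced userspace sketch with
 * a plain atomic counter standing in for mm->pgtables_bytes: */
#include <stdatomic.h>

static atomic_long ex_pgtables_bytes;	/* stand-in for mm->pgtables_bytes */

static void ex_account_pud_table(int alloc)
{
	long bytes = 512L * sizeof(unsigned long);	/* assumed PTRS_PER_PUD * sizeof(pud_t) */

	if (alloc)
		atomic_fetch_add(&ex_pgtables_bytes, bytes);
	else
		atomic_fetch_sub(&ex_pgtables_bytes, bytes);
}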
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
for (i = 0; i < PTRS_PER_PUD; i++) {
if (depth == 2 && PTRS_PER_PUD == 1)
if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))