PTRS_PER_P4D
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
/*
 * p4d_index() - index of the P4D-level entry covering @addr.
 * Shifts out the bits below P4D_SHIFT and masks to the table size;
 * the mask form assumes PTRS_PER_P4D is a power of two.
 * NOTE(review): defined twice in identical form here — presumably these
 * come from different files or mutually exclusive #ifdef branches; confirm.
 */
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);
for (i = 0; i < PTRS_PER_P4D; i++) {
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
/*
 * Statically allocated P4D-level page tables, PTRS_PER_P4D entries each.
 * The first two live in BSS with page alignment (__page_aligned_bss);
 * early_p4d is init-only data (__initdata) and is presumably reclaimed
 * after boot — confirm against the owning file's init flow.
 */
static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
for (i = 0; i < PTRS_PER_P4D; ++i)
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
if (PTRS_PER_P4D != 1)
p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
if (PTRS_PER_P4D > 1)
entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
if (PTRS_PER_P4D > 1)
tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
for (i = 0; i < PTRS_PER_P4D; i++) {
for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
for (i = 0; i < PTRS_PER_P4D; i++) {
if (depth == 1 && PTRS_PER_P4D == 1)