PUD_SIZE — usage sites across the kernel tree
#define PUD_MASK (~(PUD_SIZE - 1))
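These matches are the per-architecture definitions deriving PUD_MASK from PUD_SIZE. A minimal standalone sketch of how the two relate, assuming the x86-64 4-level value PUD_SHIFT = 30 (each architecture supplies its own shift):

    /* Sketch: how PUD_SIZE and PUD_MASK are conventionally derived.
     * PUD_SHIFT = 30 is an assumption (x86-64 4-level paging). */
    #include <assert.h>
    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1UL << PUD_SHIFT)        /* 1 GiB per PUD entry */
    #define PUD_MASK  (~(PUD_SIZE - 1))         /* clears the in-PUD offset */

    int main(void)
    {
        unsigned long addr = 0x40012345UL;

        /* Masking rounds an address down to its PUD boundary. */
        assert((addr & PUD_MASK) == (addr / PUD_SIZE) * PUD_SIZE);
        printf("%#lx -> %#lx\n", addr, addr & PUD_MASK);
        return 0;
    }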
addr = start + i * PUD_SIZE;
case PUD_SIZE:
__flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
#define PUD_MASK (~(PUD_SIZE-1))
case PUD_SIZE:
case PUD_SIZE:
__set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
n += DIV_ROUND_UP(range, PUD_SIZE);
if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
ALIGN(hva, PUD_SIZE) <= vma->vm_end)
if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
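The lines above are the KVM-style test for whether a stage-2 fault can be served with a PUD-sized block: the host virtual address and the physical address must share the same offset within a PUD, and the PUD-aligned window around hva must sit inside the VMA. A plain-C sketch of the same predicate; the function name and addresses are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

    /* True when the PUD-sized window around hva can be one leaf entry. */
    static bool can_use_pud_block(unsigned long hva, unsigned long pa,
                                  unsigned long vm_start, unsigned long vm_end)
    {
        return (hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
               ALIGN_DOWN(hva, PUD_SIZE) >= vm_start &&
               ALIGN(hva, PUD_SIZE) <= vm_end;
    }

    int main(void)
    {
        /* Offsets agree and the 1 GiB window fits the VMA: eligible. */
        printf("%d\n", can_use_pud_block(0x80000000UL, 0x140000000UL,
                                         0x80000000UL, 0xc0000000UL));
        /* Offsets differ by one page: a PUD leaf cannot map this. */
        printf("%d\n", can_use_pud_block(0x80001000UL, 0x140000000UL,
                                         0x80000000UL, 0xc0000000UL));
        return 0;
    }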
max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
if (sz == PUD_SIZE) {
if (sz != PUD_SIZE && pud_none(pud))
case PUD_SIZE:
return PGDIR_SIZE - PUD_SIZE;
return PUD_SIZE - CONT_PMD_SIZE;
return PUD_SIZE - PMD_SIZE;
case PUD_SIZE:
case PUD_SIZE:
#define SHADOW_ALIGN PUD_SIZE
PUD_SIZE, altmap);
end = addr + PUD_SIZE;
if (ALIGN_DOWN(addr, PUD_SIZE) == addr)
if (WARN_ON_ONCE((next - addr) != PUD_SIZE))
#define PUD_MASK (~(PUD_SIZE-1))
#define PUD_MASK (~(PUD_SIZE-1))
nb = SZ_16G / PUD_SIZE;
#define PUD_MASK (~(PUD_SIZE-1))
return PUD_SIZE;
#define PUD_MASK (~(PUD_SIZE-1))
else if (sz < PUD_SIZE)
pdsize = PUD_SIZE;
gpa = (gpa & PUD_MASK) + PUD_SIZE;
(gpa & (PUD_SIZE - PAGE_SIZE)) ==
(hva & (PUD_SIZE - PAGE_SIZE))) {
if (end - start >= PUD_SIZE)
if (map_page_size == PUD_SIZE) {
flush_tlb_kernel_range(addr, addr + PUD_SIZE);
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
mapping_size = PUD_SIZE;
if (!IS_ALIGNED(addr, PUD_SIZE) ||
!IS_ALIGNED(next, PUD_SIZE)) {
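This pair shows the recurring eligibility test for a PUD leaf when building or tearing down mappings: both the current address and the next boundary must be PUD-aligned, otherwise the walker has to descend to PMD/PTE granularity. A small sketch of the test, mirroring the kernel's IS_ALIGNED and assuming PUD_SHIFT = 30:

    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long addr = 0x80000000UL;  /* 2 GiB: PUD-aligned */
        unsigned long next = 0xc0000000UL;  /* 3 GiB: PUD-aligned */

        if (IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(next, PUD_SIZE))
            puts("whole PUD covered: a leaf entry is possible");
        else
            puts("partial PUD: descend to PMD/PTE level");
        return 0;
    }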
if (map_page_size == PUD_SIZE) {
if (!mm_pmd_folded(mm) && sz >= PUD_SIZE)
else if (sz < PUD_SIZE)
pdsize = PUD_SIZE;
addr = start + i * PUD_SIZE;
#define PUD_MASK (~(PUD_SIZE - 1))
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
size = PUD_SIZE;
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
if (vma_pagesize != PUD_SIZE &&
if (sz == PUD_SIZE)
case PUD_SIZE:
return P4D_SIZE - PUD_SIZE;
return PUD_SIZE - PMD_SIZE;
else if (sz >= PUD_SIZE)
else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
if (sz == PUD_SIZE) {
nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
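The BUG_ON above asserts that the kernel image does not straddle a PUD boundary: the room left between virt_addr and the end of its PUD must cover the whole image. A sketch of the arithmetic with made-up values:

    #include <assert.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1UL << PUD_SHIFT)

    int main(void)
    {
        unsigned long virt_addr = 0xffffffff80000000UL; /* illustrative */
        unsigned long size      = 64UL << 20;           /* 64 MiB image */

        /* Bytes from virt_addr up to the next PUD boundary. */
        unsigned long room = PUD_SIZE - (virt_addr & (PUD_SIZE - 1));

        assert(room >= size);   /* image fits inside a single PUD */
        return 0;
    }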
(uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
(uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap);
if (sz == PUD_SIZE) {
!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
return PUD_SIZE;
PUD_SIZE, PAGE_TABLE);
memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (next - vaddr >= PUD_SIZE &&
pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
flush_tlb_kernel_range(addr, addr + PUD_SIZE);
else if (stride_size >= PUD_SIZE)
stride_size = PUD_SIZE;
start, end - start, PUD_SIZE);
IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
!IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
if (!IS_ALIGNED(pa, PUD_SIZE))
if (sz == PUD_SIZE)
if (sz == PUD_SIZE)
else if (cpu_has_edat2() && size == PUD_SIZE)
need_split |= !!(addr + PUD_SIZE > next);
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap);
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE) &&
#define PUD_MASK (~(PUD_SIZE-1))
if (sz >= PUD_SIZE)
if (size >= PUD_SIZE)
if (size >= PUD_SIZE)
return vstart + PUD_SIZE;
pte_val += PUD_SIZE;
vstart += PUD_SIZE;
if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
if (hugepage_size >= PUD_SIZE) {
#define PUD_MASK (~(PUD_SIZE-1))
if (memparse(p, &p) != PUD_SIZE) {
pud_start = ALIGN(region->start, PUD_SIZE);
pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);
entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
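These two lines are early page-table sizing arithmetic: one PMD table (PTRS_PER_PMD entries of sizeof(pmd_t), i.e. one 4 KiB page on x86-64) is needed per PUD_SIZE of mapped range, plus one spare in case the range is misaligned. A worked example of the first line's arithmetic, assuming x86-64 constants and using unsigned long as a stand-in for pmd_t:

    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define PTRS_PER_PMD 512
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long len = 6UL << 30;      /* map 6 GiB */
        unsigned long pmd_bytes = (DIV_ROUND_UP(len, PUD_SIZE) + 1) *
                                  sizeof(unsigned long) * PTRS_PER_PMD;

        /* 6 full PUDs + 1 spare = 7 PMD pages = 28672 bytes. */
        printf("%lu\n", pmd_bytes);
        return 0;
    }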
#define PUD_MASK (~(PUD_SIZE - 1))
BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
unsigned long start = round_down(mr[i].start, PUD_SIZE);
unsigned long end = round_up(mr[i].end, PUD_SIZE);
end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
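Here pfns rather than addresses are snapped to PUD granularity: PFN_DOWN(PUD_SIZE) is the number of base pages per PUD (262144 with 4 KiB pages and PUD_SHIFT = 30), so rounding a pfn to that multiple lands it on a 1 GiB physical boundary. A sketch with portable stand-ins for the kernel's round_up/round_down:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PUD_SHIFT 30
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
    #define round_down(x, y) (((x) / (y)) * (y))

    int main(void)
    {
        unsigned long pfn = 300000;                 /* a bit past 1 GiB */
        unsigned long step = PFN_DOWN(PUD_SIZE);    /* 262144 pfns per PUD */

        printf("up:   %lu\n", round_up(pfn, step));   /* 524288 -> 2 GiB */
        printf("down: %lu\n", round_down(pfn, step)); /* 262144 -> 1 GiB */
        return 0;
    }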
IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
((end - addr) == PUD_SIZE) &&
IS_ALIGNED(addr, PUD_SIZE)) {
p = early_alloc(PUD_SIZE, nid, false);
memblock_free(p, PUD_SIZE);
vaddr = round_up(vaddr + 1, PUD_SIZE);
if (start & (PUD_SIZE - 1)) {
unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
while (end - start >= PUD_SIZE) {
unmap_pmd_range(pud, start, start + PUD_SIZE);
start += PUD_SIZE;
if (start & (PUD_SIZE - 1)) {
unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
start += PUD_SIZE;
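The fragments above form the classic head/bulk/tail walk: peel off a misaligned head up to the next PUD boundary, consume whole PUDs in the middle (on x86 only when X86_FEATURE_GBPAGES is available), and leave the remainder for finer-grained code. A self-contained sketch of the control flow; process_partial and process_whole_pud are hypothetical stand-ins for the pmd- and pud-level work:

    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define PUD_MASK (~(PUD_SIZE - 1))

    static void process_partial(unsigned long s, unsigned long e)
    {
        printf("pmd-level: %#lx-%#lx\n", s, e);
    }

    static void process_whole_pud(unsigned long s)
    {
        printf("pud-level: %#lx-%#lx\n", s, s + PUD_SIZE);
    }

    static void walk(unsigned long start, unsigned long end)
    {
        /* Head: not PUD-aligned, handle up to the next boundary. */
        if (start & (PUD_SIZE - 1)) {
            unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

            process_partial(start, next_page < end ? next_page : end);
            start = next_page;
        }

        /* Bulk: full PUD_SIZE strides. */
        while (end > start && end - start >= PUD_SIZE) {
            process_whole_pud(start);
            start += PUD_SIZE;
        }

        /* Tail: whatever is left below end. */
        if (start < end)
            process_partial(start, end);
    }

    int main(void)
    {
        walk(0x3ff00000UL, 0xc0100000UL);
        return 0;
    }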
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
cur_pages += PUD_SIZE >> PAGE_SHIFT;
mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
addr = round_up(addr + 1, PUD_SIZE);
xen_free_ro_pages(pa, PUD_SIZE);
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
unsigned int fault_size = PUD_SIZE;
if (dev_dax->align > PUD_SIZE) {
(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
if (can_fault(vmf, PUD_SIZE, &pfn))
#define PUD_MASK (~(PUD_SIZE-1))
else if (_sz >= PUD_SIZE) \
tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);
if (size >= PUD_SIZE)
({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
#define pud_leaf_size(x) PUD_SIZE
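The statement-expression two lines up is the generic pud_addr_end() idiom: advance to the next PUD boundary but never past end. Comparing boundary - 1 with end - 1 keeps the test correct even when the boundary wraps to 0 at the very top of the address space. A standalone rendition, assuming PUD_SHIFT = 30:

    #include <assert.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define PUD_MASK (~(PUD_SIZE - 1))

    static unsigned long pud_addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + PUD_SIZE) & PUD_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
        /* Usual case: stop at the next 1 GiB boundary. */
        assert(pud_addr_end(0x3ff00000UL, 0xc0000000UL) == 0x40000000UL);
        /* Range ends inside this PUD: stop at end. */
        assert(pud_addr_end(0x40000000UL, 0x50000000UL) == 0x50000000UL);
        /* end == 0 encodes the top of the address space; a plain
         * "boundary < end" would terminate early, the -1 form does not. */
        assert(pud_addr_end(0xffffffff80000000UL, 0UL) == 0xffffffffc0000000UL);
        return 0;
    }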
phys_align_check(pstart, pend, PUD_SIZE, &phys,
if (args->fixed_alignment == PUD_SIZE)
args->fixed_alignment < PUD_SIZE)
unsigned long ceil = floor + PUD_SIZE;
unsigned long s_end = sbase + PUD_SIZE;
unsigned long end = start + PUD_SIZE;
unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
*start = ALIGN_DOWN(*start, PUD_SIZE);
*end = ALIGN(*end, PUD_SIZE);
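These two assignments widen an address range outward to whole PUDs, as hugetlb PMD sharing requires: a shared PMD page covers a full PUD_SIZE of address space, so any flush or unshare must act on complete granules. A minimal rendition with illustrative values:

    #include <stdio.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

    static void widen_to_pud(unsigned long *start, unsigned long *end)
    {
        *start = ALIGN_DOWN(*start, PUD_SIZE);
        *end = ALIGN(*end, PUD_SIZE);
    }

    int main(void)
    {
        unsigned long start = 0x40200000UL, end = 0x80200000UL;

        widen_to_pud(&start, &end);
        printf("%#lx-%#lx\n", start, end);  /* 0x40000000-0xc0000000 */
        return 0;
    }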
if (sz == PUD_SIZE) {
if (sz == PUD_SIZE)
if (hp_size == PUD_SIZE)
return P4D_SIZE - PUD_SIZE;
return PUD_SIZE - PMD_SIZE;
return PUD_SIZE - PMD_SIZE;
for (address = start; address < end; address += PUD_SIZE) {
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
ALIGN_DOWN(vma->vm_end, PUD_SIZE),
if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
if ((next - addr != PUD_SIZE) ||
flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
size = PUD_SIZE;
} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
unsigned long stride = PUD_SIZE >> PAGE_SHIFT;
__page_table_check_pud_clear(mm, addr + PUD_SIZE * i, *(pudp + i));
step_forward(pvmw, PUD_SIZE);
entry_size = PUD_SIZE;
if ((end - addr) != PUD_SIZE)
if (!IS_ALIGNED(addr, PUD_SIZE))
if (!IS_ALIGNED(phys_addr, PUD_SIZE))
WARN_ON(next - addr < PUD_SIZE);
if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))