PMD_MASK
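Before the hits themselves, a minimal sketch of how PMD_MASK is conventionally derived from PMD_SHIFT/PMD_SIZE and of the alignment idioms that recur below. The PMD_SHIFT value used here is the x86-64/4K-page one, chosen purely for illustration; the real definitions live in the per-architecture pgtable headers, and this snippet is not copied from any single one of them.

/*
 * Hedged sketch: PMD_SIZE is the span covered by one PMD entry and
 * PMD_MASK clears the bits below that span. PMD_SHIFT = 21 is an
 * assumption (x86-64 with 4K pages); other architectures differ.
 */
#include <stdio.h>

#define PMD_SHIFT   21                      /* assumed: x86-64, 4K pages */
#define PMD_SIZE    (1UL << PMD_SHIFT)      /* bytes mapped by one PMD entry */
#define PMD_MASK    (~(PMD_SIZE - 1))       /* clears bits below PMD_SIZE */

int main(void)
{
	unsigned long addr = 0x40321000UL;      /* arbitrary example address */

	/* Idioms that recur throughout the hits below: */
	printf("round down: %#lx\n", addr & PMD_MASK);
	printf("round up:   %#lx\n", (addr + PMD_SIZE - 1) & PMD_MASK);
	printf("offset:     %#lx\n", addr & ~PMD_MASK);
	printf("aligned?    %d\n", !(addr & ~PMD_MASK));
	return 0;
}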
#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
if (split_pmd_page(pmd, addr & PMD_MASK))
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
addr = (addr & PMD_MASK) + SZ_1M;
unsigned long phys = __PREG(GPLR) & PMD_MASK;
*pmd = __pmd((addr & PMD_MASK) | prot);
addr = (addr & PMD_MASK) | prot;
#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
} else if (!((paddr | size | addr) & ~PMD_MASK)) {
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
pmd_empty_section_gap(addr & PMD_MASK);
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
next = (addr + PMD_SIZE - 1) & PMD_MASK;
addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
*ipap &= PMD_MASK;
if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
VM_BUG_ON(phys & ~PMD_MASK);
if (((addr | next | phys) & ~PMD_MASK) == 0 &&
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
& PMD_MASK)
fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
address &= ~PMD_MASK;
address = (address + PMD_SIZE) & PMD_MASK;
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
vaddr &= ~PMD_MASK;
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
vaddr &= ~PMD_MASK;
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
gpa = (gpa & PMD_MASK) + PMD_SIZE;
unsigned long lgpa = gpa & PMD_MASK;
hstart = (start + PMD_SIZE - 1) & PMD_MASK;
hend = end & PMD_MASK;
unsigned long start = address & PMD_MASK;
return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
*gpa &= PMD_MASK;
vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
vaddr <= (vaddr & PMD_MASK) && end >= next)
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
new = __pmd(pmd_val(new) & PMD_MASK);
need_split |= !!(addr & ~PMD_MASK);
try_free_pte_table(pmd, addr & PMD_MASK);
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
vaddr &= ~PMD_MASK;
if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
this_end = (vstart + PMD_SIZE) & PMD_MASK;
vstart = vstart & PMD_MASK;
start = (start + PMD_SIZE) & PMD_MASK;
start = (start + PMD_SIZE) & PMD_MASK;
if (!(start & ~(PMD_MASK))) {
address = __address & PMD_MASK;
address &= PMD_MASK;
if (load_delta & ~PMD_MASK)
ppd->vaddr_end = vaddr_end & PMD_MASK;
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_MASK) & __PHYSICAL_MASK)
((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)
pmd = (physaddr & PMD_MASK) + early_pmd_flags;
BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
for (addr = start & PMD_MASK;
addr &= PMD_MASK;
if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
pfn &= PMD_MASK >> PAGE_SHIFT;
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
lpaddr = address & PMD_MASK;
addr &= PMD_MASK;
if (PFN_PHYS(pfn) & ~PMD_MASK)
addr &= PMD_MASK;
unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
start = addr & PMD_MASK;
offset = virt_addr & ~PMD_MASK;
WARN_ON_ONCE(addr & ~PMD_MASK);
efi_va &= PMD_MASK;
efi_va = (efi_va & PMD_MASK) + pa_offset;
__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
vaddr &= PMD_MASK;
return pa + (vaddr & ~PMD_MASK);
#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
unsigned long pmd_addr = vmf->address & PMD_MASK;
bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
pfn &= PMD_MASK >> PAGE_SHIFT;
unsigned long pmd_addr = vmf->address & PMD_MASK;
#define PAGEMAP_WALK_MASK (PMD_MASK)
unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
unsigned long addr = vmf->address & PMD_MASK;
unsigned long addr = vmf->address & PMD_MASK;
return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
addr &= PMD_MASK;
ceiling &= PMD_MASK;
vmf->address & PMD_MASK, &ptl);
if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
mask = PMD_MASK;
try_realign_addr(pmc, PMD_MASK);
*start = max3(left, vma->vm_start, faddr & PMD_MASK);
*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl);
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
start = max(addr & PMD_MASK, vma->vm_start);
end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;