Symbol: PMD_SIZE
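PMD_SIZE is the span of virtual address space mapped by a single PMD (Page Middle Directory) entry, defined per architecture as `1UL << PMD_SHIFT`; PMD_MASK is derived from it as `~(PMD_SIZE - 1)`, as the repeated `#define PMD_MASK (~(PMD_SIZE-1))` hits below show. Nearly every reference in the listing is one of three idioms: an alignment check (`IS_ALIGNED(addr, PMD_SIZE)`), a round-up/round-down (`(addr + PMD_SIZE - 1) & PMD_MASK`, `addr & PMD_MASK`), or a walk that steps one PMD at a time (`addr += PMD_SIZE`). The following is a minimal userspace sketch of that arithmetic, assuming x86-64's PMD_SHIFT of 21 (2 MiB per PMD entry with 4 KiB pages); the real definitions live in arch headers such as arch/x86/include/asm/pgtable_64_types.h, and the stand-in macros here are illustrative only.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel macros: on x86-64 with 4 KiB
 * pages PMD_SHIFT is 21, so one PMD entry maps 2 MiB. */
#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))	/* same shape as the #defines below */

/* Kernel-style alignment check (the real helper is in include/linux/align.h). */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long addr = 0x40123456UL;

	unsigned long down = addr & PMD_MASK;			/* round down */
	unsigned long up   = (addr + PMD_SIZE - 1) & PMD_MASK;	/* round up   */

	printf("PMD_SIZE   = 0x%lx (%lu MiB)\n", PMD_SIZE, PMD_SIZE >> 20);
	printf("round down = 0x%lx\n", down);
	printf("round up   = 0x%lx\n", up);
	printf("aligned?   = %d\n", IS_ALIGNED(addr, PMD_SIZE));
	return 0;
}
```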
arch/alpha/include/asm/pgtable.h:34: #define PMD_MASK (~(PMD_SIZE-1))
arch/arc/include/asm/pgtable-levels.h:82: #define PMD_MASK (~(PMD_SIZE - 1))
arch/arc/include/asm/processor.h:90: #define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PMD_SIZE * 4)
arch/arc/mm/tlb.c:624: BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
arch/arc/mm/tlb.c:630: BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
arch/arm/include/asm/highmem.h:8: #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
arch/arm/include/asm/kfence.h:24: flush_tlb_kernel_range(addr, addr + PMD_SIZE);
arch/arm/include/asm/memory.h:74: #define MODULES_END (PAGE_OFFSET - PMD_SIZE)
arch/arm/include/asm/pgtable-2level.h:88: #define PMD_MASK (~(PMD_SIZE-1))
arch/arm/mm/dma-mapping.c:299: addr += PMD_SIZE)
arch/arm/mm/dump.c:350: addr = start + i * PMD_SIZE;
arch/arm/mm/dump.c:357: if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
arch/arm/mm/fault-armv.c:127: const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
arch/arm/mm/fault-armv.c:128: const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
arch/arm/mm/ioremap.c:199: addr += PMD_SIZE;
arch/arm/mm/ioremap.c:232: addr += PMD_SIZE;
arch/arm/mm/ioremap.c:263: addr += PMD_SIZE;
arch/arm/mm/kasan_init.c:191: for (; start && start < end; start += PMD_SIZE)
arch/arm/mm/kasan_init.c:286: create_mapping((void *)PKMAP_BASE, (void *)(PKMAP_BASE + PMD_SIZE));
arch/arm/mm/mmu.c:1114: next = (addr + PMD_SIZE - 1) & PMD_MASK;
arch/arm/mm/mmu.c:1206: if (!IS_ALIGNED(block_start, PMD_SIZE)) {
arch/arm/mm/mmu.c:1209: len = round_up(block_start, PMD_SIZE) - block_start;
arch/arm/mm/mmu.c:1242: if (!IS_ALIGNED(block_start, PMD_SIZE))
arch/arm/mm/mmu.c:1244: else if (!IS_ALIGNED(block_end, PMD_SIZE))
arch/arm/mm/mmu.c:1263: memblock_limit = round_down(memblock_limit, PMD_SIZE);
arch/arm/mm/mmu.c:1293: for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE)
arch/arm/mm/mmu.c:1301: for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE)
arch/arm/mm/mmu.c:1304: for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
arch/arm/mm/mmu.c:1310: addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
arch/arm/mm/mmu.c:1312: for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
arch/arm/mm/mmu.c:1327: addr < VMALLOC_START; addr += PMD_SIZE)
arch/arm/mm/mmu.c:1382: for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
arch/arm64/include/asm/hugetlb.h:83: case PMD_SIZE:
arch/arm64/include/asm/hugetlb.h:84: __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
arch/arm64/include/asm/kernel-pgtable.h:21: #if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
arch/arm64/include/asm/pgtable-hwdef.h:57: #define PMD_MASK (~(PMD_SIZE-1))
arch/arm64/include/asm/pgtable-hwdef.h:97: #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
arch/arm64/include/asm/pgtable.h:1352: case PMD_SIZE:
arch/arm64/include/asm/pgtable.h:1409: return pte_pmd(__ptep_get_and_clear_anysz(mm, address, (pte_t *)pmdp, PMD_SIZE));
arch/arm64/include/asm/pgtable.h:684: case PMD_SIZE:
arch/arm64/include/asm/pgtable.h:721: __set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
arch/arm64/include/asm/pgtable.h:787: #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
arch/arm64/include/asm/pgtable.h:92: __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:230: if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
arch/arm64/kvm/hyp/nvhe/mem_protect.c:237: if (map_size == PMD_SIZE)
arch/arm64/kvm/hyp/nvhe/mm.c:335: phys = ALIGN(hyp_memory[i].base, PMD_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c:336: if (phys + PMD_SIZE < (hyp_memory[i].base + hyp_memory[i].size))
arch/arm64/kvm/hyp/nvhe/mm.c:344: addr = ALIGN(__io_map_base, PMD_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c:345: ret = __pkvm_alloc_private_va_range(addr, PMD_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c:349: ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PMD_SIZE, phys, PAGE_HYP);
arch/arm64/kvm/hyp/nvhe/mm.c:353: ret = kvm_pgtable_walk(&pkvm_pgtable, addr, PMD_SIZE, &walker);
arch/arm64/kvm/hyp/nvhe/mm.c:367: *size = PMD_SIZE;
arch/arm64/kvm/mmu.c:102: n += DIV_ROUND_UP(range, PMD_SIZE);
arch/arm64/kvm/mmu.c:1341: if (is_protected_kvm_enabled() && map_size != PMD_SIZE)
arch/arm64/kvm/mmu.c:1413: if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
arch/arm64/kvm/mmu.c:1419: if (sz < PMD_SIZE)
arch/arm64/kvm/mmu.c:1426: return PMD_SIZE;
arch/arm64/kvm/mmu.c:1454: if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
arch/arm64/kvm/mmu.c:1455: ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
arch/arm64/kvm/mmu.c:1456: ALIGN(hva, PMD_SIZE) <= vma->vm_end)
arch/arm64/kvm/mmu.c:1714: if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
arch/arm64/kvm/mmu.c:1747: if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
arch/arm64/kvm/mmu.c:1748: max_map_size = PMD_SIZE;
arch/arm64/kvm/mmu.c:1749: else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
arch/arm64/kvm/pkvm.c:370: if (size != PAGE_SIZE && size != PMD_SIZE)
arch/arm64/kvm/pkvm.c:72: hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
arch/arm64/kvm/pkvm.c:73: PMD_SIZE);
arch/arm64/kvm/pkvm.c:77: hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
arch/arm64/mm/hugetlbpage.c:105: *pgsize = PMD_SIZE;
arch/arm64/mm/hugetlbpage.c:264: } else if (sz == PMD_SIZE) {
arch/arm64/mm/hugetlbpage.c:308: if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
arch/arm64/mm/hugetlbpage.c:333: case PMD_SIZE:
arch/arm64/mm/hugetlbpage.c:334: return PUD_SIZE - PMD_SIZE;
arch/arm64/mm/hugetlbpage.c:336: return PMD_SIZE - CONT_PTE_SIZE;
arch/arm64/mm/hugetlbpage.c:357: case PMD_SIZE:
arch/arm64/mm/hugetlbpage.c:56: case PMD_SIZE:
arch/arm64/mm/hugetlbpage.c:91: *pgsize = PMD_SIZE;
arch/arm64/mm/mmu.c:1493: PMD_SIZE, altmap);
arch/arm64/mm/mmu.c:1919: } while (pmdp++, next += PMD_SIZE, next != end);
arch/arm64/mm/mmu.c:634: unsigned int step = PMD_SIZE >> PAGE_SHIFT;
arch/arm64/mm/mmu.c:733: if (ALIGN_DOWN(addr, PMD_SIZE) == addr)
arch/arm64/mm/pageattr.c:56: if (WARN_ON_ONCE((next - addr) != PMD_SIZE))
arch/csky/mm/init.c:118: vaddr += PMD_SIZE;
arch/csky/mm/init.c:131: fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
arch/loongarch/include/asm/pgtable.h:109: min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
arch/loongarch/include/asm/pgtable.h:113: min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
arch/loongarch/include/asm/pgtable.h:116: #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
arch/loongarch/include/asm/pgtable.h:31: #define PMD_MASK (~(PMD_SIZE-1))
arch/loongarch/include/asm/pgtable.h:36: #define PMD_MASK (~(PMD_SIZE-1))
arch/loongarch/kernel/numa.c:84: PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
arch/loongarch/kvm/mmu.c:399: if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE)
arch/loongarch/kvm/mmu.c:400: && IS_ALIGNED(hva_start, PMD_SIZE))
arch/loongarch/kvm/mmu.c:427: gpa_offset = gpa_start & (PMD_SIZE - 1);
arch/loongarch/kvm/mmu.c:428: hva_offset = hva_start & (PMD_SIZE - 1);
arch/loongarch/kvm/mmu.c:433: gpa_offset = PMD_SIZE;
arch/loongarch/kvm/mmu.c:434: if ((size + gpa_offset) < (PMD_SIZE * 2))
arch/loongarch/kvm/mmu.c:638: return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE));
arch/m68k/include/asm/pgtable_mm.h:40: #define PMD_MASK (~(PMD_SIZE-1))
arch/m68k/mm/kmap.c:254: if (!(virtaddr & (PMD_SIZE-1)))
arch/m68k/mm/kmap.c:269: physaddr += PMD_SIZE;
arch/m68k/mm/kmap.c:270: virtaddr += PMD_SIZE;
arch/m68k/mm/kmap.c:271: size -= PMD_SIZE;
arch/m68k/mm/kmap.c:379: virtaddr += PMD_SIZE;
arch/m68k/mm/kmap.c:380: size -= PMD_SIZE;
arch/m68k/mm/kmap.c:50: #define IO_SIZE PMD_SIZE
arch/m68k/mm/kmap.c:85: virtaddr += PMD_SIZE;
arch/m68k/mm/kmap.c:86: size -= PMD_SIZE;
arch/m68k/mm/motorola.c:320: if (!(virtaddr & (PMD_SIZE-1)))
arch/m68k/mm/motorola.c:355: physaddr += PMD_SIZE;
arch/m68k/mm/motorola.c:369: size -= PMD_SIZE;
arch/m68k/mm/motorola.c:370: virtaddr += PMD_SIZE;
arch/m68k/sun3x/dvma.c:121: end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
arch/mips/include/asm/pgtable-64.h:51: #define PMD_MASK (~(PMD_SIZE-1))
arch/mips/mm/init.c:260: vaddr += PMD_SIZE;
arch/nios2/mm/ioremap.c:33: if (end > PMD_SIZE)
arch/nios2/mm/ioremap.c:34: end = PMD_SIZE;
arch/nios2/mm/ioremap.c:70: address = (address + PMD_SIZE) & PMD_MASK;
arch/parisc/include/asm/pgtable.h:114: #define PMD_MASK (~(PMD_SIZE-1))
arch/parisc/kernel/pci-dma.c:120: vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
arch/parisc/kernel/pci-dma.c:121: orig_vaddr += PMD_SIZE;
arch/parisc/kernel/pci-dma.c:170: if (end > PMD_SIZE)
arch/parisc/kernel/pci-dma.c:171: end = PMD_SIZE;
arch/parisc/kernel/pci-dma.c:210: vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
arch/parisc/kernel/pci-dma.c:211: orig_vaddr += PMD_SIZE;
arch/parisc/kernel/pci-dma.c:85: if (end > PMD_SIZE)
arch/parisc/kernel/pci-dma.c:86: end = PMD_SIZE;
arch/parisc/mm/init.c:676: BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
arch/powerpc/include/asm/book3s/64/hash.h:201: nb = SZ_16M / PMD_SIZE;
arch/powerpc/include/asm/book3s/64/pgtable.h:210: #define PMD_MASK (~(PMD_SIZE-1))
arch/powerpc/include/asm/book3s/64/pgtable.h:285: return PMD_SIZE;
arch/powerpc/include/asm/nohash/64/pgtable-4k.h:32: #define PMD_MASK (~(PMD_SIZE-1))
arch/powerpc/include/asm/nohash/pgtable.h:76: if (sz < PMD_SIZE)
arch/powerpc/include/asm/nohash/pgtable.h:79: pdsize = PMD_SIZE;
arch/powerpc/include/asm/pgtable.h:201: return IS_ALIGNED(vmemmap_size, PMD_SIZE);
arch/powerpc/kvm/book3s_64_mmu_radix.c:1386: gpa = (gpa & PMD_MASK) + PMD_SIZE;
arch/powerpc/kvm/book3s_64_mmu_radix.c:877: (gpa & (PMD_SIZE - PAGE_SIZE)) ==
arch/powerpc/kvm/book3s_64_mmu_radix.c:878: (hva & (PMD_SIZE - PAGE_SIZE))) {
arch/powerpc/mm/book3s64/hash_tlb.c:232: addr = ALIGN_DOWN(addr, PMD_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c:1007: VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE));
arch/powerpc/mm/book3s64/radix_pgtable.c:103: if (map_page_size == PMD_SIZE) {
arch/powerpc/mm/book3s64/radix_pgtable.c:1143: start = ALIGN_DOWN(start, PMD_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c:1170: if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
arch/powerpc/mm/book3s64/radix_pgtable.c:1171: altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
arch/powerpc/mm/book3s64/radix_pgtable.c:1179: p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
arch/powerpc/mm/book3s64/radix_pgtable.c:1338: addr_pfn += (PMD_SIZE >> PAGE_SHIFT);
arch/powerpc/mm/book3s64/radix_pgtable.c:166: if (map_page_size == PMD_SIZE) {
arch/powerpc/mm/book3s64/radix_pgtable.c:1692: flush_tlb_kernel_range(addr, addr + PMD_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c:330: } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
arch/powerpc/mm/book3s64/radix_pgtable.c:332: mapping_size = PMD_SIZE;
arch/powerpc/mm/book3s64/radix_pgtable.c:741: unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c:743: return !vmemmap_populated(start, PMD_SIZE);
arch/powerpc/mm/book3s64/radix_pgtable.c:836: if (IS_ALIGNED(addr, PMD_SIZE) &&
arch/powerpc/mm/book3s64/radix_pgtable.c:837: IS_ALIGNED(next, PMD_SIZE)) {
arch/powerpc/mm/book3s64/radix_pgtable.c:839: free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
arch/powerpc/mm/book3s64/radix_pgtable.c:845: free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
arch/powerpc/mm/book3s64/radix_tlb.c:1049: if (!flush_pid && (end - start) >= PMD_SIZE)
arch/powerpc/mm/book3s64/radix_tlb.c:1081: hstart = (start + PMD_SIZE - 1) & PMD_MASK;
arch/powerpc/mm/book3s64/radix_tlb.c:1093: PMD_SIZE, MMU_PAGE_2M);
arch/powerpc/mm/book3s64/radix_tlb.c:1102: PMD_SIZE, MMU_PAGE_2M);
arch/powerpc/mm/book3s64/radix_tlb.c:1109: hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc);
arch/powerpc/mm/hugetlbpage.c:66: if (sz >= PMD_SIZE) {
arch/powerpc/mm/hugetlbpage.c:71: for (i = 0; i < sz / PMD_SIZE; i++) {
arch/powerpc/mm/nohash/tlb_64e.c:60: unsigned long end = address + PMD_SIZE;
arch/powerpc/mm/pgtable.c:367: if (sz < PMD_SIZE)
arch/powerpc/mm/pgtable.c:370: pdsize = PMD_SIZE;
arch/powerpc/mm/ptdump/hashpagetable.c:421: addr = start + i * PMD_SIZE;
arch/riscv/include/asm/crash_reserve.h:5: #define CRASH_ALIGN PMD_SIZE
arch/riscv/include/asm/pgtable-64.h:42: #define PMD_MASK (~(PMD_SIZE - 1))
arch/riscv/include/asm/pgtable.h:102: #define MAX_FDT_SIZE PMD_SIZE
arch/riscv/include/asm/pgtable.h:104: #define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
arch/riscv/kernel/kexec_elf.c:95: kbuf.buf_align = PMD_SIZE;
arch/riscv/kvm/mmu.c:276: WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
arch/riscv/kvm/mmu.c:299: WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
arch/riscv/kvm/mmu.c:348: if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
arch/riscv/kvm/mmu.c:363: return (hva >= ALIGN(uaddr_start, PMD_SIZE)) && (hva < ALIGN_DOWN(uaddr_end, PMD_SIZE));
arch/riscv/kvm/mmu.c:412: size = PMD_SIZE;
arch/riscv/kvm/mmu.c:435: if (sz < PMD_SIZE)
arch/riscv/kvm/mmu.c:442: return PMD_SIZE;
arch/riscv/kvm/mmu.c:498: if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
arch/riscv/kvm/mmu.c:513: vma_pagesize != PMD_SIZE &&
arch/riscv/mm/hugetlbpage.c:112: if (sz == PMD_SIZE)
arch/riscv/mm/hugetlbpage.c:137: case PMD_SIZE:
arch/riscv/mm/hugetlbpage.c:138: return PUD_SIZE - PMD_SIZE;
arch/riscv/mm/hugetlbpage.c:140: return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
arch/riscv/mm/hugetlbpage.c:228: else if (sz >= PMD_SIZE)
arch/riscv/mm/hugetlbpage.c:56: if (sz == PMD_SIZE) {
arch/riscv/mm/init.c:1105: nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
arch/riscv/mm/init.c:1107: kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
arch/riscv/mm/init.c:1157: BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
arch/riscv/mm/init.c:1196: (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
arch/riscv/mm/init.c:1208: kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:1211: kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:1689: free_vmemmap_storage(pmd_page(pmd), PMD_SIZE, altmap);
arch/riscv/mm/init.c:235: vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
arch/riscv/mm/init.c:538: if (sz == PMD_SIZE) {
arch/riscv/mm/init.c:756: !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
arch/riscv/mm/init.c:757: return PMD_SIZE;
arch/riscv/mm/init.c:884: PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:886: set_satp_mode_pmd + PMD_SIZE,
arch/riscv/mm/init.c:887: set_satp_mode_pmd + PMD_SIZE,
arch/riscv/mm/init.c:888: PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:945: for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
arch/riscv/mm/init.c:948: PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:953: for (va = start_va; va < end_va; va += PMD_SIZE)
arch/riscv/mm/init.c:956: PMD_SIZE, PAGE_KERNEL);
arch/riscv/mm/init.c:964: for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
arch/riscv/mm/init.c:967: PMD_SIZE,
arch/riscv/mm/init.c:982: uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
arch/riscv/mm/init.c:985: BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
arch/riscv/mm/init.c:993: pa, PMD_SIZE, PAGE_KERNEL);
arch/riscv/mm/init.c:994: create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
arch/riscv/mm/init.c:995: pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
arch/riscv/mm/init.c:998: dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
arch/riscv/mm/kasan_init.c:66: if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
arch/riscv/mm/kasan_init.c:67: (next - vaddr) >= PMD_SIZE) {
arch/riscv/mm/kasan_init.c:68: phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
arch/riscv/mm/kasan_init.c:71: memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
arch/riscv/mm/pageattr.c:107: if (next - vaddr >= PMD_SIZE &&
arch/riscv/mm/pageattr.c:165: pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
arch/riscv/mm/pgtable.c:132: flush_tlb_kernel_range(addr, addr + PMD_SIZE);
arch/riscv/mm/tlbflush.c:192: else if (stride_size >= PMD_SIZE)
arch/riscv/mm/tlbflush.c:193: stride_size = PMD_SIZE;
arch/riscv/mm/tlbflush.c:214: start, end - start, PMD_SIZE);
arch/s390/boot/vmem.c:174: IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
arch/s390/boot/vmem.c:323: !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
arch/s390/boot/vmem.c:327: if (!IS_ALIGNED(pa, PMD_SIZE))
arch/s390/kernel/vdso.c:95: end = (start + len + PMD_SIZE - 1) & PMD_MASK;
arch/s390/mm/hugetlbpage.c:196: else if (sz == PMD_SIZE)
arch/s390/mm/hugetlbpage.c:227: if (cpu_has_edat1() && size == PMD_SIZE)
arch/s390/mm/pageattr.c:180: need_split |= !!(addr + PMD_SIZE > next);
arch/s390/mm/pageattr.c:219: pmd_addr += PMD_SIZE;
arch/s390/mm/vmem.c:105: ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
arch/s390/mm/vmem.c:128: if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
arch/s390/mm/vmem.c:138: unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
arch/s390/mm/vmem.c:146: if (!IS_ALIGNED(start, PMD_SIZE))
arch/s390/mm/vmem.c:153: if (!IS_ALIGNED(end, PMD_SIZE))
arch/s390/mm/vmem.c:160: unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
arch/s390/mm/vmem.c:164: return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
arch/s390/mm/vmem.c:240: if (IS_ALIGNED(addr, PMD_SIZE) &&
arch/s390/mm/vmem.c:241: IS_ALIGNED(next, PMD_SIZE)) {
arch/s390/mm/vmem.c:243: vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
arch/s390/mm/vmem.c:247: vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
arch/s390/mm/vmem.c:253: if (IS_ALIGNED(addr, PMD_SIZE) &&
arch/s390/mm/vmem.c:254: IS_ALIGNED(next, PMD_SIZE) &&
arch/s390/mm/vmem.c:270: new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
arch/s390/mm/vmem.c:273: if (!IS_ALIGNED(addr, PMD_SIZE) ||
arch/s390/mm/vmem.c:274: !IS_ALIGNED(next, PMD_SIZE)) {
arch/sh/include/asm/pgtable-3level.h:24: #define PMD_MASK (~(PMD_SIZE-1))
arch/sh/include/asm/pgtable-3level.h:26: #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
arch/sh/mm/init.c:193: vaddr += PMD_SIZE;
arch/sh/mm/init.c:321: end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
arch/sparc/include/asm/pgtable_32.h:16: #define PMD_MASK (~(PMD_SIZE-1))
arch/sparc/include/asm/pgtable_64.h:53: #define PMD_MASK (~(PMD_SIZE-1))
arch/sparc/mm/hugetlbpage.c:208: if (sz >= PMD_SIZE)
arch/sparc/mm/hugetlbpage.c:252: else if (size >= PMD_SIZE)
arch/sparc/mm/hugetlbpage.c:295: else if (size >= PMD_SIZE)
arch/sparc/mm/init_64.c:1723: return vstart + PMD_SIZE;
arch/sparc/mm/init_64.c:1740: pte_val += PMD_SIZE;
arch/sparc/mm/init_64.c:1741: vstart += PMD_SIZE;
arch/sparc/mm/init_64.c:1751: if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
arch/sparc/mm/init_64.c:1835: this_end = (vstart + PMD_SIZE) & PMD_MASK;
arch/sparc/mm/init_64.c:2586: vend = ALIGN(vend, PMD_SIZE);
arch/sparc/mm/init_64.c:2587: for (; vstart < vend; vstart += PMD_SIZE) {
arch/sparc/mm/init_64.c:2608: void *block = vmemmap_alloc_block(PMD_SIZE, node);
arch/sparc/mm/init_64.c:431: } else if (hugepage_size >= PMD_SIZE) {
arch/sparc/mm/init_64.c:438: if (hugepage_size >= PMD_SIZE) {
arch/sparc/mm/srmmu.c:701: if (start > (0xffffffffUL - PMD_SIZE))
arch/sparc/mm/srmmu.c:703: start = (start + PMD_SIZE) & PMD_MASK;
arch/sparc/mm/srmmu.c:736: if (start > (0xffffffffUL - PMD_SIZE))
arch/sparc/mm/srmmu.c:738: start = (start + PMD_SIZE) & PMD_MASK;
arch/sparc/mm/srmmu.c:793: if (srmmu_probe(addr + PMD_SIZE) == probed)
arch/sparc/mm/srmmu.c:821: start += PMD_SIZE;
arch/um/include/asm/pgtable-4level.h:32: #define PMD_MASK (~(PMD_SIZE-1))
arch/x86/boot/compressed/ident_map_64.c:370: end = address + PMD_SIZE;
arch/x86/boot/compressed/ident_map_64.c:98: start = round_down(start, PMD_SIZE);
arch/x86/boot/compressed/ident_map_64.c:99: end = round_up(end, PMD_SIZE);
arch/x86/boot/startup/map_kernel.c:179: for (i = 0; i < DIV_ROUND_UP(va_end - va_text, PMD_SIZE); i++) {
arch/x86/boot/startup/map_kernel.c:182: pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
arch/x86/boot/startup/map_kernel.c:53: for (; paddr < paddr_end; paddr += PMD_SIZE) {
arch/x86/boot/startup/sme.c:194: ppd->vaddr += PMD_SIZE;
arch/x86/boot/startup/sme.c:195: ppd->paddr += PMD_SIZE;
arch/x86/boot/startup/sme.c:221: ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
arch/x86/boot/startup/sme.c:320: kernel_end = ALIGN((unsigned long)rip_rel_ptr(_end), PMD_SIZE);
arch/x86/boot/startup/sme.c:347: execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
arch/x86/boot/startup/sme.c:370: workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);
arch/x86/boot/startup/sme.c:93: static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
arch/x86/coco/sev/core.c:211: unsigned long vaddr_end = vaddr + PMD_SIZE;
arch/x86/coco/sev/core.c:351: if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
arch/x86/coco/sev/core.c:352: (vaddr_end - vaddr) >= PMD_SIZE) {
arch/x86/coco/sev/core.c:354: vaddr += PMD_SIZE;
arch/x86/include/asm/efi.h:34: #define EFI_UNACCEPTED_UNIT_SIZE PMD_SIZE
arch/x86/include/asm/pgtable_32_areas.h:36: #define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE)
arch/x86/include/asm/pgtable_32_types.h:13: # define PMD_MASK (~(PMD_SIZE - 1))
arch/x86/include/asm/pgtable_64_types.h:83: #define PMD_MASK (~(PMD_SIZE - 1))
arch/x86/kernel/amd_gart_64.c:508: iommu_size -= round_up(a, PMD_SIZE) - a;
arch/x86/kernel/setup_percpu.c:144: atom_size = PMD_SIZE;
arch/x86/kvm/mmu/mmu.c:1448: if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
arch/x86/kvm/mmu/mmu.c:1449: ALIGN(end << PAGE_SHIFT, PMD_SIZE))
arch/x86/kvm/svm/sev.c:4078: if (IS_ALIGNED(svm->vmcb->control.exit_info_2, PMD_SIZE)) {
arch/x86/kvm/svm/sev.c:5090: use_2m_update ? PMD_SIZE : PAGE_SIZE);
arch/x86/mm/cpu_entry_area.c:258: for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
arch/x86/mm/fault.c:271: addr += PMD_SIZE) {
arch/x86/mm/hugetlbpage.c:26: if (size == PMD_SIZE)
arch/x86/mm/ident_map.c:84: for (; addr < end; addr += PMD_SIZE) {
arch/x86/mm/init.c:354: unsigned long start = round_down(mr[i].start, PMD_SIZE);
arch/x86/mm/init.c:355: unsigned long end = round_up(mr[i].end, PMD_SIZE);
arch/x86/mm/init.c:421: end_pfn = PFN_DOWN(PMD_SIZE);
arch/x86/mm/init.c:423: end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:425: end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:435: start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:437: end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:440: if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
arch/x86/mm/init.c:441: end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:462: start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:463: end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
arch/x86/mm/init.c:643: addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
arch/x86/mm/init.c:647: real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
arch/x86/mm/init.c:649: memblock_phys_free(addr, PMD_SIZE);
arch/x86/mm/init.c:650: real_end = addr + PMD_SIZE;
arch/x86/mm/init.c:654: step_size = PMD_SIZE;
arch/x86/mm/init.c:703: unsigned long step_size = PMD_SIZE;
arch/x86/mm/init_32.c:147: vaddr += PMD_SIZE;
arch/x86/mm/init_32.c:230: vaddr += PMD_SIZE;
arch/x86/mm/init_32.c:488: end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
arch/x86/mm/init_64.c:1035: vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
arch/x86/mm/init_64.c:1037: free_pagetable(page, get_order(PMD_SIZE));
arch/x86/mm/init_64.c:1153: if (IS_ALIGNED(addr, PMD_SIZE) &&
arch/x86/mm/init_64.c:1154: IS_ALIGNED(next, PMD_SIZE)) {
arch/x86/mm/init_64.c:1419: all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
arch/x86/mm/init_64.c:1537: addr_end = addr + PMD_SIZE;
arch/x86/mm/init_64.c:1538: p_end = p + PMD_SIZE;
arch/x86/mm/init_64.c:1540: if (!IS_ALIGNED(addr, PMD_SIZE) ||
arch/x86/mm/init_64.c:1541: !IS_ALIGNED(next, PMD_SIZE))
arch/x86/mm/init_64.c:396: for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
arch/x86/mm/init_64.c:448: unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
arch/x86/mm/init_64.c:459: for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
arch/x86/mm/init_64.c:536: paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
arch/x86/mm/init_64.c:862: ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
arch/x86/mm/init_64.c:870: unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
arch/x86/mm/init_64.c:879: return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
arch/x86/mm/init_64.c:902: if (likely(IS_ALIGNED(end, PMD_SIZE)))
arch/x86/mm/init_64.c:920: const unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
arch/x86/mm/init_64.c:933: if (!IS_ALIGNED(start, PMD_SIZE))
arch/x86/mm/init_64.c:941: if (!IS_ALIGNED(end, PMD_SIZE))
arch/x86/mm/kasan_init_64.c:46: ((end - addr) == PMD_SIZE) &&
arch/x86/mm/kasan_init_64.c:47: IS_ALIGNED(addr, PMD_SIZE)) {
arch/x86/mm/kasan_init_64.c:48: p = early_alloc(PMD_SIZE, nid, false);
arch/x86/mm/kasan_init_64.c:51: memblock_free(p, PMD_SIZE);
arch/x86/mm/mem_encrypt_amd.c:169: vaddr += PMD_SIZE;
arch/x86/mm/mem_encrypt_amd.c:170: paddr += PMD_SIZE;
arch/x86/mm/mem_encrypt_amd.c:171: size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
arch/x86/mm/pat/set_memory.c:1160: pfninc = PMD_SIZE >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c:1162: lpinc = PMD_SIZE;
arch/x86/mm/pat/set_memory.c:1460: if (start & (PMD_SIZE - 1)) {
arch/x86/mm/pat/set_memory.c:1461: unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
arch/x86/mm/pat/set_memory.c:1473: while (end - start >= PMD_SIZE) {
arch/x86/mm/pat/set_memory.c:1477: __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
arch/x86/mm/pat/set_memory.c:1479: start += PMD_SIZE;
arch/x86/mm/pat/set_memory.c:1590: if (start & (PMD_SIZE - 1)) {
arch/x86/mm/pat/set_memory.c:1592: unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
arch/x86/mm/pat/set_memory.c:1619: while (end - start >= PMD_SIZE) {
arch/x86/mm/pat/set_memory.c:1633: start += PMD_SIZE;
arch/x86/mm/pat/set_memory.c:1634: cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c:1635: cur_pages += PMD_SIZE >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c:261: return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
arch/x86/mm/pat/set_memory.c:421: for (addr = start; within(addr, start, end); addr += PMD_SIZE)
arch/x86/mm/pgtable.c:668: mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
arch/x86/mm/pgtable.c:671: __func__, addr, addr + PMD_SIZE);
arch/x86/mm/pti.c:352: addr = round_up(addr + 1, PMD_SIZE);
arch/x86/mm/pti.c:389: addr = round_up(addr + 1, PMD_SIZE);
arch/x86/mm/pti.c:607: unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
arch/x86/platform/efi/efi_64.c:331: if (!(pa & (PMD_SIZE - 1))) {
arch/x86/platform/efi/efi_64.c:334: u64 pa_offset = pa & (PMD_SIZE - 1);
arch/x86/platform/efi/efi_64.c:341: efi_va -= PMD_SIZE;
arch/x86/virt/svm/sev.c:178: if (IS_ALIGNED(pa, PMD_SIZE))
arch/x86/virt/svm/sev.c:197: pa = ALIGN_DOWN(pa, PMD_SIZE);
arch/x86/virt/svm/sev.c:198: if (e820__mapped_any(pa, pa + PMD_SIZE, E820_TYPE_RAM)) {
arch/x86/virt/svm/sev.c:200: e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
arch/x86/virt/svm/sev.c:201: e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
arch/x86/virt/svm/sev.c:202: if (!memblock_is_region_reserved(pa, PMD_SIZE))
arch/x86/virt/svm/sev.c:203: memblock_reserve(pa, PMD_SIZE);
arch/x86/xen/mmu_pv.c:1070: unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
arch/x86/xen/mmu_pv.c:1076: pmd++, vaddr += PMD_SIZE) {
arch/x86/xen/mmu_pv.c:1120: xen_free_ro_pages(pa, PMD_SIZE);
arch/x86/xen/mmu_pv.c:1219: size = roundup(size, PMD_SIZE);
arch/x86/xen/mmu_pv.c:1248: xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
arch/x86/xen/mmu_pv.c:1966: n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
arch/x86/xen/p2m.c:395: (unsigned long)(p2m + pfn) + i * PMD_SIZE);
arch/x86/xen/p2m.c:411: PMD_SIZE * PMDS_PER_MID_PAGE);
arch/x86/xen/p2m.c:412: vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
arch/x86/xen/p2m.c:479: vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);
arch/x86/xen/p2m.c:508: vaddr += PMD_SIZE;
arch/xtensa/mm/kasan_init.c:29: for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
drivers/crypto/ccp/sev-dev.c:1114: order = get_order(PMD_SIZE * num_2mb_pages);
drivers/crypto/ccp/sfs.c:17: #define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
drivers/dax/cxl.c:21: PMD_SIZE, IORESOURCE_DAX_KMEM);
drivers/dax/dax-private.h:122: if (align == PMD_SIZE && has_transparent_hugepage())
drivers/dax/device.c:151: unsigned int fault_size = PMD_SIZE;
drivers/dax/device.c:156: if (dev_dax->align > PMD_SIZE) {
drivers/dax/device.c:169: (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
drivers/dax/device.c:173: phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
drivers/dax/hmem/hmem.c:30: mri->target_node, PMD_SIZE, flags);
drivers/hv/mshv_vtl_main.c:1242: if (can_fault(vmf, PMD_SIZE, &pfn))
fs/dax.c:1811: size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
fs/dax.c:1962: if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
fs/dax.c:1979: .len = PMD_SIZE,
fs/dax.c:2031: if (iomap_length(&iter) < PMD_SIZE)
fs/dax.c:2036: iter.status = iomap_iter_advance(&iter, PMD_SIZE);
fs/dax.c:340: return PMD_SIZE;
fs/dax.c:37: #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
fs/dax.c:38: #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
fs/proc/task_mmu.c:1858: #define PAGEMAP_WALK_SIZE (PMD_SIZE)
include/asm-generic/pgtable-nopmd.h:23: #define PMD_MASK (~(PMD_SIZE-1))
include/asm-generic/tlb.h:688: else if (_sz >= PMD_SIZE) \
include/linux/hugetlb.h:1013: else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
include/linux/mmu_notifier.h:542: PMD_SIZE); \
include/linux/mmu_notifier.h:564: ___address + PMD_SIZE); \
include/linux/pgtable.h:1422: ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
include/linux/pgtable.h:2287: #define pmd_leaf_size(x) PMD_SIZE
kernel/bpf/core.c:895: #ifdef PMD_SIZE
mm/debug_vm_pgtable.c:1129: if (args->fixed_alignment < PMD_SIZE)
mm/debug_vm_pgtable.c:1130: phys_align_check(pstart, pend, PMD_SIZE, &phys,
mm/debug_vm_pgtable.c:440: args->fixed_alignment < PMD_SIZE)
mm/execmem.c:169: if (IS_ALIGNED(size, PMD_SIZE) &&
mm/execmem.c:170: IS_ALIGNED(mas.index, PMD_SIZE)) {
mm/execmem.c:289: alloc_size = round_up(size, PMD_SIZE);
mm/huge_memory.c:1241: ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
mm/huge_memory.c:2506: flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
mm/hugetlb.c:6253: pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
mm/hugetlb.c:6948: if (sz != PMD_SIZE)
mm/hugetlb.c:7032: BUG_ON(sz != PMD_SIZE);
mm/hugetlb.c:7099: else if (hp_size == PMD_SIZE)
mm/hugetlb.c:7100: return PUD_SIZE - PMD_SIZE;
mm/hugetlb.c:7111: if (huge_page_size(h) == PMD_SIZE)
mm/hugetlb.c:7112: return PUD_SIZE - PMD_SIZE;
mm/hugetlb_vmemmap.c:778: pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT;
mm/hugetlb_vmemmap.c:83: split_page(head, get_order(PMD_SIZE));
mm/hugetlb_vmemmap.c:89: flush_tlb_kernel_range(start, start + PMD_SIZE);
mm/kasan/init.c:118: if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
mm/kasan/init.c:380: if (IS_ALIGNED(addr, PMD_SIZE) &&
mm/kasan/init.c:381: IS_ALIGNED(next, PMD_SIZE)) {
mm/ksm.c:2527: page += ((addr & (PMD_SIZE - 1)) >> PAGE_SHIFT);
mm/memory.c:1843: return details && details->reclaim_pt && (end - start >= PMD_SIZE);
mm/memory.c:346: addr += PMD_SIZE;
mm/memory.c:356: end -= PMD_SIZE;
mm/memory.c:422: while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
mm/memory.c:6016: unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE);
mm/memory.c:6021: end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE,
mm/memory_hotplug.c:1359: return IS_ALIGNED(vmemmap_size, PMD_SIZE);
mm/mmap.c:846: && IS_ALIGNED(len, PMD_SIZE)) {
mm/mremap.c:410: flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
mm/mremap.c:553: size = PMD_SIZE;
mm/mremap.c:860: extent == PMD_SIZE) {
mm/page_idle.c:78: referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PMD_SIZE);
mm/page_table_check.c:167: page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
mm/page_table_check.c:234: unsigned long stride = PMD_SIZE >> PAGE_SHIFT;
mm/page_table_check.c:243: __page_table_check_pmd_clear(mm, addr + PMD_SIZE * i, *(pmdp + i));
mm/page_vma_mapped.c:292: step_forward(pvmw, PMD_SIZE);
mm/page_vma_mapped.c:309: if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
mm/pagewalk.c:974: entry_size = PMD_SIZE;
mm/sparse-vmemmap.c:445: p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
mm/sparse.c:408: return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
mm/swap_state.c:807: *end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
mm/vmalloc.c:152: if ((end - addr) != PMD_SIZE)
mm/vmalloc.c:155: if (!IS_ALIGNED(addr, PMD_SIZE))
mm/vmalloc.c:158: if (!IS_ALIGNED(phys_addr, PMD_SIZE))
mm/vmalloc.c:4015: if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
mm/vmalloc.c:413: WARN_ON(next - addr < PMD_SIZE);
mm/vmscan.c:3614: addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first;
mm/vmscan.c:3729: if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))