HPAGE_PMD_NR: usage sites across the kernel tree
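These are the usage sites of HPAGE_PMD_NR, the number of base pages covered by one PMD-sized huge page. As a minimal standalone sketch of how the constant is derived (mirroring the definitions in include/linux/huge_mm.h; the PAGE_SHIFT and PMD_SHIFT values below assume x86-64 with 4 KiB pages, other configurations differ):

#include <stdio.h>

/* Assumed values for x86-64 with 4 KiB base pages. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21

/* Mirrors the derivation in include/linux/huge_mm.h. */
#define HPAGE_PMD_SHIFT	PMD_SHIFT
#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT - PAGE_SHIFT)
#define HPAGE_PMD_NR	(1UL << HPAGE_PMD_ORDER)

int main(void)
{
	/* Prints 512: one 2 MiB PMD maps 512 4 KiB base pages. */
	printf("HPAGE_PMD_NR = %lu\n", HPAGE_PMD_NR);
	return 0;
}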
update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
pages /= HPAGE_PMD_NR;
pages_per_block = HPAGE_PMD_NR;
if (max > (unsigned long)HPAGE_PMD_NR)
max = (unsigned long)HPAGE_PMD_NR;
return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
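The pfn & ~(HPAGE_PMD_NR - 1) idiom above works because HPAGE_PMD_NR is a power of two: masking the low bits rounds any pfn down to its PMD-aligned base, which is then compared against the cached huge-zero-folio pfn. A standalone sketch of the arithmetic (the pfn values are invented):

#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB pages, 2 MiB PMD */

int main(void)
{
	unsigned long huge_zero_pfn = 0x12345 * HPAGE_PMD_NR; /* hypothetical base */
	unsigned long pfn = huge_zero_pfn + 37;	/* some page inside the folio */

	/* Masking the low bits recovers the folio's base pfn. */
	if ((pfn & ~(HPAGE_PMD_NR - 1)) == huge_zero_pfn)
		printf("pfn 0x%lx lies inside the huge zero folio\n", pfn);
	return 0;
}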
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma, src_vma, PGTABLE_LEVEL_PMD);
return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR, PGTABLE_LEVEL_PMD);
if (is_large && IS_ALIGNED(pfn, HPAGE_PMD_NR) && (pfn + HPAGE_PMD_NR <= pfn_last)) {
pfn += HPAGE_PMD_NR;
mdevice->calloc += HPAGE_PMD_NR;
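The lib/test_hmm.c hits above (is_large, pfn_last, and mdevice are that test driver's names) carve PMD-sized chunks out of a pfn range only when the current pfn is aligned and a full HPAGE_PMD_NR run still fits before pfn_last. A standalone sketch of the same bounds check, with an invented pfn range:

#include <stdio.h>

#define HPAGE_PMD_NR	512UL	/* assumes 4 KiB pages, 2 MiB PMD */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long pfn = 1000, pfn_last = 3000;	/* hypothetical range */
	unsigned long large = 0, small = 0;

	while (pfn < pfn_last) {
		if (IS_ALIGNED(pfn, HPAGE_PMD_NR) &&
		    pfn + HPAGE_PMD_NR <= pfn_last) {
			pfn += HPAGE_PMD_NR;	/* take a whole PMD-sized chunk */
			large++;
		} else {
			pfn++;			/* fall back to a single page */
			small++;
		}
	}
	printf("%lu large chunks, %lu single pages\n", large, small);
	return 0;
}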
nr = HPAGE_PMD_NR;
ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
ra->size = HPAGE_PMD_NR;
ra->async_size = HPAGE_PMD_NR;
*page_mask = HPAGE_PMD_NR - 1;
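The readahead and page-mask hits above both align work to a PMD boundary: the file index is rounded down with the HPAGE_PMD_NR - 1 mask, and page_mask lets a GUP-style walker skip the remainder of a huge page it has already resolved. A sketch of the index arithmetic, with an invented index:

#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB pages, 2 MiB PMD */

int main(void)
{
	unsigned long index = 1234;	/* hypothetical file index, in pages */
	unsigned long page_mask = HPAGE_PMD_NR - 1;

	/* Round down to the THP-aligned index, as the readahead code does. */
	unsigned long aligned = index & ~page_mask;

	/* A walker can then jump straight to the next huge-page boundary. */
	unsigned long next = (index | page_mask) + 1;

	printf("index %lu -> aligned %lu, next boundary %lu\n",
	       index, aligned, next);
	return 0;
}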
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PMD_NR);
ret = HPAGE_PMD_NR;
return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
return HPAGE_PMD_NR;
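These two returns are the huge-zero-folio shrinker pattern: report HPAGE_PMD_NR freeable pages only once the refcount has dropped to the cache's own final reference, and return HPAGE_PMD_NR from the scan side once the folio is actually freed. A userspace sketch of the refcount gate (C11 stdatomic stands in for the kernel's atomic_t):

#include <stdatomic.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB pages, 2 MiB PMD */

static atomic_int huge_zero_refcount = 2;	/* cache ref + one user */

/* Mirrors the count-callback logic: freeable only at the last reference. */
static unsigned long shrink_count(void)
{
	return atomic_load(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

int main(void)
{
	printf("freeable while in use: %lu\n", shrink_count());	/* 0 */
	atomic_fetch_sub(&huge_zero_refcount, 1);		/* user drops ref */
	printf("freeable when idle:   %lu\n", shrink_count());	/* 512 */
	return 0;
}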
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
folio_ref_add(folio, HPAGE_PMD_NR - 1);
folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, vma, haddr, rmap_flags);
folio_ref_add(folio, HPAGE_PMD_NR - 1);
folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, vma, haddr, rmap_flags);
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
for (i = 0; i < HPAGE_PMD_NR; i++)
set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
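The loops above are the PMD-split shape: one huge mapping is rewritten as HPAGE_PMD_NR individual PTEs, the address advancing by PAGE_SIZE per step, with folio_ref_add(folio, HPAGE_PMD_NR - 1) taken up front so every new PTE owns a reference. A toy model of the iteration, with invented addresses:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_PMD_NR	512UL	/* assumes 4 KiB pages, 2 MiB PMD */

int main(void)
{
	unsigned long haddr = 0x200000;	/* hypothetical PMD-aligned VA */
	unsigned long pfn = 0x80000;	/* hypothetical base pfn */
	unsigned long addr, i, mapped = 0;

	/* Same shape as the split loop: one PTE per base page. */
	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		unsigned long pte_pfn = pfn + i;	/* entry for this subpage */
		(void)pte_pfn;
		mapped++;
	}
	printf("remapped %lu PTEs covering %lu bytes\n",
	       mapped, HPAGE_PMD_NR * PAGE_SIZE);
	return 0;
}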
if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, addr += PAGE_SIZE) {
} else if (cc->is_khugepaged && (!referenced || (unmapped && referenced < HPAGE_PMD_NR / 2))) {
for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
lruvec_stat_mod_folio(new_folio, NR_SHMEM, HPAGE_PMD_NR);
lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, HPAGE_PMD_NR);
folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
if (cc->is_khugepaged && present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
progress += HPAGE_PMD_NR;
if (err || max_ptes_none > HPAGE_PMD_NR - 1)
if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
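Taken together, the khugepaged hits express its defaults in units of HPAGE_PMD_NR: scan eight PMDs' worth of pages per pass, tolerate up to HPAGE_PMD_NR - 1 empty PTEs, at most one eighth swapped out and one half shared. A sketch of what those work out to with 4 KiB pages (all four are sysfs-tunable):

#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB pages, 2 MiB PMD */

int main(void)
{
	/* Default tunables, expressed exactly as in the source above. */
	unsigned long pages_to_scan   = HPAGE_PMD_NR * 8;	/* 4096 */
	unsigned long max_ptes_none   = HPAGE_PMD_NR - 1;	/* 511 */
	unsigned long max_ptes_swap   = HPAGE_PMD_NR / 8;	/* 64 */
	unsigned long max_ptes_shared = HPAGE_PMD_NR / 2;	/* 256 */

	printf("pages_to_scan=%lu max_ptes_none=%lu "
	       "max_ptes_swap=%lu max_ptes_shared=%lu\n",
	       pages_to_scan, max_ptes_none, max_ptes_swap, max_ptes_shared);
	return 0;
}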
for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) {
for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes, address += nr_ptes * PAGE_SIZE) {
release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
for (i = 0; i < HPAGE_PMD_NR; i++) {
unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
flush_icache_pages(vma, page, HPAGE_PMD_NR);
add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
for (i = 0; i < HPAGE_PMD_NR; i++)
for (i = 1; i < HPAGE_PMD_NR; i++)
nr = HPAGE_PMD_NR;
if (ret == HPAGE_PMD_NR) {
pages += HPAGE_PMD_NR;
if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
if ((pvmw->flags & PVMW_SYNC) && thp_vma_suitable_order(vma, pvmw->address, PMD_ORDER) && (pvmw->nr_pages >= HPAGE_PMD_NR)) {
__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, PGTABLE_LEVEL_PMD);
__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
__folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD);
if (pages == HPAGE_PMD_NR)
if (pages == HPAGE_PMD_NR) {
#define SWAPFILE_CLUSTER HPAGE_PMD_NR
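Defining SWAPFILE_CLUSTER as HPAGE_PMD_NR sizes a swap allocation cluster to exactly one PMD-sized folio, so a THP can swap out to a contiguous run of slots. A sketch of the slot arithmetic, with an invented offset:

#include <stdio.h>

#define HPAGE_PMD_NR	 512UL	/* assumes 4 KiB pages, 2 MiB PMD */
#define SWAPFILE_CLUSTER HPAGE_PMD_NR

int main(void)
{
	unsigned long offset = 7 * SWAPFILE_CLUSTER;	/* hypothetical slot */

	/* A cluster-aligned offset can back a whole PMD-sized folio. */
	printf("cluster %lu holds slots [%lu, %lu)\n",
	       offset / SWAPFILE_CLUSTER, offset, offset + SWAPFILE_CLUSTER);
	return 0;
}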
if (nr_pages >= HPAGE_PMD_NR) {
pages /= HPAGE_PMD_NR;
v[i] /= HPAGE_PMD_NR;
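The final hits rescale base-page counters into THP counts for reporting: dividing by HPAGE_PMD_NR turns pages into huge pages. A sketch with invented counter values:

#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* assumes 4 KiB pages, 2 MiB PMD */

int main(void)
{
	unsigned long v[] = { 1024, 5120, 512 };	/* hypothetical counters */

	/* Same scaling as above: base pages -> PMD-sized huge pages. */
	for (unsigned long i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
		v[i] /= HPAGE_PMD_NR;
		printf("v[%lu] = %lu THPs\n", i, v[i]);
	}
	return 0;
}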