pmd_table
/*
 * A PMD is a transparent huge mapping when the entry is non-empty
 * (pmd_val(pmd) != 0) but does not point at a next-level page table.
 * NOTE(review): relies on pmd_val()/pmd_table() defined elsewhere in
 * the arch headers — semantics not visible from this chunk.
 */
#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
/*
 * A PMD is a leaf (block/huge) entry when it is present and is not a
 * pointer to a next-level page table — i.e. it maps memory directly
 * at the PMD level rather than referencing a PTE table.
 */
#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
/*
 * pmd_bad() flags any PMD that is not a table pointer. Callers use this
 * where only a next-level-table entry is acceptable; a leaf/huge entry
 * (or garbage) is "bad" in that context. NOTE(review): pmd_table() is
 * defined elsewhere — confirm it also rejects empty entries if callers
 * depend on that.
 */
#define pmd_bad(pmd) (!pmd_table(pmd))
return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
WARN_ON(!pmd_table(pmd));
WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
if (!pmd_table(pmd)) {
if (pmd_table(pmd)) {
pmd_t *pmd_table;
pmd_table = (pmd_t *)alloc_low_page();
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
BUG_ON(pmd_table != pmd_offset(pud, 0));
return pmd_table;
pmd_table = pmd_offset(pud, 0);
return pmd_table;
pmd_t *pmd_table;
pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd_table)
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
BUG_ON(pmd_table != pmd_offset(pud, 0));
pmd_table = pmd_offset(pud, 0);
return pmd_table;
goto pmd_table;
pmd_table: