PTRS_PER_PTE
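/*
 * PTRS_PER_PTE is the per-architecture count of pte_t entries held by one
 * lowest-level page table.  On most configurations it is simply how many
 * entries fit in a single page, i.e. PAGE_SIZE / sizeof(pte_t).  The lines
 * below are usage sites gathered from across the kernel tree.  A minimal
 * standalone model of the usual derivation (PAGE_SHIFT and the 8-byte
 * pte_t are assumed values, not taken from any one architecture):
 */
#include <stdio.h>

#define PAGE_SHIFT   12                       /* assumed: 4 KiB pages */
#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))  /* assumed: 8-byte pte_t */

int main(void)
{
	printf("PTRS_PER_PTE = %d\n", PTRS_PER_PTE);  /* prints 512 */
	return 0;
}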
BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
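/*
 * The BUILD_BUG_ON() checks above encode one invariant: a structure indexed
 * by PTE slots (the pkmap window, the fixmap kmap slots, or the PTE table
 * itself) must fit inside the single page that backs one PTE table.  A
 * userspace model of the same compile-time check (sizes assumed):
 */
#include <stdint.h>

#define PAGE_SIZE 4096u          /* assumed */
typedef uint64_t pte_t;          /* assumed 8-byte entries */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))

/* Rejected at compile time if a full table ever outgrew a page. */
_Static_assert(PTRS_PER_PTE * sizeof(pte_t) <= PAGE_SIZE,
	       "PTE table larger than a page");

int main(void) { return 0; }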
#define LAST_PKMAP PTRS_PER_PTE
for (i = 0; i < PTRS_PER_PTE; i++)
#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
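/*
 * The PTE_HWTABLE_* hits are 32-bit Arm: with the classic short-descriptor
 * format the kernel keeps its own pte_t array in the first half of the
 * page and the MMU-visible hardware tables (u32 entries) in the second
 * half, so one page serves both views; under LPAE (u64 entries) the
 * hardware walks the Linux table directly.  A sketch of the classic layout
 * arithmetic, with sizes assumed rather than pulled from the Arm headers:
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t pte_t;                              /* assumed: no LPAE */
#define PTRS_PER_PTE     512
#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
#define PTE_HWTABLE_OFF  (PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(uint32_t))

int main(void)
{
	/* 2 KiB of Linux entries + 2 KiB of hardware entries = one 4 KiB page */
	printf("%zu + %zu = %zu bytes\n", PTE_HWTABLE_OFF, PTE_HWTABLE_SIZE,
	       PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return 0;
}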
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
for (i = 0; i < PTRS_PER_PTE; i++)
static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
#define MAX_DVM_OPS PTRS_PER_PTE
tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;
*pte += PTRS_PER_PTE * sizeof(pte_t);
return PTRS_PER_PTE + 1;
for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pte_t tbl[PTRS_PER_PTE] __page_aligned_bss;
static pgd_t tmp_pg_dir[PTRS_PER_PTE] __initdata __aligned(PAGE_SIZE);
return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
clear_next_level(l++, next_level_idx(start), PTRS_PER_PTE);
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; i++) {
for (i = 0; i < PTRS_PER_PTE; i++, ptep++, pfn++)
extern pte_t invalid_pte_table[PTRS_PER_PTE];
((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
__pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));
#define LAST_PKMAP PTRS_PER_PTE
return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
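/*
 * Many hits above extract a table index: shift the address down to the
 * granularity of the level, then reduce modulo PTRS_PER_PTE (the mask form
 * works because PTRS_PER_PTE is a power of two).  A minimal model of
 * pte_index(), with PAGE_SHIFT and PTRS_PER_PTE assumed:
 */
#include <stdio.h>

#define PAGE_SHIFT   12   /* assumed */
#define PTRS_PER_PTE 512  /* assumed */

static unsigned long pte_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	/* page number 0x201, low 9 bits -> slot 1 of its PTE table */
	printf("%lu\n", pte_index(0x201000UL));
	return 0;
}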
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
extern pte_t invalid_pte_table[PTRS_PER_PTE];
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
end = p + PTRS_PER_PTE;
for (i = 0; i < PTRS_PER_PTE; i++) {
kvm->stat.pages += PTRS_PER_PTE;
return child + (gfn & (PTRS_PER_PTE - 1));
gfn = gfn & ~(PTRS_PER_PTE - 1);
pfn = pfn & ~(PTRS_PER_PTE - 1);
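/*
 * The KVM hits above round a guest/host frame number down to the first
 * frame covered by the same PTE table, again exploiting the power-of-two
 * size.  A standalone model (PTRS_PER_PTE assumed):
 */
#include <stdio.h>

#define PTRS_PER_PTE 512 /* assumed */

int main(void)
{
	unsigned long gfn  = 0x12345;
	unsigned long base = gfn & ~(unsigned long)(PTRS_PER_PTE - 1);

	printf("base = %#lx\n", base); /* 0x12345 & ~0x1ff = 0x12200 */
	return 0;
}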
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
for (i = 0; i < PTRS_PER_PTE; i++)
end = p + PTRS_PER_PTE;
next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
last_pte_table += PTRS_PER_PTE;
for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
extern pte_t invalid_pte_table[PTRS_PER_PTE];
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
extern pte_t invalid_pte_table[PTRS_PER_PTE];
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
for (j = 0; p < e && j < PTRS_PER_PTE;
start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
offset = PTRS_PER_PTE;
pte_val(*(ptep + PTRS_PER_PTE)));
pte_val(*(ptep + PTRS_PER_PTE)));
for (i = 0; i < PTRS_PER_PTE; i++) {
i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
nw = PTRS_PER_PTE - i;
i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
nw = PTRS_PER_PTE - i;
for (i = 0; i < PTRS_PER_PTE; i++) {
for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; i++)
addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
for (i = 0; i < PTRS_PER_PTE; i++)
mask = (PTRS_PER_PTE * (1UL << kvm_riscv_gstage_pgd_xbits)) - 1;
mask = PTRS_PER_PTE - 1;
if (PTRS_PER_PTE < (gvsz >> order)) {
if (PTRS_PER_PTE < (gpsz >> order)) {
if (PTRS_PER_PTE < (gpsz >> order)) {
if (PTRS_PER_PTE < (gvsz >> order)) {
for (i = 0; i < PTRS_PER_PTE; i++) {
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
for (i = 0; i < PTRS_PER_PTE; ++i)
p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
memset64((void *)pt->pgstes, val, PTRS_PER_PTE);
memset64((void *)pt->ptes, ptes, PTRS_PER_PTE);
for (i = 0; i < PTRS_PER_PTE; i++) {
update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
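/*
 * The mask computations above use the fact that a PTE table is
 * PTRS_PER_PTE * sizeof(pte_t) bytes long and naturally aligned to that
 * size, so clearing the low bits of any entry's address recovers the base
 * of its table.  A model with assumed sizes:
 */
#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PTE 512  /* assumed */
typedef uint64_t pte_t;   /* assumed 8-byte entries */

int main(void)
{
	uintptr_t ptep = 0x80042a8;  /* hypothetical address of one entry */
	uintptr_t base = ptep & ~(uintptr_t)(PTRS_PER_PTE * sizeof(pte_t) - 1);

	printf("table base = %#lx\n", (unsigned long)base); /* 0x8004000 */
	return 0;
}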
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
#define PTE_SIZE (PTRS_PER_PTE * 4)
#define SRMMU_PTE_TABLE_SIZE (PTRS_PER_PTE * 4)
offset = get_random_u32_below(PTRS_PER_PTE);
memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
-PTRS_PER_PTE
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
for (i = 0; i < PTRS_PER_PTE; i++) {
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
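/*
 * PMD_LEVEL_MULT above is the page-table-dump way of expressing how much
 * virtual space one entry covers per level: a PTE maps one page, and each
 * level above multiplies by the entries-per-table count.  A sketch for a
 * 4 KiB-page configuration (values assumed):
 */
#include <stdio.h>

#define PAGE_SIZE      4096UL /* assumed */
#define PTRS_PER_PTE   512UL  /* assumed */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)

int main(void)
{
	printf("per PTE entry: %lu KiB\n", PTE_LEVEL_MULT >> 10); /* 4 KiB */
	printf("per PMD entry: %lu MiB\n", PMD_LEVEL_MULT >> 20); /* 2 MiB */
	return 0;
}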
for (i = 0; i < PTRS_PER_PTE; i++)
&& lastpte && lastpte + PTRS_PER_PTE != pte);
addr2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
pfn += PTRS_PER_PTE;
for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
for (i = 0; i < PTRS_PER_PTE; i++) {
for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; i++) {
direct_pages_count[level - 1] += PTRS_PER_PTE;
direct_pages_count[level - 1] -= PTRS_PER_PTE;
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
for (i = 1, pte++; i < PTRS_PER_PTE; i++, pte++) {
if (pmd_pfn(entry) != pmd_pfn(first) + i * PTRS_PER_PTE)
for (i = 0; i < PTRS_PER_PTE; i++)
(FIXMAP_PMD_NUM * PTRS_PER_PTE));
pfn += PTRS_PER_PTE;
max_pte = pte + PTRS_PER_PTE;
for (i = 0; i < PTRS_PER_PTE; i++) {
for (i = 0; i < PTRS_PER_PTE; i++)
set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
idx_pte < min(n_pte, PTRS_PER_PTE);
n_pte -= PTRS_PER_PTE;
for (i = 0; i < PTRS_PER_PTE; i++) {
#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
for (i = 0; i < PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE; ++i)
unsigned long n_pmds = n_pages / PTRS_PER_PTE;
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
for (i = 0; i < PTRS_PER_PTE; ++i)
n_pages = ALIGN(n_pages, PTRS_PER_PTE);
for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
for (i = 0; i < PTRS_PER_PTE; i++) {
ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
src_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
dst_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
next = min(end, addr + (PTRS_PER_PTE << PAGE_SHIFT));
src_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*src_pfns),
dst_pfns = kvcalloc(PTRS_PER_PTE, sizeof(*dst_pfns),
next = min(end, addr + (PTRS_PER_PTE << PAGE_SHIFT));
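/*
 * The kvcalloc()/min() pairs above size their pfn arrays to one PTE table
 * and walk the address range one table (one PMD entry's span,
 * PTRS_PER_PTE << PAGE_SHIFT bytes) at a time.  A minimal model of that
 * chunking loop, with the constants assumed:
 */
#include <stdio.h>

#define PAGE_SHIFT   12    /* assumed */
#define PTRS_PER_PTE 512UL /* assumed */

int main(void)
{
	unsigned long addr = 0x100000UL, end = 0x600000UL, next;
	unsigned long span = PTRS_PER_PTE << PAGE_SHIFT; /* 2 MiB */

	for (; addr < end; addr = next) {
		next = (addr + span < end) ? addr + span : end;
		printf("chunk [%#lx, %#lx)\n", addr, next);
	}
	return 0;
}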
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
for (i = 0; i < PTRS_PER_PTE; i++) {
for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
pte_off + (nr_pages - idx) > PTRS_PER_PTE)) {
if (val / PAGE_SIZE > PTRS_PER_PTE)
to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_unmap(ptep - PTRS_PER_PTE);
if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))