#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/stop_machine.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
#include <asm/firmware.h>
#include <mm/mmu_decl.h>
#include <trace/events/thp.h>
#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
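/*
 * On hash, the vmemmap (the virtual array of struct pages) is backed by
 * bolted hash page table entries rather than by Linux page tables. Create
 * the bolted mapping for one chunk, undoing any partial insertion if the
 * bolting fails part way through.
 */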
int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys)
{
int rc;
if ((start + page_size) >= H_VMEMMAP_END) {
pr_warn("Outside the supported range\n");
return -1;
}
rc = htab_bolt_mapping(start, start + page_size, phys,
pgprot_val(PAGE_KERNEL),
mmu_vmemmap_psize, mmu_kernel_ssize);
if (rc < 0) {
int rc2 = htab_remove_mapping(start, start + page_size,
mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(rc2 && (rc2 != -ENOENT));
}
return rc;
}
#ifdef CONFIG_MEMORY_HOTPLUG
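/*
 * Tear down the bolted vmemmap mapping for a chunk. -ENOENT (nothing was
 * mapped there) is tolerated but warned about; any other error is fatal.
 */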
void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size)
{
int rc = htab_remove_mapping(start, start + page_size,
mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON((rc < 0) && (rc != -ENOENT));
WARN_ON(rc == -ENOENT);
}
#endif
#endif
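
/*
 * map_kernel_page adds an entry to the Linux (ioremap) page tables once
 * the slab allocator is up; earlier in boot it instead bolts the entry
 * directly into the hash page table.
 */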
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
if (slab_is_available()) {
pgdp = pgd_offset_k(ea);
p4dp = p4d_offset(pgdp, ea);
pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
if (!pmdp)
return -ENOMEM;
ptep = pte_alloc_kernel(pmdp, ea);
if (!ptep)
return -ENOMEM;
set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
} else {
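		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * Linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware hash page table instead.
		 */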
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			pr_err("Failed to bolt mapping for IO memory at %016lx\n", pa);
			return -ENOMEM;
		}
}
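
	/* Order the mapping update before subsequent accesses. */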
smp_wmb();
return 0;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
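/*
 * Atomically clear the bits in @clr and set the bits in @set in the
 * huge-page PMD, returning the old value. The ldarx/stdcx. loop retries
 * while H_PAGE_BUSY is set, so we never update a PMD the hash fault path
 * is concurrently working on.
 */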
unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long clr,
unsigned long set)
{
__be64 old_be, tmp;
unsigned long old;
#ifdef CONFIG_DEBUG_VM
WARN_ON(!hash__pmd_trans_huge(*pmdp));
assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
and. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
"r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
: "cc" );
old = be64_to_cpu(old_be);
trace_hugepage_update_pmd(addr, old, clr, set);
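	/* Flush any hash PTEs installed for the old hugepage PMD. */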
if (old & H_PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp, old);
return old;
}
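
/*
 * Called when collapsing base pages into a huge page: clear the PMD and
 * make sure no stale translations survive the collapse.
 */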
pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = *pmdp;
pmd_clear(pmdp);
page_table_check_pmd_clear(vma->vm_mm, address, pmd);
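	/*
	 * Wait for all pending hash_page walkers to finish. A concurrent
	 * __hash_page may hold a pointer into the old page tables and could
	 * otherwise install new HPTEs for the small pages while we collapse
	 * them, modifying page contents as they are copied into the huge
	 * page.
	 */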
serialize_against_pte_lookup(vma->vm_mm);
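	/*
	 * Now invalidate the HPTEs in the range covered by the PMD. Any
	 * later access takes a fault, sees the PMD as none, and blocks on
	 * mmap_lock until the collapse has completed.
	 */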
flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
return pmd;
}
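
/*
 * Deposit a page table in the second half of the PMD page; it is used to
 * track the base-page-size hash slot information for the huge page.
 */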
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{
pgtable_t *pgtable_slot;
assert_spin_locked(pmd_lockptr(mm, pmdp));
pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
*pgtable_slot = pgtable;
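	/*
	 * Expose the deposited pgtable to other CPUs before the hugepage PTE
	 * is set at the PMD level: the hash fault code stores hash slot
	 * numbers in the deposited pgtable.
	 */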
smp_wmb();
}
pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
pgtable_t pgtable;
pgtable_t *pgtable_slot;
assert_spin_locked(pmd_lockptr(mm, pmdp));
pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
pgtable = *pgtable_slot;
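	/*
	 * Once withdrawn, mark the slot empty and zero the fragment: the
	 * hash slot information it carries for the huge page is now stale.
	 */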
*pgtable_slot = NULL;
memset(pgtable, 0, PTE_FRAG_SIZE);
return pgtable;
}
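
/*
 * A Linux huge-page PMD was changed and the corresponding hash table
 * entries need to be flushed.
 */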
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long old_pmd)
{
int ssize;
unsigned int psize;
unsigned long vsid;
unsigned long flags = 0;
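
	/* Get the base page size, VSID and segment size for the flush. */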
#ifdef CONFIG_DEBUG_VM
psize = get_slice_psize(mm, addr);
BUG_ON(psize == MMU_PAGE_16M);
#endif
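	/*
	 * H_PAGE_COMBO means the hugepage was hashed with 4K subpage HPTEs,
	 * otherwise 64K HPTEs were used.
	 */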
if (old_pmd & H_PAGE_COMBO)
psize = MMU_PAGE_4K;
else
psize = MMU_PAGE_64K;
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_user_vsid(&mm->context, addr, ssize);
WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
if (mm_is_thread_local(mm))
flags |= HPTE_LOCAL_UPDATE;
return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}
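
/*
 * Get and clear the huge-page PMD; pmd_hugepage_update() also flushes any
 * hash entries the old PMD had.
 */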
pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
pmd_t old_pmd;
pgtable_t pgtable;
unsigned long old;
pgtable_t *pgtable_slot;
old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
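	/*
	 * The PMD is now none and we hold the page table lock, so we can
	 * safely clear the hash slot details cached in the deposited
	 * pgtable.
	 */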
pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
pgtable = *pgtable_slot;
memset(pgtable, 0, PTE_FRAG_SIZE);
return old_pmd;
}
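
/*
 * THP on hash requires 16M pages, a 16M PMD_SIZE, and a valid 16M
 * hugepage encoding for the base page size in use.
 */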
int hash__has_transparent_hugepage(void)
{
if (!mmu_has_feature(MMU_FTR_16M_PAGE))
return 0;
if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
return 0;
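	/*
	 * With 64K base pages we use 64K HPTEs by default, so the segment
	 * must support a 16M hugepage encoding; otherwise fall back to
	 * checking the 4K encoding.
	 */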
if (mmu_psize_defs[MMU_PAGE_64K].shift &&
(mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
return 0;
if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
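/*
 * Parameters for updating the protection of a range of the bolted linear
 * mapping, shared with the other CPUs across stop_machine().
 */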
struct change_memory_parms {
unsigned long start, end, newpp;
unsigned int step, nr_cpus;
atomic_t master_cpu;
atomic_t cpu_counter;
};
static struct change_memory_parms chmem_parms;
static DEFINE_MUTEX(chmem_lock);
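
/*
 * Update the protection bits of the bolted HPTEs covering [start, end),
 * stepping by the linear mapping page size.
 */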
static void change_memory_range(unsigned long start, unsigned long end,
unsigned int step, unsigned long newpp)
{
unsigned long idx;
pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
start, end, newpp, step);
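	/* Update each bolted HPTE covering the range in place. */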
for (idx = start; idx < end; idx += step)
mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
mmu_kernel_ssize);
}
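
/*
 * Secondary CPUs wait here while the master updates the HPTEs: each CPU
 * turns translation off (real mode), counts itself in, then spins until
 * the master signals completion by dropping the counter to zero.
 */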
static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
{
unsigned long msr, tmp, flags;
int *p;
p = &parms->cpu_counter.counter;
local_irq_save(flags);
hard_irq_disable();

	asm volatile (
	// Switch to real mode and leave interrupts off
	"mfmsr	%[msr]			;"
	"li	%[tmp], %[MSR_IR_DR]	;"
	"andc	%[tmp], %[msr], %[tmp]	;"
	"mtmsrd	%[tmp]			;"

	// Tell the master we are in real mode
	"1:				"
	"lwarx	%[tmp], 0, %[p]		;"
	"addic	%[tmp], %[tmp], -1	;"
	"stwcx.	%[tmp], 0, %[p]		;"
	"bne-	1b			;"

	// Spin until the counter goes to zero
	"2:				;"
	"lwz	%[tmp], 0(%[p])		;"
	"cmpwi	%[tmp], 0		;"
	"bne-	2b			;"

	// Switch back to virtual mode
	"mtmsrd	%[msr]			;"

	: // outputs
	  [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
	: // inputs
	  [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
	: // clobbers
	  "cc", "xer"
	);
local_irq_restore(flags);
return 0;
}

static int change_memory_range_fn(void *data)
{
	struct change_memory_parms *parms = data;

	// First CPU goes through, all others wait.
	if (atomic_xchg(&parms->master_cpu, 1) == 1)
		return chmem_secondary_loop(parms);

	// Wait for all but one CPU (this one) to call-in
	while (atomic_read(&parms->cpu_counter) > 1)
		barrier();

	change_memory_range(parms->start, parms->end, parms->step, parms->newpp);

	mb();

	// Signal the other CPUs that we're done
	atomic_dec(&parms->cpu_counter);

	return 0;
}
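
/*
 * Change the protection of a range of the bolted linear mapping. Under an
 * LPAR the mapping may cover text we are currently executing, so the HPTE
 * update is done under stop_machine() with every other CPU parked in real
 * mode, where it cannot fault on the mapping being modified.
 */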
static bool hash__change_memory_range(unsigned long start, unsigned long end,
unsigned long newpp)
{
unsigned int step, shift;
shift = mmu_psize_defs[mmu_linear_psize].shift;
step = 1 << shift;
start = ALIGN_DOWN(start, step);
end = ALIGN(end, step);
if (start >= end)
return false;
if (firmware_has_feature(FW_FEATURE_LPAR)) {
mutex_lock(&chmem_lock);
chmem_parms.start = start;
chmem_parms.end = end;
chmem_parms.step = step;
chmem_parms.newpp = newpp;
atomic_set(&chmem_parms.master_cpu, 0);
cpus_read_lock();
atomic_set(&chmem_parms.cpu_counter, num_online_cpus());
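		// Ensure state is consistent before we call the other CPUs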
mb();
stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
cpu_online_mask);
cpus_read_unlock();
mutex_unlock(&chmem_lock);
} else
change_memory_range(start, end, step, newpp);
return true;
}
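
/* Make kernel text and rodata read-only in the bolted linear mapping. */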
void hash__mark_rodata_ro(void)
{
unsigned long start, end, pp;
start = (unsigned long)_stext;
end = (unsigned long)__end_rodata;
pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
WARN_ON(!hash__change_memory_range(start, end, pp));
}
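
/* Return the freed init sections to normal (non-executable) kernel protections. */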
void hash__mark_initmem_nx(void)
{
unsigned long start, end, pp;
start = (unsigned long)__init_begin;
end = (unsigned long)__init_end;
pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif