arch/nios2/mm/fault.c
/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

#define EXC_SUPERV_INSN_ACCESS  9  /* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS  11 /* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT  13 /* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT  14 /* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT  15 /* TLB permission violation (w) */
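
/*
 * These codes appear to correspond to the CAUSE field (bits 6:2) of the
 * Nios II "exception" control register, per the Nios II Processor
 * Reference Handbook; the low-level entry code passes that register
 * value in, and the "cause >>= 2" below extracts the code.
 */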

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
                                unsigned long address)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int code = SEGV_MAPERR;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        cause >>= 2;

        /*
         * Restart the instruction: ea holds the address of the
         * instruction after the faulting one (all Nios II instructions
         * are 4 bytes), so back it up so that the return from the
         * exception re-executes the access.
         */
        regs->ea -= 4;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
                if (user_mode(regs))
                        goto bad_area_nosemaphore;
                else
                        goto vmalloc_fault;
        }

        if (unlikely(address >= TASK_SIZE))
                goto bad_area_nosemaphore;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto bad_area_nosemaphore;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

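        /*
         * Account the fault to perf here; major/minor fault accounting
         * is done inside handle_mm_fault() now that it takes regs.
         */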
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
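        /*
         * lock_mm_and_find_vma() takes the mmap read lock and looks up
         * (growing the stack if necessary) the vma covering the fault
         * address.  On failure it returns NULL with the lock already
         * dropped, which is why we branch to the _nosemaphore label.
         */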
        vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
                goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.  From here on a mapping exists, so any
         * failure is reported as an access error rather than a
         * missing mapping.
         */
        code = SEGV_ACCERR;

        switch (cause) {
        case EXC_SUPERV_INSN_ACCESS:
        case EXC_SUPERV_DATA_ACCESS:
                goto bad_area;
        case EXC_X_PROTECTION_FAULT:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case EXC_R_PROTECTION_FAULT:
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                break;
        case EXC_W_PROTECTION_FAULT:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                /* OR in the write bit so FAULT_FLAG_USER and the
                   default flags set above are not clobbered */
                flags |= FAULT_FLAG_WRITE;
                break;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

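        /*
         * If a fatal signal arrived while the fault was being handled,
         * bail out: user tasks will take the signal on return to user
         * mode, while kernel-mode faults must go through the exception
         * fixup path.
         */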
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /*
                 * No need to mmap_read_unlock(mm) as we would
                 * have already released it in __folio_lock_or_retry()
                 * in mm/filemap.c.
                 */

                goto retry;
        }

        mmap_read_unlock(mm);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
                        pr_info("%s: unhandled page fault (%d) at 0x%08lx, cause %ld\n",
                                current->comm, SIGSEGV, address, cause);
                        show_regs(regs);
                }
                _exception(SIGSEGV, regs, code, address);
                return;
        }

no_context:
        /*
         * Are we prepared to handle this kernel fault?  If the faulting
         * instruction has an exception table entry (as the uaccess
         * helpers do), fixup_exception() rewrites regs->ea to the fixup
         * handler and execution resumes there instead of oopsing.
         */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        pr_alert("Unable to handle kernel %s at virtual address %08lx",
                address < PAGE_SIZE ? "NULL pointer dereference" :
                "paging request", address);
        pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
                cause);
        panic("Oops");

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
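        /*
         * For user faults, defer to the core OOM machinery, which
         * either selects a victim or waits for memory to be freed;
         * kernel faults fall back to the exception fixup path.
         */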
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        _exception(SIGBUS, regs, BUS_ADRERR, address);
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

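                /*
                 * pgd_current appears to be the page directory that the
                 * TLB-miss fast path consults for the current context,
                 * so the kernel entry must be copied into it from
                 * init_mm.  With nios2's two-level page tables the
                 * p4d/pud/pmd levels are folded, so the intermediate
                 * walks below are effectively no-ops kept for the
                 * generic page-table API.
                 */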
                pgd = pgd_current + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                set_pgd(pgd, *pgd_k);

                p4d = p4d_offset(pgd, address);
                p4d_k = p4d_offset(pgd_k, address);
                if (!p4d_present(*p4d_k))
                        goto no_context;
                pud = pud_offset(p4d, address);
                pud_k = pud_offset(p4d_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;
                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

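                /*
                 * Kick out any stale TLB entry for this address so the
                 * next access refills from the now-synced page table.
                 */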
                flush_tlb_kernel_page(address);
                return;
        }
}