#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative-macros.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

#include <linux/sizes.h>
.section .irqentry.text, "ax"
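/*
 * The kernel does not eagerly emit an sfence.vma after every new vmalloc
 * mapping, so a kernel access may fault on a mapping that is present in the
 * page tables but has not yet been observed by this hart.  This macro runs
 * very early in the trap path: on a page fault at a kernel address it checks
 * this cpu's bit in the new_vmalloc bitmap and, if set, clears it, emits a
 * fence only when the uarch caches invalid entries (i.e. no Svvptc), and
 * retries the faulting access with sret.  Only a0-a2 are used, and they are
 * spilled to dedicated thread_info slots.
 */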
.macro new_vmalloc_check
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
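/* Interrupts have the MSB of cause set: leave them to the normal trap path */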
blt a0, zero, .Lnew_vmalloc_restore_context_a0
REG_S a1, TASK_TI_A1(tp)
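/* Only load/store/instruction page faults can be caused by a new mapping */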
li a1, EXC_LOAD_PAGE_FAULT
beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_STORE_PAGE_FAULT
beq a0, a1, .Lnew_vmalloc_kernel_address
li a1, EXC_INST_PAGE_FAULT
bne a0, a1, .Lnew_vmalloc_restore_context_a1
.Lnew_vmalloc_kernel_address:
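/* Is it a kernel address?  User addresses (tval >= 0) cannot be vmalloc */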
csrr a0, CSR_TVAL
bge a0, zero, .Lnew_vmalloc_restore_context_a1
REG_S a2, TASK_TI_A2(tp)
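/*
 * Compute a0 = &new_vmalloc[BIT_WORD(cpu)] and a1 = BIT_MASK(cpu):
 * the word offset is (cpu / 64) * sizeof(long) = (cpu >> 6) << 3 and the
 * bit position is cpu % 64.
 */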
lw a2, TASK_TI_CPU(tp)
srli a1, a2, 6
slli a1, a1, 3
la a0, new_vmalloc
add a0, a0, a1
slli a1, a1, 3
sub a1, a2, a1
li a2, 1
sll a1, a2, a1
REG_L a2, 0(a0)
and a2, a2, a1
beq a2, zero, .Lnew_vmalloc_restore_context
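/*
 * A new vmalloc mapping did appear: atomically clear this cpu's bit, flush
 * the TLB only if the uarch caches invalid entries, then restore a0-a2 and
 * retry the faulting access directly with sret.
 */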
amoxor.d a0, a1, (a0)
ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)
REG_L a0, TASK_TI_A0(tp)
REG_L a1, TASK_TI_A1(tp)
REG_L a2, TASK_TI_A2(tp)
csrw CSR_SCRATCH, x0
sret
.Lnew_vmalloc_restore_context:
REG_L a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm
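/*
 * save_userssp/restore_userssp spill and reload the user shadow stack
 * pointer (CSR_SSP) when trapping from and returning to U-mode (they skip
 * the save/restore when SR_SPP shows we trapped from the kernel).  Both are
 * patched to nops unless the Zicfiss extension is present and
 * CONFIG_RISCV_USER_CFI is enabled.
 */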
.macro save_userssp tmp, status
ALTERNATIVE("nops(4)",
__stringify( \
andi \tmp, \status, SR_SPP; \
bnez \tmp, skip_ssp_save; \
csrrw \tmp, CSR_SSP, x0; \
REG_S \tmp, TASK_TI_USER_SSP(tp); \
skip_ssp_save:),
0,
RISCV_ISA_EXT_ZICFISS,
CONFIG_RISCV_USER_CFI)
.endm
.macro restore_userssp tmp, status
ALTERNATIVE("nops(4)",
__stringify( \
andi \tmp, \status, SR_SPP; \
bnez \tmp, skip_ssp_restore; \
REG_L \tmp, TASK_TI_USER_SSP(tp); \
csrw CSR_SSP, \tmp; \
skip_ssp_restore:),
0,
RISCV_ISA_EXT_ZICFISS,
CONFIG_RISCV_USER_CFI)
.endm
SYM_CODE_START(handle_exception)
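/*
 * If coming from userspace, sscratch holds the kernel thread pointer: swap
 * it into tp.  If we came from the kernel, sscratch contains 0 and we keep
 * the current tp.
 */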
csrrw tp, CSR_SCRATCH, tp
bnez tp, .Lsave_context
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
#ifdef CONFIG_64BIT
new_vmalloc_check
#endif
REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
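/*
 * VMAP'd kernel stacks are aligned to 2 * THREAD_SIZE, so bit THREAD_SHIFT
 * of any in-range stack pointer is clear.  If reserving pt_regs flips that
 * bit, the stack has overflowed.
 */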
addi sp, sp, -(PT_SIZE_ON_STACK)
srli sp, sp, THREAD_SHIFT
andi sp, sp, 0x1
bnez sp, handle_kernel_stack_overflow
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
save_from_x6_to_x31
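/*
 * Disable user memory access (SUM) so it is only ever set inside the
 * uaccess routines, and disable the FPU/Vector unit to catch illegal use
 * of floating point or vector state in the kernel.  On 64-bit, also clear
 * the expected landing pad state (ELP) inherited from userspace, since the
 * kernel does not use landing pads.
 */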
li t0, SR_SUM | SR_FS_VS
#ifdef CONFIG_64BIT
li t1, SR_ELP
or t0, t0, t1
#endif
REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, CSR_STATUS, t0
save_userssp s2, s1
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
csrr s4, CSR_CAUSE
csrr s5, CSR_SCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_STATUS(sp)
REG_S s2, PT_EPC(sp)
REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
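/*
 * Clear sscratch so that a nested trap taken while in the kernel is
 * recognised as coming from the kernel, then reload the kernel gp.
 */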
csrw CSR_SCRATCH, x0
load_global_pointer
scs_load_current_if_task_changed s5
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
move a0, sp
call riscv_v_context_nesting_start
#endif
move a0, sp
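/* The MSB of cause distinguishes interrupts (negative) from exceptions */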
bge s4, zero, 1f
call do_irq
j ret_from_exception
1:
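/*
 * Dispatch exceptions through excp_vect_table, indexed by cause; anything
 * past the end of the table falls back to do_trap_unknown.
 */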
slli t0, s4, RISCV_LGPTR
la t1, excp_vect_table
la t2, excp_vect_table_end
add t0, t1, t0
bgeu t0, t2, 3f
REG_L t1, 0(t0)
2: jalr t1
j ret_from_exception
3:
la t1, do_trap_unknown
j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
SYM_CODE_START_NOALIGN(ret_from_exception)
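/*
 * If we are returning to userspace (SPP/MPP clear), record the unwound
 * kernel stack pointer and shadow call stack, and point sscratch back at
 * tp so the next trap from U-mode can find the kernel data structures.
 */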
REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
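/* SR_MPP is too wide for andi's 12-bit immediate, so use li + and */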
li t0, SR_MPP
and s0, s0, t0
#else
andi s0, s0, SR_SPP
#endif
bnez s0, 1f
#ifdef CONFIG_KSTACK_ERASE
call stackleak_erase_on_task_stack
#endif
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
scs_save_current
csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
move a0, sp
call riscv_v_context_nesting_end
#endif
REG_L a0, PT_STATUS(sp)
restore_userssp s3, a0
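/*
 * Load reservations cannot survive a trip through the kernel, so clear any
 * dangling reservation here with a store-conditional to a harmless target
 * (re-storing EPC into its own slot) before returning to the new context.
 */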
REG_L a2, PT_EPC(sp)
REG_SC x0, a2, PT_EPC(sp)
csrw CSR_STATUS, a0
csrw CSR_EPC, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
REG_L x5, PT_T0(sp)
restore_from_x6_to_x31
REG_L x2, PT_SP(sp)
#ifdef CONFIG_RISCV_M_MODE
mret
#else
sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
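/*
 * Borrow x31 via sscratch to locate the per-cpu overflow stack, point sp
 * at its top, then build pt_regs there and hand off to handle_bad_stack.
 */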
csrrw x31, CSR_SCRATCH, x31
asm_per_cpu sp, overflow_stack, x31
li x31, OVERFLOW_STACK_SIZE
add sp, sp, x31
xor x31, x31, x31
csrrw x31, CSR_SCRATCH, x31
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
save_from_x6_to_x31
REG_L s0, TASK_TI_KERNEL_SP(tp)
csrr s1, CSR_STATUS
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
csrr s4, CSR_CAUSE
csrr s5, CSR_SCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_STATUS(sp)
REG_S s2, PT_EPC(sp)
REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
move a0, sp
tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
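/*
 * First return path of a newly forked task.  For kernel threads, s0/s1
 * hold the thread function and its argument as set up by copy_thread();
 * both variants finish by unwinding through ret_from_exception.
 */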
SYM_CODE_START(ret_from_fork_kernel_asm)
call schedule_tail
move a0, s1
move a1, s0
move a2, sp
call ret_from_fork_kernel
j ret_from_exception
SYM_CODE_END(ret_from_fork_kernel_asm)
SYM_CODE_START(ret_from_fork_user_asm)
call schedule_tail
move a0, sp
call ret_from_fork_user
j ret_from_exception
SYM_CODE_END(ret_from_fork_user_asm)
#ifdef CONFIG_IRQ_STACKS
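/*
 * call_on_irq_stack(regs, handler): switch to the per-cpu IRQ (and shadow
 * call) stack, invoke handler(regs), then switch back to the task stack.
 */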
SYM_FUNC_START(call_on_irq_stack)
addi sp, sp, -STACKFRAME_SIZE_ON_STACK
REG_S ra, STACKFRAME_RA(sp)
REG_S s0, STACKFRAME_FP(sp)
addi s0, sp, STACKFRAME_SIZE_ON_STACK
scs_save_current
scs_load_irq_stack t0
load_per_cpu t0, irq_stack_ptr, t1
li t1, IRQ_STACK_SIZE
add sp, t0, t1
jalr a1
scs_load_current
addi sp, s0, -STACKFRAME_SIZE_ON_STACK
REG_L ra, STACKFRAME_RA(sp)
REG_L s0, STACKFRAME_FP(sp)
addi sp, sp, STACKFRAME_SIZE_ON_STACK
ret
SYM_FUNC_END(call_on_irq_stack)
#endif
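/*
 * __switch_to(prev, next): save prev's ra, sp and callee-saved registers
 * into its thread struct, carry the SR_SUM state across the switch, then
 * restore next's context and make it the current task (tp).
 */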
SYM_FUNC_START(__switch_to)
li a4, TASK_THREAD_RA
add a3, a0, a4
add a4, a1, a4
REG_S ra, TASK_THREAD_RA_RA(a3)
REG_S sp, TASK_THREAD_SP_RA(a3)
REG_S s0, TASK_THREAD_S0_RA(a3)
REG_S s1, TASK_THREAD_S1_RA(a3)
REG_S s2, TASK_THREAD_S2_RA(a3)
REG_S s3, TASK_THREAD_S3_RA(a3)
REG_S s4, TASK_THREAD_S4_RA(a3)
REG_S s5, TASK_THREAD_S5_RA(a3)
REG_S s6, TASK_THREAD_S6_RA(a3)
REG_S s7, TASK_THREAD_S7_RA(a3)
REG_S s8, TASK_THREAD_S8_RA(a3)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
csrr s0, CSR_STATUS
REG_S s0, TASK_THREAD_SUM_RA(a3)
scs_save_current
REG_L s0, TASK_THREAD_SUM_RA(a4)
li s1, SR_SUM
and s0, s0, s1
csrs CSR_STATUS, s0
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
REG_L s0, TASK_THREAD_S0_RA(a4)
REG_L s1, TASK_THREAD_S1_RA(a4)
REG_L s2, TASK_THREAD_S2_RA(a4)
REG_L s3, TASK_THREAD_S3_RA(a4)
REG_L s4, TASK_THREAD_S4_RA(a4)
REG_L s5, TASK_THREAD_S5_RA(a4)
REG_L s6, TASK_THREAD_S6_RA(a4)
REG_L s7, TASK_THREAD_S7_RA(a4)
REG_L s8, TASK_THREAD_S8_RA(a4)
REG_L s9, TASK_THREAD_S9_RA(a4)
REG_L s10, TASK_THREAD_S10_RA(a4)
REG_L s11, TASK_THREAD_S11_RA(a4)
move tp, a1
scs_load_current
ret
SYM_FUNC_END(__switch_to)
#ifndef CONFIG_MMU
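/* Without an MMU there is no page fault handler: treat those causes as unknown traps */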
#define do_page_fault do_trap_unknown
#endif
.section ".rodata"
.align LGREG
SYM_DATA_START_LOCAL(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned		/*  0: instruction address misaligned */
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)	/*  1: instruction access fault */
RISCV_PTR do_trap_insn_illegal			/*  2: illegal instruction */
RISCV_PTR do_trap_break				/*  3: breakpoint */
RISCV_PTR do_trap_load_misaligned		/*  4: load address misaligned */
RISCV_PTR do_trap_load_fault			/*  5: load access fault */
RISCV_PTR do_trap_store_misaligned		/*  6: store/AMO address misaligned */
RISCV_PTR do_trap_store_fault			/*  7: store/AMO access fault */
RISCV_PTR do_trap_ecall_u			/*  8: ecall from U-mode (system call) */
RISCV_PTR do_trap_ecall_s			/*  9: ecall from S-mode */
RISCV_PTR do_trap_unknown			/* 10: reserved */
RISCV_PTR do_trap_ecall_m			/* 11: ecall from M-mode */
ALT_PAGE_FAULT(RISCV_PTR do_page_fault)		/* 12: instruction page fault */
RISCV_PTR do_page_fault				/* 13: load page fault */
RISCV_PTR do_trap_unknown			/* 14: reserved */
RISCV_PTR do_page_fault				/* 15: store/AMO page fault */
RISCV_PTR do_trap_unknown			/* 16: reserved */
RISCV_PTR do_trap_unknown			/* 17: reserved */
RISCV_PTR do_trap_software_check		/* 18: software check */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
#ifndef CONFIG_MMU
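/*
 * Signal return trampoline for !MMU: userspace jumps here after a signal
 * handler returns and issues the rt_sigreturn syscall.
 */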
SYM_DATA_START(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif