Diffstat (limited to 'arch/riscv/kernel/entry.S')
-rw-r--r--	arch/riscv/kernel/entry.S	321
1 file changed, 33 insertions, 288 deletions
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 99d38fdf8b18..3fbb100bc9e4 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -14,11 +14,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errata_list.h>
 
-#if !IS_ENABLED(CONFIG_PREEMPTION)
-.set resume_kernel, restore_all
-#endif
-
-ENTRY(handle_exception)
+SYM_CODE_START(handle_exception)
 	/*
 	 * If coming from userspace, preserve the user thread pointer and load
 	 * the kernel thread pointer.  If we came from the kernel, the scratch
@@ -46,32 +42,7 @@ _save_context:
 	REG_S x1,  PT_RA(sp)
 	REG_S x3,  PT_GP(sp)
 	REG_S x5,  PT_T0(sp)
-	REG_S x6,  PT_T1(sp)
-	REG_S x7,  PT_T2(sp)
-	REG_S x8,  PT_S0(sp)
-	REG_S x9,  PT_S1(sp)
-	REG_S x10, PT_A0(sp)
-	REG_S x11, PT_A1(sp)
-	REG_S x12, PT_A2(sp)
-	REG_S x13, PT_A3(sp)
-	REG_S x14, PT_A4(sp)
-	REG_S x15, PT_A5(sp)
-	REG_S x16, PT_A6(sp)
-	REG_S x17, PT_A7(sp)
-	REG_S x18, PT_S2(sp)
-	REG_S x19, PT_S3(sp)
-	REG_S x20, PT_S4(sp)
-	REG_S x21, PT_S5(sp)
-	REG_S x22, PT_S6(sp)
-	REG_S x23, PT_S7(sp)
-	REG_S x24, PT_S8(sp)
-	REG_S x25, PT_S9(sp)
-	REG_S x26, PT_S10(sp)
-	REG_S x27, PT_S11(sp)
-	REG_S x28, PT_T3(sp)
-	REG_S x29, PT_T4(sp)
-	REG_S x30, PT_T5(sp)
-	REG_S x31, PT_T6(sp)
+	save_from_x6_to_x31
 
 	/*
 	 * Disable user-mode memory access as it should only be set in the
@@ -106,19 +77,8 @@ _save_context:
 .option norelax
 	la gp, __global_pointer$
 .option pop
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	call __trace_hardirqs_off
-#endif
-
-#ifdef CONFIG_CONTEXT_TRACKING_USER
-	/* If previous state is in user mode, call user_exit_callable(). */
-	li   a0, SR_PP
-	and a0, s1, a0
-	bnez a0, skip_context_tracking
-	call user_exit_callable
-skip_context_tracking:
-#endif
+	move a0, sp /* pt_regs */
+	la ra, ret_from_exception
 
 	/*
 	 * MSB of cause differentiates between
@@ -126,38 +86,13 @@ skip_context_tracking:
 	 */
 	bge s4, zero, 1f
 
-	la ra, ret_from_exception
-
 	/* Handle interrupts */
-	move a0, sp /* pt_regs */
-	la a1, generic_handle_arch_irq
-	jr a1
-1:
-	/*
-	 * Exceptions run with interrupts enabled or disabled depending on the
-	 * state of SR_PIE in m/sstatus.
-	 */
-	andi t0, s1, SR_PIE
-	beqz t0, 1f
-	/* kprobes, entered via ebreak, must have interrupts disabled. */
-	li t0, EXC_BREAKPOINT
-	beq s4, t0, 1f
-#ifdef CONFIG_TRACE_IRQFLAGS
-	call __trace_hardirqs_on
-#endif
-	csrs CSR_STATUS, SR_IE
-
+	tail do_irq
 1:
-	la ra, ret_from_exception
-	/* Handle syscalls */
-	li t0, EXC_SYSCALL
-	beq s4, t0, handle_syscall
-
 	/* Handle other exceptions */
 	slli t0, s4, RISCV_LGPTR
 	la t1, excp_vect_table
 	la t2, excp_vect_table_end
-	move a0, sp /* pt_regs */
 	add t0, t1, t0
 	/* Check if exception code lies within bounds */
 	bgeu t0, t2, 1f
@@ -165,95 +100,16 @@ skip_context_tracking:
 	jr t0
 1:
 	tail do_trap_unknown
+SYM_CODE_END(handle_exception)
 
-handle_syscall:
-#ifdef CONFIG_RISCV_M_MODE
-	/*
-	 * When running is M-Mode (no MMU config), MPIE does not get set.
-	 * As a result, we need to force enable interrupts here because
-	 * handle_exception did not do set SR_IE as it always sees SR_PIE
-	 * being cleared.
-	 */
-	csrs CSR_STATUS, SR_IE
-#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
-	/* Recover a0 - a7 for system calls */
-	REG_L a0, PT_A0(sp)
-	REG_L a1, PT_A1(sp)
-	REG_L a2, PT_A2(sp)
-	REG_L a3, PT_A3(sp)
-	REG_L a4, PT_A4(sp)
-	REG_L a5, PT_A5(sp)
-	REG_L a6, PT_A6(sp)
-	REG_L a7, PT_A7(sp)
-#endif
-	 /* save the initial A0 value (needed in signal handlers) */
-	REG_S a0, PT_ORIG_A0(sp)
-	/*
-	 * Advance SEPC to avoid executing the original
-	 * scall instruction on sret
-	 */
-	addi s2, s2, 0x4
-	REG_S s2, PT_EPC(sp)
-	/* Trace syscalls, but only if requested by the user. */
-	REG_L t0, TASK_TI_FLAGS(tp)
-	andi t0, t0, _TIF_SYSCALL_WORK
-	bnez t0, handle_syscall_trace_enter
-check_syscall_nr:
-	/* Check to make sure we don't jump to a bogus syscall number. */
-	li t0, __NR_syscalls
-	la s0, sys_ni_syscall
-	/*
-	 * Syscall number held in a7.
-	 * If syscall number is above allowed value, redirect to ni_syscall.
-	 */
-	bgeu a7, t0, 3f
-#ifdef CONFIG_COMPAT
-	REG_L s0, PT_STATUS(sp)
-	srli s0, s0, SR_UXL_SHIFT
-	andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
-	li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
-	sub t0, s0, t0
-	bnez t0, 1f
-
-	/* Call compat_syscall */
-	la s0, compat_sys_call_table
-	j 2f
-1:
-#endif
-	/* Call syscall */
-	la s0, sys_call_table
-2:
-	slli t0, a7, RISCV_LGPTR
-	add s0, s0, t0
-	REG_L s0, 0(s0)
-3:
-	jalr s0
-
-ret_from_syscall:
-	/* Set user a0 to kernel a0 */
-	REG_S a0, PT_A0(sp)
-	/*
-	 * We didn't execute the actual syscall.
-	 * Seccomp already set return value for the current task pt_regs.
-	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
-	 */
-ret_from_syscall_rejected:
-#ifdef CONFIG_DEBUG_RSEQ
-	move a0, sp
-	call rseq_syscall
-#endif
-	/* Trace syscalls, but only if requested by the user. */
-	REG_L t0, TASK_TI_FLAGS(tp)
-	andi t0, t0, _TIF_SYSCALL_WORK
-	bnez t0, handle_syscall_trace_exit
-
+/*
+ * The ret_from_exception must be called with interrupt disabled. Here is the
+ * caller list:
+ *  - handle_exception
+ *  - ret_from_fork
+ */
 SYM_CODE_START_NOALIGN(ret_from_exception)
 	REG_L s0, PT_STATUS(sp)
-	csrc CSR_STATUS, SR_IE
-#ifdef CONFIG_TRACE_IRQFLAGS
-	call __trace_hardirqs_off
-#endif
 #ifdef CONFIG_RISCV_M_MODE
 	/* the MPP value is too large to be used as an immediate arg for addi */
 	li t0, SR_MPP
@@ -261,17 +117,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
 #else
 	andi s0, s0, SR_SPP
 #endif
-	bnez s0, resume_kernel
-SYM_CODE_END(ret_from_exception)
-
-	/* Interrupts must be disabled here so flags are checked atomically */
-	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
-	andi s1, s0, _TIF_WORK_MASK
-	bnez s1, resume_userspace_slow
-resume_userspace:
-#ifdef CONFIG_CONTEXT_TRACKING_USER
-	call user_enter_callable
-#endif
+	bnez s0, 1f
 
 	/* Save unwound kernel stack pointer in thread_info */
 	addi s0, sp, PT_SIZE_ON_STACK
@@ -282,18 +128,7 @@ resume_userspace:
 	 * structures again.
 	 */
 	csrw CSR_SCRATCH, tp
-
-restore_all:
-#ifdef CONFIG_TRACE_IRQFLAGS
-	REG_L s1, PT_STATUS(sp)
-	andi t0, s1, SR_PIE
-	beqz t0, 1f
-	call __trace_hardirqs_on
-	j 2f
 1:
-	call __trace_hardirqs_off
-2:
-#endif
 	REG_L a0, PT_STATUS(sp)
 	/*
 	 * The current load reservation is effectively part of the processor's
@@ -322,32 +157,7 @@ restore_all:
 	REG_L x3,  PT_GP(sp)
 	REG_L x4,  PT_TP(sp)
 	REG_L x5,  PT_T0(sp)
-	REG_L x6,  PT_T1(sp)
-	REG_L x7,  PT_T2(sp)
-	REG_L x8,  PT_S0(sp)
-	REG_L x9,  PT_S1(sp)
-	REG_L x10, PT_A0(sp)
-	REG_L x11, PT_A1(sp)
-	REG_L x12, PT_A2(sp)
-	REG_L x13, PT_A3(sp)
-	REG_L x14, PT_A4(sp)
-	REG_L x15, PT_A5(sp)
-	REG_L x16, PT_A6(sp)
-	REG_L x17, PT_A7(sp)
-	REG_L x18, PT_S2(sp)
-	REG_L x19, PT_S3(sp)
-	REG_L x20, PT_S4(sp)
-	REG_L x21, PT_S5(sp)
-	REG_L x22, PT_S6(sp)
-	REG_L x23, PT_S7(sp)
-	REG_L x24, PT_S8(sp)
-	REG_L x25, PT_S9(sp)
-	REG_L x26, PT_S10(sp)
-	REG_L x27, PT_S11(sp)
-	REG_L x28, PT_T3(sp)
-	REG_L x29, PT_T4(sp)
-	REG_L x30, PT_T5(sp)
-	REG_L x31, PT_T6(sp)
+	restore_from_x6_to_x31
 
 	REG_L x2,  PT_SP(sp)
 
@@ -356,47 +166,10 @@ restore_all:
 #else
 	sret
 #endif
-
-#if IS_ENABLED(CONFIG_PREEMPTION)
-resume_kernel:
-	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
-	bnez s0, restore_all
-	REG_L s0, TASK_TI_FLAGS(tp)
-	andi s0, s0, _TIF_NEED_RESCHED
-	beqz s0, restore_all
-	call preempt_schedule_irq
-	j restore_all
-#endif
-
-resume_userspace_slow:
-	/* Enter slow path for supplementary processing */
-	move a0, sp /* pt_regs */
-	move a1, s0 /* current_thread_info->flags */
-	call do_work_pending
-	j resume_userspace
-
-/* Slow paths for ptrace. */
-handle_syscall_trace_enter:
-	move a0, sp
-	call do_syscall_trace_enter
-	move t0, a0
-	REG_L a0, PT_A0(sp)
-	REG_L a1, PT_A1(sp)
-	REG_L a2, PT_A2(sp)
-	REG_L a3, PT_A3(sp)
-	REG_L a4, PT_A4(sp)
-	REG_L a5, PT_A5(sp)
-	REG_L a6, PT_A6(sp)
-	REG_L a7, PT_A7(sp)
-	bnez t0, ret_from_syscall_rejected
-	j check_syscall_nr
-handle_syscall_trace_exit:
-	move a0, sp
-	call do_syscall_trace_exit
-	j ret_from_exception
+SYM_CODE_END(ret_from_exception)
 
 #ifdef CONFIG_VMAP_STACK
-handle_kernel_stack_overflow:
+SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
 	/*
 	 * Takes the psuedo-spinlock for the shadow stack, in case multiple
 	 * harts are concurrently overflowing their kernel stacks.  We could
@@ -464,32 +237,7 @@ restore_caller_reg:
 	REG_S x1,  PT_RA(sp)
 	REG_S x3,  PT_GP(sp)
 	REG_S x5,  PT_T0(sp)
-	REG_S x6,  PT_T1(sp)
-	REG_S x7,  PT_T2(sp)
-	REG_S x8,  PT_S0(sp)
-	REG_S x9,  PT_S1(sp)
-	REG_S x10, PT_A0(sp)
-	REG_S x11, PT_A1(sp)
-	REG_S x12, PT_A2(sp)
-	REG_S x13, PT_A3(sp)
-	REG_S x14, PT_A4(sp)
-	REG_S x15, PT_A5(sp)
-	REG_S x16, PT_A6(sp)
-	REG_S x17, PT_A7(sp)
-	REG_S x18, PT_S2(sp)
-	REG_S x19, PT_S3(sp)
-	REG_S x20, PT_S4(sp)
-	REG_S x21, PT_S5(sp)
-	REG_S x22, PT_S6(sp)
-	REG_S x23, PT_S7(sp)
-	REG_S x24, PT_S8(sp)
-	REG_S x25, PT_S9(sp)
-	REG_S x26, PT_S10(sp)
-	REG_S x27, PT_S11(sp)
-	REG_S x28, PT_T3(sp)
-	REG_S x29, PT_T4(sp)
-	REG_S x30, PT_T5(sp)
-	REG_S x31, PT_T6(sp)
+	save_from_x6_to_x31
 
 	REG_L s0, TASK_TI_KERNEL_SP(tp)
 	csrr s1, CSR_STATUS
@@ -505,23 +253,20 @@ restore_caller_reg:
 	REG_S s5, PT_TP(sp)
 	move a0, sp
 	tail handle_bad_stack
+SYM_CODE_END(handle_kernel_stack_overflow)
 #endif
 
-END(handle_exception)
-
-ENTRY(ret_from_fork)
-	la ra, ret_from_exception
-	tail schedule_tail
-ENDPROC(ret_from_fork)
-
-ENTRY(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_fork)
 	call schedule_tail
+	beqz s0, 1f	/* not from kernel thread */
 	/* Call fn(arg) */
-	la ra, ret_from_exception
 	move a0, s1
-	jr s0
-ENDPROC(ret_from_kernel_thread)
-
+	jalr s0
+1:
+	move a0, sp /* pt_regs */
+	la ra, ret_from_exception
+	tail syscall_exit_to_user_mode
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Integer register context switch
@@ -533,7 +278,7 @@ ENDPROC(ret_from_kernel_thread)
  * The value of a0 and a1 must be preserved by this function, as that's how
  * arguments are passed to schedule_tail.
  */
-ENTRY(__switch_to)
+SYM_FUNC_START(__switch_to)
 	/* Save context into prev->thread */
 	li    a4,  TASK_THREAD_RA
 	add   a3, a0, a4
@@ -570,7 +315,7 @@ ENTRY(__switch_to)
 	/* The offset of thread_info in task_struct is zero. */
 	move tp, a1
 	ret
-ENDPROC(__switch_to)
+SYM_FUNC_END(__switch_to)
 
 #ifndef CONFIG_MMU
 #define do_page_fault do_trap_unknown
@@ -579,7 +324,7 @@ ENDPROC(__switch_to)
 	.section ".rodata"
 	.align LGREG
 	/* Exception vector table */
-ENTRY(excp_vect_table)
+SYM_CODE_START(excp_vect_table)
 	RISCV_PTR do_trap_insn_misaligned
 	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
 	RISCV_PTR do_trap_insn_illegal
@@ -588,7 +333,7 @@ ENTRY(excp_vect_table)
 	RISCV_PTR do_trap_load_fault
 	RISCV_PTR do_trap_store_misaligned
 	RISCV_PTR do_trap_store_fault
-	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+	RISCV_PTR do_trap_ecall_u /* system call */
 	RISCV_PTR do_trap_ecall_s
 	RISCV_PTR do_trap_unknown
 	RISCV_PTR do_trap_ecall_m
@@ -598,11 +343,11 @@ ENTRY(excp_vect_table)
 	RISCV_PTR do_trap_unknown
 	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
-END(excp_vect_table)
+SYM_CODE_END(excp_vect_table)
 
 #ifndef CONFIG_MMU
-ENTRY(__user_rt_sigreturn)
+SYM_CODE_START(__user_rt_sigreturn)
 	li a7, __NR_rt_sigreturn
 	scall
-END(__user_rt_sigreturn)
+SYM_CODE_END(__user_rt_sigreturn)
 #endif
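Note on the helper macros: the long runs of REG_S/REG_L instructions deleted above are folded into the save_from_x6_to_x31 and restore_from_x6_to_x31 assembler macros, whose definitions are not part of this file's diff (they live in a shared header; asm/asm.h is an assumption here, not shown by this page). As a rough sketch, reconstructed only from the register/offset pairs deleted above rather than from the header itself, the save macro would expand to the same stores it replaces:

	/* Sketch only: offsets taken from the deleted REG_S lines above. */
	.macro save_from_x6_to_x31
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)
	.endm

restore_from_x6_to_x31 would mirror this with REG_L loads in place of the REG_S stores. Sharing one macro between handle_exception and handle_kernel_stack_overflow keeps the two save paths identical, which is the point of the cleanup shown in this diff.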
