author		Mark Rutland <mark.rutland@arm.com>	2021-08-02 15:07:32 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2021-08-05 14:09:49 +0100
commit		4d1c2ee2709fd6e1655206cafcdb22737f5d7379 (patch)
tree		1d3b9fe5c49f9c23cdc471c360508e96e195167d
parent		bc29b71f53b13c9e90a53a42d2431228a869b459 (diff)
arm64: entry: move bulk of ret_to_user to C
In `ret_to_user` we perform some conditional work depending on the
thread flags, then perform some IRQ/context tracking which is intended
to balance with the IRQ/context tracking performed in the entry C code.
For simplicity and consistency, it would be preferable to move this all
to C. As a step towards that, this patch moves the conditional work and
IRQ/context tracking into a C helper function. To aid bisectability,
this is called from the `ret_to_user` assembly, and a subsequent patch
will move the call to C code.
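Condensed from the diff below, the split has the following shape: the assembly passes its pt_regs (sp) to a single asmlinkage entry point, which performs the conditional work and then the exit-side IRQ/context tracking:

static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = READ_ONCE(current_thread_info()->flags);
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	exit_to_user_mode();
}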
As local_daif_mask() handles all necessary tracing and PMR manipulation,
we no longer need to handle this explicitly. As we call
exit_to_user_mode() directly, the `user_enter_irqoff` macro is no longer
used, and can be removed. As enter_from_user_mode() and
exit_to_user_mode() are no longer called from assembly, these can be
made static, and as these are typically very small, they are marked
__always_inline to avoid the overhead of a function call.
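For reference, the wrapper change is purely one of linkage (again condensed from the diff below); the body is untouched:

/* Before: out-of-line, so entry.S could "bl exit_to_user_mode". */
asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

/* After: only called from asm_exit_to_user_mode(), so static and inlined. */
static __always_inline void exit_to_user_mode(void)
{
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}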
For now, enablement of single-step is left in entry.S, and for this we
still need to read the flags in ret_to_user(). It is safe to read this
separately as TIF_SINGLESTEP is not part of _TIF_WORK_MASK.
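For illustration, the mask in question looks roughly like this (paraphrased from arch/arm64/include/asm/thread_info.h around this kernel version; the exact set of flags varies between releases):

/*
 * Paraphrase, not verbatim: the "pending work" flags handled by
 * do_notify_resume(). TIF_SINGLESTEP is not among them, so entry.S can
 * safely re-read it after asm_exit_to_user_mode() has run.
 */
#define _TIF_WORK_MASK	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
			 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
			 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
			 _TIF_NOTIFY_SIGNAL)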
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Joey Gouly <joey.gouly@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Link: https://lore.kernel.org/r/20210802140733.52716-4-mark.rutland@arm.com
[catalin.marinas@arm.com: removed unused gic_prio_kentry_setup macro]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--	arch/arm64/include/asm/exception.h	 5
-rw-r--r--	arch/arm64/kernel/entry-common.c	21
-rw-r--r--	arch/arm64/kernel/entry.S		48
-rw-r--r--	arch/arm64/kernel/signal.c		 3
4 files changed, 26 insertions, 51 deletions
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 4afbc45b8bb0..339477dca551 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
 
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 				  void (*func)(struct pt_regs *));
-asmlinkage void enter_from_user_mode(void);
-asmlinkage void exit_to_user_mode(void);
+asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
+
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
 void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
 
 void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index c4923e3ba065..3da4859c43a1 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -104,7 +104,7 @@ static __always_inline void __enter_from_user_mode(void)
 	trace_hardirqs_off_finish();
 }
 
-asmlinkage void noinstr enter_from_user_mode(void)
+static __always_inline void enter_from_user_mode(void)
 {
 	__enter_from_user_mode();
 }
@@ -123,12 +123,29 @@ static __always_inline void __exit_to_user_mode(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-asmlinkage void noinstr exit_to_user_mode(void)
+static __always_inline void exit_to_user_mode(void)
 {
 	mte_check_tfsr_exit();
 	__exit_to_user_mode();
 }
 
+static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	local_daif_mask();
+
+	flags = READ_ONCE(current_thread_info()->flags);
+	if (unlikely(flags & _TIF_WORK_MASK))
+		do_notify_resume(regs, flags);
+}
+
+asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
+{
+	prepare_exit_to_user_mode(regs);
+	exit_to_user_mode();
+}
+
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 863d44f73028..b59eee28cc1b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,16 +29,6 @@
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
-/*
- * Context tracking and irqflag tracing need to instrument transitions between
- * user and kernel mode.
- */
-	.macro user_enter_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-	bl	exit_to_user_mode
-#endif
-	.endm
-
 	.macro	clear_gp_regs
 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
 	mov	x\n, xzr
@@ -474,18 +464,6 @@ SYM_CODE_END(__swpan_exit_el0)
 /* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
-/*
- * Interrupt handling.
- */
-	.macro	gic_prio_kentry_setup, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
-	msr_s	SYS_ICC_PMR_EL1, \tmp
-	alternative_else_nop_endif
-#endif
-	.endm
-
 	.text
 
 /*
@@ -585,37 +563,17 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
 	kernel_exit 1
 SYM_CODE_END(ret_to_kernel)
 
-/*
- * "slow" syscall return path.
- */
 SYM_CODE_START_LOCAL(ret_to_user)
-	disable_daif
-	gic_prio_kentry_setup tmp=x3
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
-	ldr	x19, [tsk, #TSK_TI_FLAGS]
-	and	x2, x19, #_TIF_WORK_MASK
-	cbnz	x2, work_pending
-finish_ret_to_user:
-	user_enter_irqoff
+	mov	x0, sp
+	bl	asm_exit_to_user_mode
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	clear_mte_async_tcf
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
-
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-	mov	x0, sp				// 'regs'
-	mov	x1, x19
-	bl	do_notify_resume
-	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
-	b	finish_ret_to_user
 SYM_CODE_END(ret_to_user)
 
 	.popsection				// .entry.text
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f8192f4ae0b8..53c2c85efb34 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -924,8 +924,7 @@ static bool cpu_affinity_invalid(struct pt_regs *regs)
 					 system_32bit_el0_cpumask());
 }
 
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-				 unsigned long thread_flags)
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 {
 	do {
 		if (thread_flags & _TIF_NEED_RESCHED) {