Diffstat (limited to 'arch/arm64/kernel/process.c')
 -rw-r--r--  arch/arm64/kernel/process.c | 18
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1fb2819fc048..71f788cd2b18 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -631,3 +633,19 @@ static int __init tagged_addr_init(void)

 core_initcall(tagged_addr_init);
 #endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * Preempting a task from an IRQ means we leave copies of PSTATE
+	 * on the stack. cpufeature's enable calls may modify PSTATE, but
+	 * resuming one of these preempted tasks would undo those changes.
+	 *
+	 * Only allow a task to be preempted once cpufeatures have been
+	 * enabled.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
+		preempt_schedule_irq();
+}
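The new helper is only useful together with a caller on the IRQ return path, which is not part of this hunk (in this tree the caller is assembly in arch/arm64/kernel/entry.S). Below is a minimal C sketch of where such a caller could sit; the function name el1_irq_sketch, the use of handle_arch_irq, and the surrounding structure are illustrative assumptions, not taken from the patch.

/*
 * Illustrative sketch only -- not part of this patch. It shows where an
 * EL1 IRQ-entry path could invoke arm64_preempt_schedule_irq(): after the
 * interrupt has been handled, and only when the preempt count permits
 * rescheduling. The real caller here is assembly in entry.S.
 */
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <asm/irq.h>
#include <asm/ptrace.h>

asmlinkage void __sched arm64_preempt_schedule_irq(void);

static void el1_irq_sketch(struct pt_regs *regs)
{
	irq_enter();
	handle_arch_irq(regs);		/* dispatch to the root irqchip handler */
	irq_exit();

	/*
	 * Reschedule on the way back to EL1 only if kernel preemption is
	 * configured and nothing on this CPU currently forbids it; the
	 * cpufeature readiness check lives inside
	 * arm64_preempt_schedule_irq() itself.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT) && !preempt_count())
		arm64_preempt_schedule_irq();
}

The gating on arm64_const_caps_ready matters because cpufeature enable callbacks may rewrite PSTATE bits early in boot; a task preempted from an IRQ still has its old PSTATE saved on the stack, so letting it resume would silently undo those changes.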
