author | Ard Biesheuvel <ardb@kernel.org> | 2021-11-25 10:26:44 +0100
committer | Ard Biesheuvel <ardb@kernel.org> | 2021-12-06 12:49:17 +0100
commit | 7b9896c352073156a325c3bb0dc4c46e06e2a468 (patch)
tree | a9e6e3be8a6a2b91a147f3e9ae82dfb7fbd860e5 /arch/arm/include/asm/percpu.h
parent | 4e918ab13eaf40f19938659cb5a22c93172778a8 (diff)
ARM: percpu: add SMP_ON_UP support
Permit the use of the TPIDRPRW system register for carrying the per-CPU
offset in generic SMP configurations that also target non-SMP capable
ARMv6 cores. This uses the SMP_ON_UP code patching framework to turn all
TPIDRPRW accesses into reads/writes of entry #0 in the __per_cpu_offset
array.
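For illustration only, a minimal C sketch of the two code paths the patching framework selects between; the function names here are invented for this example and are not part of the patch (__per_cpu_offset[] is the kernel's existing per-CPU offset table):

/*
 * Illustrative sketch only: cpu_offset_smp()/cpu_offset_up() are
 * invented names, not part of the patch.
 */
extern unsigned long __per_cpu_offset[];

static inline unsigned long cpu_offset_smp(void)
{
	unsigned long off;

	/* SMP-capable core: the per-CPU offset is cached in TPIDRPRW */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off));
	return off;
}

static inline unsigned long cpu_offset_up(void)
{
	/* non-SMP v6 core: no TPIDRPRW; only CPU 0 exists, so entry #0 suffices */
	return __per_cpu_offset[0];
}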
While at it, switch over some existing direct TPIDRPRW accesses in asm
code to invocations of a new helper that is patched in the same way when
necessary.
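The helper itself is outside the scope of this page (the diffstat below is limited to percpu.h). As a hypothetical sketch, such an assembler helper could reuse the same subsection trick as the inline asm in the diff; the macro name and body here are assumptions, not the patch's actual code, and ALT_SMP()/ALT_UP_B() are the kernel's existing SMP_ON_UP patching macros from <asm/assembler.h>:

	.macro	get_cpu_offset, rd		@ name assumed for illustration
ALT_SMP(mrc	p15, 0, \rd, c13, c0, 4)	@ SMP: read TPIDRPRW
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L1_\@)				@ patched in on non-SMP v6 cores
.L0_\@:
	.subsection 1
.L1_\@:	ldr	\rd, =__per_cpu_offset		@ the patch uses LOAD_SYM_ARMV6() for this
	ldr	\rd, [\rd]			@ read entry #0
	b	.L0_\@
	.ltorg
	.previous
#endif
	.endm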
Note that CPU_V6+SMP without SMP_ON_UP results in a kernel that does not
boot on v6 CPUs without SMP extensions, so add this dependency to
Kconfig as well.
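A sketch of the kind of dependency meant; the exact Kconfig hunk is not shown on this page, so the wording below is illustrative only:

config SMP
	bool "Symmetric Multi-Processing"
	...
	# illustrative: ensure v6 SMP kernels carry the UP fallback
	select SMP_ON_UP if CPU_V6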
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com> # ARMv7M
Diffstat (limited to 'arch/arm/include/asm/percpu.h')
-rw-r--r-- | arch/arm/include/asm/percpu.h | 25
1 file changed, 22 insertions, 3 deletions
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index e2fcb3cfd3de..a4a0d38d016a 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,15 +5,22 @@
 #ifndef _ASM_ARM_PERCPU_H_
 #define _ASM_ARM_PERCPU_H_
 
+#include <asm/insn.h>
+
 register unsigned long current_stack_pointer asm ("sp");
 
 /*
  * Same as asm-generic/percpu.h, except that we store the per cpu offset
  * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
  */
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
 static inline void set_my_cpu_offset(unsigned long off)
 {
+	extern unsigned int smp_on_up;
+
+	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+		return;
+
 	/* Set TPIDRPRW */
 	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
 }
@@ -27,8 +34,20 @@ static inline unsigned long __my_cpu_offset(void)
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
-	    : "Q" (*(const unsigned long *)current_stack_pointer));
+	asm("0:	mrc p15, 0, %0, c13, c0, 4			\n\t"
+#ifdef CONFIG_CPU_V6
+	    "1:							\n\t"
+	    "	.subsection 1					\n\t"
+	    "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "	\n\t"
+	    "	b	1b					\n\t"
+	    "	.previous					\n\t"
+	    "	.pushsection \".alt.smp.init\", \"a\"		\n\t"
+	    "	.long	0b - .					\n\t"
+	    "	b	. + (2b - 0b)				\n\t"
+	    "	.popsection					\n\t"
+#endif
+	    : "=r" (off)
+	    : "Q" (*(const unsigned long *)current_stack_pointer));
 	return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
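Each .alt.smp.init record emitted above pairs a PC-relative pointer to the MRC (the ".long 0b - .") with a replacement instruction, here a branch into the out-of-line v6 sequence ("b . + (2b - 0b)" encodes a displacement that still reaches label 2b once the word has been copied to location 0b). The kernel applies these records in assembly early during boot; below is a simplified, illustrative C rendering of that fixup walk. The struct and function names are invented, and real code must also perform I/D cache maintenance after rewriting instructions:

#include <linux/init.h>
#include <linux/types.h>

/* Invented names: one record as laid out in .alt.smp.init. */
struct smp_alt {
	s32	site_offset;	/* PC-relative location of the SMP instruction */
	u32	up_insn;	/* instruction to install when running UP */
};

static void __init apply_smp_on_up_fixups(struct smp_alt *alt,
					  struct smp_alt *end)
{
	for (; alt < end; alt++) {
		/* the record stores "site - record", so add it back */
		u32 *site = (u32 *)((unsigned long)alt + alt->site_offset);

		*site = alt->up_insn;	/* real code also syncs the caches */
	}
}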