From 9dd78194a3722fa6712192cdd4f7032d45112a9a Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)"
Date: Fri, 11 Feb 2022 16:45:54 +0000
Subject: ARM: report Spectre v2 status through sysfs

As per other architectures, add support for reporting the Spectre
vulnerability status via the CPU vulnerabilities sysfs interface.

Acked-by: Catalin Marinas
Signed-off-by: Russell King (Oracle)
---
 arch/arm/kernel/spectre.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 arch/arm/kernel/spectre.c

(limited to 'arch/arm/kernel/spectre.c')

diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644
index 000000000000..6f6dd1cfd099
--- /dev/null
+++ b/arch/arm/kernel/spectre.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+	if (state > spectre_v2_state)
+		spectre_v2_state = state;
+	spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	const char *method;
+
+	if (spectre_v2_state == SPECTRE_UNAFFECTED)
+		return sprintf(buf, "%s\n", "Not affected");
+
+	if (spectre_v2_state != SPECTRE_MITIGATED)
+		return sprintf(buf, "%s\n", "Vulnerable");
+
+	switch (spectre_v2_methods) {
+	case SPECTRE_V2_METHOD_BPIALL:
+		method = "Branch predictor hardening";
+		break;
+
+	case SPECTRE_V2_METHOD_ICIALLU:
+		method = "I-cache invalidation";
+		break;
+
+	case SPECTRE_V2_METHOD_SMC:
+	case SPECTRE_V2_METHOD_HVC:
+		method = "Firmware call";
+		break;
+
+	default:
+		method = "Multiple mitigations";
+		break;
+	}
+
+	return sprintf(buf, "Mitigation: %s\n", method);
+}
--
cgit v1.2.3-70-g09d2
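
The two show functions above back the files in the generic CPU vulnerabilities
directory. For illustration only (not part of the patch, and assuming the usual
/sys/devices/system/cpu/vulnerabilities/ layout shared with other
architectures), a minimal userspace reader of these strings looks like this:

/* Illustrative only -- not part of the patch. Reads the spectre_v1 and
 * spectre_v2 files exposed by the generic CPU vulnerabilities interface.
 */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], line[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/vulnerabilities/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%-12s not reported\n", name);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-12s %s", name, line);	/* sysfs line ends in '\n' */
	fclose(f);
}

int main(void)
{
	show("spectre_v1");	/* e.g. "Mitigation: __user pointer sanitization" */
	show("spectre_v2");	/* e.g. "Mitigation: Firmware call" */
	return 0;
}
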
From b9baf5c8c5c356757f4f9d8180b5e9d234065bc3 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)"
Date: Thu, 10 Feb 2022 16:05:45 +0000
Subject: ARM: Spectre-BHB workaround

Work around the Spectre BHB issues for Cortex-A15, Cortex-A57,
Cortex-A72, Cortex-A73 and Cortex-A75. Brahma B15 is also included to
be safe, since it is affected by Spectre V2 in the same way as
Cortex-A15.

Reviewed-by: Catalin Marinas
Signed-off-by: Russell King (Oracle)
---
 arch/arm/include/asm/assembler.h   | 10 +++++
 arch/arm/include/asm/spectre.h     |  4 ++
 arch/arm/include/asm/vmlinux.lds.h | 18 +++++++--
 arch/arm/kernel/entry-armv.S       | 79 +++++++++++++++++++++++++++++++++++---
 arch/arm/kernel/entry-common.S     | 24 ++++++++++++
 arch/arm/kernel/spectre.c          |  4 ++
 arch/arm/kernel/traps.c            | 38 ++++++++++++++++++
 arch/arm/mm/Kconfig                | 10 +++++
 arch/arm/mm/proc-v7-bugs.c         | 76 ++++++++++++++++++++++++++++++++++++
 9 files changed, 254 insertions(+), 9 deletions(-)

(limited to 'arch/arm/kernel/spectre.c')

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 7d23d4bb2168..49ea603fc8e3 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -107,6 +107,16 @@
 	.endm
 #endif
 
+#if __LINUX_ARM_ARCH__ < 7
+	.macro	dsb, args
+	mcr	p15, 0, r0, c7, c10, 4
+	.endm
+
+	.macro	isb, args
+	mcr	p15, 0, r0, c7, c5, 4
+	.endm
+#endif
+
 	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	.if \save
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
index 8a9019e08dba..d1fa5607d3aa 100644
--- a/arch/arm/include/asm/spectre.h
+++ b/arch/arm/include/asm/spectre.h
@@ -14,6 +14,7 @@ enum {
 	__SPECTRE_V2_METHOD_ICIALLU,
 	__SPECTRE_V2_METHOD_SMC,
 	__SPECTRE_V2_METHOD_HVC,
+	__SPECTRE_V2_METHOD_LOOP8,
 };
 
 enum {
@@ -21,8 +22,11 @@ enum {
 	SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
 	SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
 	SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+	SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
 };
 
 void spectre_v2_update_state(unsigned int state, unsigned int methods);
 
+int spectre_bhb_update_vectors(unsigned int method);
+
 #endif
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index e02710d17cf9..0ef21bfae9f6 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -116,11 +116,23 @@
  */
 #define ARM_VECTORS						\
 	__vectors_lma = .;					\
-	.vectors 0xffff0000 : AT(__vectors_start) {		\
-		*(.vectors)					\
+	OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {	\
+		.vectors {					\
+			*(.vectors)				\
+		}						\
+		.vectors.bhb.loop8 {				\
+			*(.vectors.bhb.loop8)			\
+		}						\
+		.vectors.bhb.bpiall {				\
+			*(.vectors.bhb.bpiall)			\
+		}						\
 	}							\
 	ARM_LMA(__vectors, .vectors);				\
-	. = __vectors_lma + SIZEOF(.vectors);			\
+	ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);	\
+	ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);	\
+	. = __vectors_lma + SIZEOF(.vectors) +			\
+		SIZEOF(.vectors.bhb.loop8) +			\
+		SIZEOF(.vectors.bhb.bpiall);			\
 								\
 	__stubs_lma = .;					\
 	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {	\
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 5cd057859fe9..676703cbfe4b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1002,12 +1002,11 @@ vector_\name:
 	sub	lr, lr, #\correction
 	.endif
 
-	@
-	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-	@ (parent CPSR)
-	@
+	@ Save r0, lr_<exception> (parent PC)
 	stmia	sp, {r0, lr}		@ save r0, lr
-	mrs	lr, spsr
+
+	@ Save spsr_<exception> (parent CPSR)
+2:	mrs	lr, spsr
 	str	lr, [sp, #8]		@ save spsr
 
 	@
@@ -1028,6 +1027,44 @@ vector_\name:
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.subsection	1
+	.align 5
+vector_bhb_loop8_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mov	r0, #8
+1:	b	. + 4
+	subs	r0, r0, #1
+	bne	1b
+	dsb
+	isb
+	b	2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+	b	2b
+ENDPROC(vector_bhb_bpiall_\name)
+	.previous
+#endif
+
 	.align	2
 	@ handler addresses follow this label
 1:
@@ -1036,6 +1073,10 @@
 	.section .stubs, "ax", %progbits
 	@ This must be the first word
 	.word vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.word vector_bhb_loop8_swi
+	.word vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
 ARM(	swi	SYS_ERROR0	)
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
  */
+	.subsection 2
 	vector_stub	fiq, FIQ_MODE, 4
 
 	.long	__fiq_usr			@  0  (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
 	W(b)	vector_irq
 	W(b)	vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_loop8_und
+	W(ldr)	pc, .L__vectors_bhb_loop8_start + 0x1004
+	W(b)	vector_bhb_loop8_pabt
+	W(b)	vector_bhb_loop8_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_loop8_irq
+	W(b)	vector_bhb_loop8_fiq
+
+	.section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_bpiall_und
+	W(ldr)	pc, .L__vectors_bhb_bpiall_start + 0x1008
+	W(b)	vector_bhb_bpiall_pabt
+	W(b)	vector_bhb_bpiall_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_bpiall_irq
+	W(b)	vector_bhb_bpiall_fiq
+#endif
+
 	.data
 	.align	2
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index ac86c34682bb..dbc1913ee30b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -153,6 +153,29 @@ ENDPROC(ret_from_fork)
 *-----------------------------------------------------------------------------
 */
 
+	.align	5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mov	r8, #8
+1:	b	2f
+2:	subs	r8, r8, #1
+	bne	1b
+	dsb
+	isb
+	b	3f
+ENDPROC(vector_bhb_loop8_swi)
+
+	.align	5
+ENTRY(vector_bhb_bpiall_swi)
+	sub	sp, sp, #PT_REGS_SIZE
+	stmia	sp, {r0 - r12}
+	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
+	isb
+	b	3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
 	.align	5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
@@ -160,6 +183,7 @@ ENTRY(vector_swi)
 #else
 	sub	sp, sp, #PT_REGS_SIZE
 	stmia	sp, {r0 - r12}		@ Calling r0 - r12
+3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
index 6f6dd1cfd099..ade967f18d06 100644
--- a/arch/arm/kernel/spectre.c
+++ b/arch/arm/kernel/spectre.c
@@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 		method = "Firmware call";
 		break;
 
+	case SPECTRE_V2_METHOD_LOOP8:
+		method = "History overwrite";
+		break;
+
 	default:
 		method = "Multiple mitigations";
 		break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abc82dfeab51..90c887aa67a4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -801,6 +802,43 @@ static void flush_vectors(void *vma, size_t offset, size_t size)
 	flush_icache_range(start, end);
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+	void *vec_start, *vec_end;
+
+	if (system_state >= SYSTEM_FREEING_INITMEM) {
+		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+		       smp_processor_id());
+		return SPECTRE_VULNERABLE;
+	}
+
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		vec_start = __vectors_bhb_loop8_start;
+		vec_end = __vectors_bhb_loop8_end;
+		break;
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		vec_start = __vectors_bhb_bpiall_start;
+		vec_end = __vectors_bhb_bpiall_end;
+		break;
+
+	default:
+		pr_err("CPU%u: unknown Spectre BHB state %d\n",
+		       smp_processor_id(), method);
+		return SPECTRE_VULNERABLE;
+	}
+
+	copy_from_lma(vectors_page, vec_start, vec_end);
+	flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+	return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
 	extern char __stubs_start[], __stubs_end[];
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 850329ea9bf8..9724c16e9076 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -851,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
 	  If unsure, say Y.
 
+config HARDEN_BRANCH_HISTORY
+	bool "Harden Spectre style attacks against branch history" if EXPERT
+	depends on CPU_SPECTRE
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation. When
+	  taking an exception, a sequence of branches overwrites the branch
+	  history, or branch history is invalidated.
+
 config TLS_REG_EMUL
 	bool
 	select NEED_KUSER_HELPERS
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index e438e59bb63e..c226feab2457 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -177,6 +177,81 @@ static void cpu_v7_spectre_v2_init(void)
 	spectre_v2_update_state(state, method);
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+	switch (method) {
+	case SPECTRE_V2_METHOD_LOOP8:
+		return "loop";
+
+	case SPECTRE_V2_METHOD_BPIALL:
+		return "BPIALL";
+
+	default:
+		return "unknown";
+	}
+}
+
+static int spectre_bhb_install_workaround(int method)
+{
+	if (spectre_bhb_method != method) {
+		if (spectre_bhb_method) {
+			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+			       smp_processor_id());
+
+			return SPECTRE_VULNERABLE;
+		}
+
+		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+			return SPECTRE_VULNERABLE;
+
+		spectre_bhb_method = method;
+	}
+
+	pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+		smp_processor_id(), spectre_bhb_method_name(method));
+
+	return SPECTRE_MITIGATED;
+}
+#else
+static int spectre_bhb_install_workaround(int method)
+{
+	return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_bhb_init(void)
+{
+	unsigned int state, method = 0;
+
+	switch (read_cpuid_part()) {
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_LOOP8;
+		break;
+
+	case ARM_CPU_PART_CORTEX_A73:
+	case ARM_CPU_PART_CORTEX_A75:
+		state = SPECTRE_MITIGATED;
+		method = SPECTRE_V2_METHOD_BPIALL;
+		break;
+
+	default:
+		state = SPECTRE_UNAFFECTED;
+		break;
+	}
+
+	if (state == SPECTRE_MITIGATED)
+		state = spectre_bhb_install_workaround(method);
+
+	spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool
 cpu_v7_check_auxcr_set(bool *warned, u32 mask, const char *msg)
 {
@@ -217,4 +292,5 @@ void cpu_v7_ca15_ibe(void)
 void cpu_v7_bugs_init(void)
 {
 	cpu_v7_spectre_v2_init();
+	cpu_v7_spectre_bhb_init();
 }
--
cgit v1.2.3-70-g09d2
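
A note on how the new LOOP8 method interacts with the existing reporting:
spectre_v2_methods is a bitmask, so a CPU that has already registered a
firmware-call Spectre v2 mitigation and then installs the BHB loop workaround
ends up with two bits set, and cpu_show_spectre_v2() falls through to
"Multiple mitigations". The sketch below is illustrative only; it mirrors the
method-bitmask behaviour of the reporting code (state handling omitted) rather
than reusing kernel code:

/* Illustrative only -- mirrors the sysfs reporting logic, not kernel code. */
#include <stdio.h>

enum {
	METHOD_BPIALL	= 1 << 0,
	METHOD_ICIALLU	= 1 << 1,
	METHOD_SMC	= 1 << 2,
	METHOD_HVC	= 1 << 3,
	METHOD_LOOP8	= 1 << 4,
};

static unsigned int methods;	/* accumulates like spectre_v2_methods */

static const char *report(void)
{
	switch (methods) {
	case METHOD_BPIALL:
		return "Mitigation: Branch predictor hardening";
	case METHOD_ICIALLU:
		return "Mitigation: I-cache invalidation";
	case METHOD_SMC:
	case METHOD_HVC:
		return "Mitigation: Firmware call";
	case METHOD_LOOP8:
		return "Mitigation: History overwrite";
	default:
		return "Mitigation: Multiple mitigations";
	}
}

int main(void)
{
	methods |= METHOD_HVC;		/* Spectre v2: firmware call */
	printf("%s\n", report());	/* "Mitigation: Firmware call" */

	methods |= METHOD_LOOP8;	/* Spectre BHB: loop workaround */
	printf("%s\n", report());	/* "Mitigation: Multiple mitigations" */
	return 0;
}
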
From 25875aa71dfefd1959f07e626c4d285b88b27ac2 Mon Sep 17 00:00:00 2001
From: "Russell King (Oracle)"
Date: Mon, 7 Mar 2022 19:28:32 +0000
Subject: ARM: include unprivileged BPF status in Spectre V2 reporting

The mitigations for Spectre-BHB are only applied when an exception is
taken, but when unprivileged BPF is enabled, userspace can load BPF
programs that can be used to exploit the problem.

When unprivileged BPF is enabled, report the vulnerable status via the
spectre_v2 sysfs file.

Signed-off-by: Russell King (Oracle)
---
 arch/arm/kernel/spectre.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'arch/arm/kernel/spectre.c')

diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
index ade967f18d06..e7fea962d632 100644
--- a/arch/arm/kernel/spectre.c
+++ b/arch/arm/kernel/spectre.c
@@ -1,9 +1,19 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
 
 #include <asm/spectre.h>
 
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	return !sysctl_unprivileged_bpf_disabled;
+#else
+	return false
+#endif
+}
+
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
@@ -31,6 +41,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
 	if (spectre_v2_state != SPECTRE_MITIGATED)
 		return sprintf(buf, "%s\n", "Vulnerable");
 
+	if (_unprivileged_ebpf_enabled())
+		return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
 	switch (spectre_v2_methods) {
 	case SPECTRE_V2_METHOD_BPIALL:
 		method = "Branch predictor hardening";
--
cgit v1.2.3-70-g09d2

From 330f4c53d3c2d8b11d86ec03a964b86dc81452f5 Mon Sep 17 00:00:00 2001
From: Emmanuel Gil Peyrot
Date: Tue, 8 Mar 2022 20:18:20 +0100
Subject: ARM: fix build error when BPF_SYSCALL is disabled

The fallback "return false" in _unprivileged_ebpf_enabled() was missing
its terminating semicolon, breaking the build when CONFIG_BPF_SYSCALL is
disabled.

Signed-off-by: Emmanuel Gil Peyrot
Reviewed-by: Nathan Chancellor
Fixes: 25875aa71dfe ("ARM: include unprivileged BPF status in Spectre V2 reporting")
Signed-off-by: Linus Torvalds
---
 arch/arm/kernel/spectre.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel/spectre.c')

diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
index e7fea962d632..0dcefc36fb7a 100644
--- a/arch/arm/kernel/spectre.c
+++ b/arch/arm/kernel/spectre.c
@@ -10,7 +10,7 @@ static bool _unprivileged_ebpf_enabled(void)
 #ifdef CONFIG_BPF_SYSCALL
 	return !sysctl_unprivileged_bpf_disabled;
 #else
-	return false
+	return false;
 #endif
 }
--
cgit v1.2.3-70-g09d2
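
Usage note for the unprivileged-BPF check added above: sysctl_unprivileged_bpf_disabled
is the kernel.unprivileged_bpf_disabled sysctl, so the condition that switches
the spectre_v2 report to "Vulnerable: Unprivileged eBPF enabled" can be
inspected from userspace. The sketch below is illustrative only and assumes
the usual procfs sysctl path:

/* Illustrative only -- checks the sysctl that drives the eBPF warning above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
	int val = -1;

	if (!f) {
		perror("unprivileged_bpf_disabled");
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);

	if (val == 0)
		printf("unprivileged eBPF enabled: mitigated systems report "
		       "\"Vulnerable: Unprivileged eBPF enabled\"\n");
	else
		printf("unprivileged eBPF disabled (%d): the normal mitigation "
		       "string is reported\n", val);
	return 0;
}
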