author		Linus Torvalds <torvalds@linux-foundation.org>	2024-11-22 12:52:03 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-22 12:52:03 -0800
commit		5af5d43f848e95019d0e018e67a7a341c6a5e00d (patch)
tree		a2b3c1ad018c407b74bbc148545559bad3ad8f7a /arch
parent		be9318cd5a36ff67689e0fb0f8f5007a56290ac7 (diff)
parent		62e724494db7954c47b4417769f1225cf98f4d77 (diff)
Merge tag 'x86_misc_for_6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 updates from Dave Hansen:
 "As usual for this branch, these are super random: a compile fix for
  some newish LLVM checks and making sure a Kconfig text reference to
  'RSB' matches the normal definition:

   - Rework some CPU setup code to keep LLVM happy on 32-bit

   - Correct RSB terminology in Kconfig text"

* tag 'x86_misc_for_6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Make sure flag_is_changeable_p() is always being used
  x86/bugs: Correct RSB terminology in Kconfig
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/Kconfig			17
-rw-r--r--	arch/x86/include/asm/cpuid.h		 8
-rw-r--r--	arch/x86/kernel/cpu/common.c		39
3 files changed, 31 insertions, 33 deletions
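
For context on the x86/cpu patch below: flag_is_changeable_p() tests whether a given EFLAGS bit can be toggled, and have_cpuid_p() uses it with X86_EFLAGS_ID to detect whether a 32-bit CPU implements the CPUID instruction at all. A rough standalone sketch of that probe, mirroring the asm visible in the patch context (the function name eflags_bit_is_changeable and the EFLAGS_ID macro are illustrative; 32-bit x86 only):

#include <stdbool.h>	/* standalone sketch; the kernel gets bool from <linux/types.h> */

#define EFLAGS_ID	(1UL << 21)	/* X86_EFLAGS_ID: the CPUID-detection flag */

static bool eflags_bit_is_changeable(unsigned long flag)
{
	unsigned long f1, f2;

	asm volatile("pushfl		\n\t"	/* save EFLAGS                     */
		     "pushfl		\n\t"	/* push a copy to work on          */
		     "popl %0		\n\t"	/* f1 = current EFLAGS             */
		     "movl %0, %1	\n\t"	/* f2 = original value             */
		     "xorl %2, %0	\n\t"	/* flip the requested bit          */
		     "pushl %0		\n\t"
		     "popfl		\n\t"	/* try to write the modified value */
		     "pushfl		\n\t"
		     "popl %0		\n\t"	/* f1 = EFLAGS after the attempt   */
		     "popfl		\n\t"	/* restore the saved EFLAGS        */
		     : "=&r" (f1), "=&r" (f2)
		     : "ir" (flag));

	return (f1 ^ f2) & flag;	/* non-zero iff the bit really changed */
}

/* have_cpuid_p() boils down to eflags_bit_is_changeable(EFLAGS_ID). */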
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a3c31b784edc..948707a3615e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2564,15 +2564,14 @@ config MITIGATION_CALL_DEPTH_TRACKING
default y
help
Compile the kernel with call depth tracking to mitigate the Intel
- SKL Return-Speculation-Buffer (RSB) underflow issue. The
- mitigation is off by default and needs to be enabled on the
- kernel command line via the retbleed=stuff option. For
- non-affected systems the overhead of this option is marginal as
- the call depth tracking is using run-time generated call thunks
- in a compiler generated padding area and call patching. This
- increases text size by ~5%. For non affected systems this space
- is unused. On affected SKL systems this results in a significant
- performance gain over the IBRS mitigation.
+ SKL Return-Stack-Buffer (RSB) underflow issue. The mitigation is off
+ by default and needs to be enabled on the kernel command line via the
+ retbleed=stuff option. For non-affected systems the overhead of this
+ option is marginal as the call depth tracking is using run-time
+ generated call thunks in a compiler generated padding area and call
+ patching. This increases text size by ~5%. For non affected systems
+ this space is unused. On affected SKL systems this results in a
+ significant performance gain over the IBRS mitigation.
config CALL_THUNKS_DEBUG
bool "Enable call thunks and call depth tracking debugging"
diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h
index ca4243318aad..239b9ba5c398 100644
--- a/arch/x86/include/asm/cpuid.h
+++ b/arch/x86/include/asm/cpuid.h
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_CPUID_H
#define _ASM_X86_CPUID_H
+#include <linux/types.h>
+
#include <asm/string.h>
struct cpuid_regs {
@@ -20,11 +22,11 @@ enum cpuid_regs_idx {
};
#ifdef CONFIG_X86_32
-extern int have_cpuid_p(void);
+bool have_cpuid_p(void);
#else
-static inline int have_cpuid_p(void)
+static inline bool have_cpuid_p(void)
{
- return 1;
+ return true;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
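
An illustrative use of the declarations above (the wrapper read_vendor_leaf_example is made up; have_cpuid_p() and native_cpuid() are the header's): guard CPUID accesses so very old 32-bit parts without CPUID are handled gracefully, while 64-bit builds see the constant-true inline and the check compiles away.

/* Illustrative only: query CPUID leaf 0 if the instruction exists. */
static void read_vendor_leaf_example(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;	/* leaf 0, subleaf 0 */

	if (!have_cpuid_p())
		return;		/* very old 32-bit CPU without CPUID */

	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* ebx, edx, ecx now hold the 12-byte vendor string, eax the max basic leaf */
}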
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 02637365d1a9..06a516f6795b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -276,21 +276,13 @@ static int __init x86_noinvpcid_setup(char *s)
}
early_param("noinvpcid", x86_noinvpcid_setup);
-#ifdef CONFIG_X86_32
-static int cachesize_override = -1;
-static int disable_x86_serial_nr = 1;
-
-static int __init cachesize_setup(char *str)
-{
- get_option(&str, &cachesize_override);
- return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
+static inline bool flag_is_changeable_p(unsigned long flag)
{
- u32 f1, f2;
+ unsigned long f1, f2;
+
+ if (!IS_ENABLED(CONFIG_X86_32))
+ return true;
/*
* Cyrix and IDT cpus allow disabling of CPUID
@@ -313,11 +305,22 @@ static inline int flag_is_changeable_p(u32 flag)
: "=&r" (f1), "=&r" (f2)
: "ir" (flag));
- return ((f1^f2) & flag) != 0;
+ return (f1 ^ f2) & flag;
}
+#ifdef CONFIG_X86_32
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
+
+static int __init cachesize_setup(char *str)
+{
+ get_option(&str, &cachesize_override);
+ return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
/* Probe for the CPUID instruction */
-int have_cpuid_p(void)
+bool have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
@@ -349,10 +352,6 @@ static int __init x86_serial_nr_setup(char *s)
}
__setup("serialnumber", x86_serial_nr_setup);
#else
-static inline int flag_is_changeable_p(u32 flag)
-{
- return 1;
-}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
@@ -1088,7 +1087,6 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_X86_32
int i;
/*
@@ -1109,7 +1107,6 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
break;
}
}
-#endif
}
#define NO_SPECULATION BIT(0)
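
A closing note on why the common.c rework lets the #ifdef CONFIG_X86_32 around identify_cpu_without_cpuid()'s loop disappear: IS_ENABLED(CONFIG_X86_32) expands to a compile-time 0 or 1, so the early return in flag_is_changeable_p() turns its 32-bit-only body into dead code on 64-bit builds while every caller stays #ifdef-free. A minimal sketch of the pattern (some_32bit_only_check and probe_with_32bit_asm are hypothetical names, not the kernel's):

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* bool */

static inline bool probe_with_32bit_asm(unsigned long flag)
{
	/* stand-in for the EFLAGS-toggling asm; see flag_is_changeable_p() */
	return true;
}

static inline bool some_32bit_only_check(unsigned long flag)
{
	if (!IS_ENABLED(CONFIG_X86_32))
		return true;	/* constant-folded: the call below is discarded on 64-bit */

	return probe_with_32bit_asm(flag);
}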