Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  107
1 file changed, 97 insertions, 10 deletions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 66ca906aa790..c6fa3ef10b4e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,6 +34,7 @@
 
 #include "cpu.h"
 
+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -98,17 +99,11 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_STIBP))
 		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-	/* Select the proper spectre mitigation before patching alternatives */
+	/* Select the proper CPU mitigations before patching alternatives: */
+	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
-
-	/*
-	 * Select proper mitigation for any exposure to the Speculative Store
-	 * Bypass vulnerability.
-	 */
 	ssb_select_mitigation();
-
 	l1tf_select_mitigation();
-
 	mds_select_mitigation();
 
 	arch_smt_update();
@@ -274,6 +269,98 @@ static int __init mds_cmdline(char *str)
 early_param("mds", mds_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+	SPECTRE_V1_MITIGATION_NONE,
+	SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+	SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SMAP))
+		return false;
+
+	/*
+	 * On CPUs which are vulnerable to Meltdown, SMAP does not
+	 * prevent speculative access to user data in the L1 cache.
+	 * Consider SMAP to be non-functional as a mitigation on these
+	 * CPUs.
+	 */
+	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+		return false;
+
+	return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+		return;
+	}
+
+	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+		/*
+		 * With Spectre v1, a user can speculatively control either
+		 * path of a conditional swapgs with a user-controlled GS
+		 * value.  The mitigation is to add lfences to both code paths.
+		 *
+		 * If FSGSBASE is enabled, the user can put a kernel address in
+		 * GS, in which case SMAP provides no protection.
+		 *
+		 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+		 *	   FSGSBASE enablement patches have been merged. ]
+		 *
+		 * If FSGSBASE is disabled, the user can only put a user space
+		 * address in GS.  That makes an attack harder, but still
+		 * possible if there's no SMAP protection.
+		 */
+		if (!smap_works_speculatively()) {
+			/*
+			 * Mitigation can be provided from SWAPGS itself or
+			 * PTI as the CR3 write in the Meltdown mitigation
+			 * is serializing.
+			 *
+			 * If neither is there, mitigate with an LFENCE to
+			 * stop speculation through swapgs.
+			 */
+			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+			    !boot_cpu_has(X86_FEATURE_PTI))
+				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+			/*
+			 * Enable lfences in the kernel entry (non-swapgs)
+			 * paths, to prevent user entry from speculatively
+			 * skipping swapgs.
+			 */
+			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+		}
+	}
+
+	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+	return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -1226,7 +1313,7 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t mds_show_state(char *buf)
 {
-	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		return sprintf(buf, "%s; SMT Host state unknown\n",
 			       mds_strings[mds_mitigation]);
 	}
@@ -1290,7 +1377,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		break;
 
 	case X86_BUG_SPECTRE_V1:
-		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
 		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
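For readers tracing the decision logic above, here is a minimal user-space sketch of the AUTO branch of spectre_v1_select_mitigation(): it models which of the two synthetic feature bits (X86_FEATURE_FENCE_SWAPGS_USER / X86_FEATURE_FENCE_SWAPGS_KERNEL) would be forced for a given combination of SMAP, Meltdown, the SWAPGS bug and PTI. The struct cpu_state, smap_works() and pick_fences() names are hypothetical stand-ins for the boot_cpu_has()/boot_cpu_has_bug() checks in the hunk, not kernel APIs; the patch itself only forces the capability bits here and relies on the entry code to patch in the actual LFENCEs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the boot_cpu_has()/boot_cpu_has_bug() checks above. */
struct cpu_state {
	bool smap;		/* X86_FEATURE_SMAP     */
	bool meltdown;		/* X86_BUG_CPU_MELTDOWN */
	bool swapgs_bug;	/* X86_BUG_SWAPGS       */
	bool pti;		/* X86_FEATURE_PTI      */
};

/* Mirrors smap_works_speculatively(): SMAP only counts if Meltdown does not defeat it. */
static bool smap_works(const struct cpu_state *c)
{
	return c->smap && !c->meltdown;
}

/* Model of the AUTO branch in spectre_v1_select_mitigation(). */
static void pick_fences(const struct cpu_state *c, bool *fence_user, bool *fence_kernel)
{
	*fence_user = *fence_kernel = false;

	if (smap_works(c))
		return;		/* speculative SMAP is enough; no fences forced */

	/* User-entry fence only if neither SWAPGS itself nor PTI's CR3 write serializes. */
	if (c->swapgs_bug && !c->pti)
		*fence_user = true;

	/* The kernel-entry (non-swapgs) paths always get the fence in this case. */
	*fence_kernel = true;
}

int main(void)
{
	const struct cpu_state examples[] = {
		{ .smap = true,  .meltdown = false, .swapgs_bug = true,  .pti = false },
		{ .smap = false, .meltdown = true,  .swapgs_bug = true,  .pti = true  },
		{ .smap = false, .meltdown = true,  .swapgs_bug = true,  .pti = false },
	};

	for (unsigned int i = 0; i < sizeof(examples) / sizeof(examples[0]); i++) {
		bool user, kernel;

		pick_fences(&examples[i], &user, &kernel);
		printf("cpu %u: FENCE_SWAPGS_USER=%d FENCE_SWAPGS_KERNEL=%d\n",
		       i, user, kernel);
	}
	return 0;
}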
