Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
-rw-r--r--   arch/x86/include/asm/tlbflush.h   40
1 file changed, 40 insertions, 0 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 29c9da6c62fc..58ce5288878e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -175,8 +175,16 @@ struct tlb_state {
 	 * are on.  This means that it may not match current->active_mm,
 	 * which will contain the previous user mm when we're in lazy TLB
 	 * mode even if we've already switched back to swapper_pg_dir.
+	 *
+	 * During switch_mm_irqs_off(), loaded_mm will be set to
+	 * LOADED_MM_SWITCHING during the brief interrupts-off window
+	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
+	 * is for nmi_uaccess_okay()'s benefit.
 	 */
 	struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
 	u16 loaded_mm_asid;
 	u16 next_asid;
 	/* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+	struct mm_struct *current_mm = current->mm;
+
+	VM_WARN_ON_ONCE(!loaded_mm);
+
+	/*
+	 * The condition we want to check is
+	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+	 * is supposed to be reasonably fast.
+	 *
+	 * Instead, we check the almost equivalent but somewhat conservative
+	 * condition below, and we rely on the fact that switch_mm_irqs_off()
+	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+	 */
+	if (loaded_mm != current_mm)
+		return false;
+
+	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+	return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
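
The new comment above loaded_mm describes a protocol that actually lives in the
context-switch path, not in this header. A minimal sketch of that protocol,
under stated assumptions, is below: it uses the usual x86 helpers
(this_cpu_write(), write_cr3(), build_cr3()), but the function name
example_switch_mm_irqs_off() is hypothetical, and the real
switch_mm_irqs_off() in arch/x86/mm/tlb.c also handles ASID reuse, lazy TLB
and tracing, all omitted here.

/*
 * Illustrative sketch only: park loaded_mm on the LOADED_MM_SWITCHING
 * sentinel before the CR3 write and publish the new mm afterwards, so
 * an NMI that lands in between sees a value that can never equal
 * current->mm and nmi_uaccess_okay() fails safely.
 */
static void example_switch_mm_irqs_off(struct mm_struct *next, u16 new_asid)
{
	/* Let nmi_uaccess_okay() know that CR3 is about to change. */
	this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
	barrier();

	/* CR3 and loaded_mm are inconsistent during this window. */
	write_cr3(build_cr3(next->pgd, new_asid));

	/* CR3 now points at 'next'; make loaded_mm consistent again. */
	this_cpu_write(cpu_tlbstate.loaded_mm, next);
}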

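For context, a hedged sketch of how an NMI-context user-memory reader would
consume nmi_uaccess_okay(): a copy_from_user_nmi()-style helper is the typical
caller. The helper name example_copy_from_user_nmi() and its exact return
convention are illustrative assumptions, not part of this patch.

/*
 * Illustrative caller: bail out before touching user memory from NMI
 * context whenever nmi_uaccess_okay() says CR3 and loaded_mm may be
 * out of sync.  Returns the number of bytes *not* copied, mirroring
 * __copy_from_user_inatomic().
 */
static unsigned long example_copy_from_user_nmi(void *to,
						const void __user *from,
						unsigned long n)
{
	unsigned long ret;

	if (!nmi_uaccess_okay())
		return n;	/* nothing copied; mapping can't be trusted */

	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}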