author     Linus Torvalds <torvalds@linux-foundation.org>   2016-12-23 16:54:46 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-12-23 16:54:46 -0800
commit     6ac3bb167fed0b3d02b4fd3daa0d819841d5f6f4
tree       49f73d917f1cd7d848e9a48c9efffea8ab2f7142 /arch
parent     eb3e8d9de28a5385f75e5c42eba5fb5b0c7625be
parent     c280f7736ab26a601932b1ce017a3840dbedcfdc
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
"There's a number of fixes:
- a round of fixes for CPUID-less legacy CPUs
- a number of microcode loader fixes
- i8042 detection robustization fixes
- stack dump/unwinder fixes
- x86 SoC platform driver fixes
- a GCC 7 warning fix
- virtualization related fixes"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
Revert "x86/unwind: Detect bad stack return address"
x86/paravirt: Mark unused patch_default label
x86/microcode/AMD: Reload proper initrd start address
x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
x86/platform/intel-mid: Switch MPU3050 driver to IIO
x86/alternatives: Do not use sync_core() to serialize I$
x86/topology: Document cpu_llc_id
x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
x86/asm: Rewrite sync_core() to use IRET-to-self
x86/microcode/intel: Replace sync_core() with native_cpuid()
Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
x86/tools: Fix gcc-7 warning in relocs.c
x86/unwind: Dump stack data on warnings
x86/unwind: Adjust last frame check for aligned function stacks
x86/init: Fix a couple of comment typos
x86/init: Remove i8042_detect() from platform ops
Input: i8042 - Trust firmware a bit more when probing on X86
x86/init: Add i8042 state to the platform data
...
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/boot/cpu.c                                |   6
-rw-r--r--  arch/x86/include/asm/processor.h                   |  80
-rw-r--r--  arch/x86/include/asm/unwind.h                      |   2
-rw-r--r--  arch/x86/include/asm/x86_init.h                    |  26
-rw-r--r--  arch/x86/kernel/acpi/boot.c                        |   7
-rw-r--r--  arch/x86/kernel/alternative.c                      |  15
-rw-r--r--  arch/x86/kernel/cpu/common.c                       |   7
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c                |  56
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c               |  40
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c              |  26
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c                     |  24
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c                |   2
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c                |   2
-rw-r--r--  arch/x86/kernel/platform-quirks.c                  |   5
-rw-r--r--  arch/x86/kernel/unwind_frame.c                     |  56
-rw-r--r--  arch/x86/kernel/x86_init.c                         |   2
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c                  |   6
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile   |   2
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c            |   7
-rw-r--r--  arch/x86/platform/intel-quark/imr_selftest.c       |   3
-rw-r--r--  arch/x86/tools/relocs.c                            |   3
21 files changed, 263 insertions, 114 deletions
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c index 4224ede43b4e..26240dde081e 100644 --- a/arch/x86/boot/cpu.c +++ b/arch/x86/boot/cpu.c @@ -87,12 +87,6 @@ int validate_cpu(void) return -1; } - if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && - !has_eflag(X86_EFLAGS_ID)) { - printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n"); - return -1; - } - if (err_flags) { puts("This kernel requires the following features " "not present on the CPU:\n"); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 6aa741fbe1df..eaf100508c36 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -602,33 +602,69 @@ static __always_inline void cpu_relax(void) rep_nop(); } -/* Stop speculative execution and prefetching of modified code. */ +/* + * This function forces the icache and prefetched instruction stream to + * catch up with reality in two very specific cases: + * + * a) Text was modified using one virtual address and is about to be executed + * from the same physical page at a different virtual address. + * + * b) Text was modified on a different CPU, may subsequently be + * executed on this CPU, and you want to make sure the new version + * gets executed. This generally means you're calling this in a IPI. + * + * If you're calling this for a different reason, you're probably doing + * it wrong. + */ static inline void sync_core(void) { - int tmp; - -#ifdef CONFIG_M486 /* - * Do a CPUID if available, otherwise do a jump. The jump - * can conveniently enough be the jump around CPUID. + * There are quite a few ways to do this. IRET-to-self is nice + * because it works on every CPU, at any CPL (so it's compatible + * with paravirtualization), and it never exits to a hypervisor. + * The only down sides are that it's a bit slow (it seems to be + * a bit more than 2x slower than the fastest options) and that + * it unmasks NMIs. The "push %cs" is needed because, in + * paravirtual environments, __KERNEL_CS may not be a valid CS + * value when we do IRET directly. + * + * In case NMI unmasking or performance ever becomes a problem, + * the next best option appears to be MOV-to-CR2 and an + * unconditional jump. That sequence also works on all CPUs, + * but it will fault at CPL3 (i.e. Xen PV and lguest). + * + * CPUID is the conventional way, but it's nasty: it doesn't + * exist on some 486-like CPUs, and it usually exits to a + * hypervisor. + * + * Like all of Linux's memory ordering operations, this is a + * compiler barrier as well. */ - asm volatile("cmpl %2,%1\n\t" - "jl 1f\n\t" - "cpuid\n" - "1:" - : "=a" (tmp) - : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1) - : "ebx", "ecx", "edx", "memory"); + register void *__sp asm(_ASM_SP); + +#ifdef CONFIG_X86_32 + asm volatile ( + "pushfl\n\t" + "pushl %%cs\n\t" + "pushl $1f\n\t" + "iret\n\t" + "1:" + : "+r" (__sp) : : "memory"); #else - /* - * CPUID is a barrier to speculative execution. - * Prefetched instructions are automatically - * invalidated when modified. 
- */ - asm volatile("cpuid" - : "=a" (tmp) - : "0" (1) - : "ebx", "ecx", "edx", "memory"); + unsigned int tmp; + + asm volatile ( + "mov %%ss, %0\n\t" + "pushq %q0\n\t" + "pushq %%rsp\n\t" + "addq $8, (%%rsp)\n\t" + "pushfq\n\t" + "mov %%cs, %0\n\t" + "pushq %q0\n\t" + "pushq $1f\n\t" + "iretq\n\t" + "1:" + : "=&r" (tmp), "+r" (__sp) : : "cc", "memory"); #endif } diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index c5a7f3a930dd..6fa75b17aec3 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -12,7 +12,7 @@ struct unwind_state { struct task_struct *task; int graph_idx; #ifdef CONFIG_FRAME_POINTER - unsigned long *bp; + unsigned long *bp, *orig_sp; struct pt_regs *regs; #else unsigned long *sp; diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 6ba793178441..7ba7e90a9ad6 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -59,7 +59,7 @@ struct x86_init_irqs { /** * struct x86_init_oem - oem platform specific customizing functions - * @arch_setup: platform specific architecure setup + * @arch_setup: platform specific architecture setup * @banner: print a platform specific banner */ struct x86_init_oem { @@ -165,8 +165,25 @@ struct x86_legacy_devices { }; /** + * enum x86_legacy_i8042_state - i8042 keyboard controller state + * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on + * given platform/subarch. + * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller + * is absent. + * @X86_LEGACY_i8042_EXPECTED_PRESENT: the controller is likely to be + * present, the i8042 driver should probe for controller existence. + */ +enum x86_legacy_i8042_state { + X86_LEGACY_I8042_PLATFORM_ABSENT, + X86_LEGACY_I8042_FIRMWARE_ABSENT, + X86_LEGACY_I8042_EXPECTED_PRESENT, +}; + +/** * struct x86_legacy_features - legacy x86 features * + * @i8042: indicated if we expect the device to have i8042 controller + * present. * @rtc: this device has a CMOS real-time clock present * @reserve_bios_regions: boot code will search for the EBDA address and the * start of the 640k - 1M BIOS region. If false, the platform must @@ -175,6 +192,7 @@ struct x86_legacy_devices { * documentation for further details. */ struct x86_legacy_features { + enum x86_legacy_i8042_state i8042; int rtc; int reserve_bios_regions; struct x86_legacy_devices devices; @@ -188,15 +206,14 @@ struct x86_legacy_features { * @set_wallclock: set time back to HW clock * @is_untracked_pat_range exclude from PAT logic * @nmi_init enable NMI on cpus - * @i8042_detect pre-detect if i8042 controller exists * @save_sched_clock_state: save state for sched_clock() on suspend * @restore_sched_clock_state: restore state for sched_clock() on resume - * @apic_post_init: adjust apic if neeeded + * @apic_post_init: adjust apic if needed * @legacy: legacy features * @set_legacy_features: override legacy features. Use of this callback * is highly discouraged. You should only need * this if your hardware platform requires further - * custom fine tuning far beyong what may be + * custom fine tuning far beyond what may be * possible in x86_early_init_platform_quirks() by * only using the current x86_hardware_subarch * semantics. 
@@ -210,7 +227,6 @@ struct x86_platform_ops { bool (*is_untracked_pat_range)(u64 start, u64 end); void (*nmi_init)(void); unsigned char (*get_nmi_reason)(void); - int (*i8042_detect)(void); void (*save_sched_clock_state)(void); void (*restore_sched_clock_state)(void); void (*apic_post_init)(void); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6f65b0eed384..64422f850e95 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -930,6 +930,13 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table) x86_platform.legacy.devices.pnpbios = 0; } + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) && + x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) { + pr_debug("ACPI: i8042 controller is absent\n"); + x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT; + } + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) { pr_debug("ACPI: not registering RTC platform device\n"); x86_platform.legacy.rtc = 0; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 5cb272a7a5a3..c5b8f760473c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -337,7 +337,11 @@ done: n_dspl, (unsigned long)orig_insn + n_dspl + repl_len); } -static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) +/* + * "noinline" to cause control flow change and thus invalidate I$ and + * cause refetch after modification. + */ +static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) { unsigned long flags; @@ -346,7 +350,6 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) local_irq_save(flags); add_nops(instr + (a->instrlen - a->padlen), a->padlen); - sync_core(); local_irq_restore(flags); DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", @@ -359,9 +362,12 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) * This implies that asymmetric systems where APs have less capabilities than * the boot processor are not handled. Tough. Make sure you disable such * features by hand. + * + * Marked "noinline" to cause control flow change and thus insn cache + * to refetch changed I$ lines. */ -void __init_or_module apply_alternatives(struct alt_instr *start, - struct alt_instr *end) +void __init_or_module noinline apply_alternatives(struct alt_instr *start, + struct alt_instr *end) { struct alt_instr *a; u8 *instr, *replacement; @@ -667,7 +673,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, unsigned long flags; local_irq_save(flags); memcpy(addr, opcode, len); - sync_core(); local_irq_restore(flags); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. 
*/ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1f6b50a449ab..dc1697ca5191 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -667,13 +667,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_1_EDX] = edx; } + /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ + if (c->cpuid_level >= 0x00000006) + c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); + /* Additional Intel-defined flags: level 0x00000007 */ if (c->cpuid_level >= 0x00000007) { cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - c->x86_capability[CPUID_7_0_EBX] = ebx; - - c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); c->x86_capability[CPUID_7_ECX] = ecx; } diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 6f353bdb3a25..6a31e2691f3a 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -116,10 +116,11 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, /* * This scans the ucode blob for the proper container as we can have multiple - * containers glued together. + * containers glued together. Returns the equivalence ID from the equivalence + * table or 0 if none found. */ -static struct container -find_proper_container(u8 *ucode, size_t size, u16 *ret_id) +static u16 +find_proper_container(u8 *ucode, size_t size, struct container *ret_cont) { struct container ret = { NULL, 0 }; u32 eax, ebx, ecx, edx; @@ -138,7 +139,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id) if (header[0] != UCODE_MAGIC || header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ header[2] == 0) /* size */ - return ret; + return eq_id; eax = 0x00000001; ecx = 0; @@ -163,8 +164,9 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id) * ucode update loop below */ left = ret.size - offset; - *ret_id = eq_id; - return ret; + + *ret_cont = ret; + return eq_id; } /* @@ -189,7 +191,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id) ucode = data; } - return ret; + return eq_id; } static int __apply_microcode_amd(struct microcode_amd *mc_amd) @@ -214,17 +216,18 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd) * and on 32-bit during save_microcode_in_initrd_amd() -- we can call * load_microcode_amd() to save equivalent cpu table and microcode patches in * kernel heap memory. + * + * Returns true if container found (sets @ret_cont), false otherwise. 
*/ -static struct container -apply_microcode_early_amd(void *ucode, size_t size, bool save_patch) +static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch, + struct container *ret_cont) { - struct container ret = { NULL, 0 }; u8 (*patch)[PATCH_MAX_SIZE]; + u32 rev, *header, *new_rev; + struct container ret; int offset, left; - u32 rev, *header; - u8 *data; u16 eq_id = 0; - u32 *new_rev; + u8 *data; #ifdef CONFIG_X86_32 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); @@ -235,11 +238,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch) #endif if (check_current_patch_level(&rev, true)) - return (struct container){ NULL, 0 }; + return false; - ret = find_proper_container(ucode, size, &eq_id); + eq_id = find_proper_container(ucode, size, &ret); if (!eq_id) - return (struct container){ NULL, 0 }; + return false; this_equiv_id = eq_id; header = (u32 *)ret.data; @@ -273,7 +276,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch) data += offset; left -= offset; } - return ret; + + if (ret_cont) + *ret_cont = ret; + + return true; } static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) @@ -294,6 +301,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) void __init load_ucode_amd_bsp(unsigned int family) { struct ucode_cpu_info *uci; + u32 eax, ebx, ecx, edx; struct cpio_data cp; const char *path; bool use_pa; @@ -315,9 +323,12 @@ void __init load_ucode_amd_bsp(unsigned int family) return; /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */ - uci->cpu_sig.sig = cpuid_eax(1); + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + uci->cpu_sig.sig = eax; - apply_microcode_early_amd(cp.data, cp.size, true); + apply_microcode_early_amd(cp.data, cp.size, true, NULL); } #ifdef CONFIG_X86_32 @@ -349,7 +360,7 @@ void load_ucode_amd_ap(unsigned int family) * This would set amd_ucode_patch above so that the following APs can * use it directly instead of going down this path again. 
*/ - apply_microcode_early_amd(cp.data, cp.size, true); + apply_microcode_early_amd(cp.data, cp.size, true, NULL); } #else void load_ucode_amd_ap(unsigned int family) @@ -387,8 +398,7 @@ reget: } } - cont = apply_microcode_early_amd(cp.data, cp.size, false); - if (!(cont.data && cont.size)) { + if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) { cont.size = -1; return; } @@ -443,7 +453,7 @@ int __init save_microcode_in_initrd_amd(unsigned int fam) return -EINVAL; } - cont = find_proper_container(cp.data, cp.size, &eq_id); + eq_id = find_proper_container(cp.data, cp.size, &cont); if (!eq_id) { cont.size = -1; return -EINVAL; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 6996413c78c3..2af69d27da62 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -44,7 +44,7 @@ #define DRIVER_VERSION "2.2" static struct microcode_ops *microcode_ops; -static bool dis_ucode_ldr; +static bool dis_ucode_ldr = true; LIST_HEAD(microcode_cache); @@ -76,6 +76,7 @@ struct cpu_info_ctx { static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; + u32 a, b, c, d; #ifdef CONFIG_X86_32 const char *cmdline = (const char *)__pa_nodebug(boot_command_line); @@ -88,8 +89,23 @@ static bool __init check_loader_disabled_bsp(void) bool *res = &dis_ucode_ldr; #endif - if (cmdline_find_option_bool(cmdline, option)) - *res = true; + if (!have_cpuid_p()) + return *res; + + a = 1; + c = 0; + native_cpuid(&a, &b, &c, &d); + + /* + * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not + * completely accurate as xen pv guests don't see that CPUID bit set but + * that's good enough as they don't land on the BSP path anyway. + */ + if (c & BIT(31)) + return *res; + + if (cmdline_find_option_bool(cmdline, option) <= 0) + *res = false; return *res; } @@ -121,9 +137,6 @@ void __init load_ucode_bsp(void) if (check_loader_disabled_bsp()) return; - if (!have_cpuid_p()) - return; - vendor = x86_cpuid_vendor(); family = x86_cpuid_family(); @@ -157,9 +170,6 @@ void load_ucode_ap(void) if (check_loader_disabled_ap()) return; - if (!have_cpuid_p()) - return; - vendor = x86_cpuid_vendor(); family = x86_cpuid_family(); @@ -233,14 +243,12 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) # endif /* - * Did we relocate the ramdisk? - * - * So we possibly relocate the ramdisk *after* applying microcode on the - * BSP so we rely on use_pa (use physical addresses) - even if it is not - * absolutely correct - to determine whether we've done the ramdisk - * relocation already. + * Fixup the start address: after reserve_initrd() runs, initrd_start + * has the virtual address of the beginning of the initrd. It also + * possibly relocates the ramdisk. In either case, initrd_start contains + * the updated address so use that instead. */ - if (!use_pa && relocated_ramdisk) + if (!use_pa && initrd_start) start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 54d50c3694d8..b624b54912e1 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -368,6 +368,26 @@ next: return patch; } +static void cpuid_1(void) +{ + /* + * According to the Intel SDM, Volume 3, 9.11.7: + * + * CPUID returns a value in a model specific register in + * addition to its usual register return values. 
The + * semantics of CPUID cause it to deposit an update ID value + * in the 64-bit model-specific register at address 08BH + * (IA32_BIOS_SIGN_ID). If no update is present in the + * processor, the value in the MSR remains unmodified. + * + * Use native_cpuid -- this code runs very early and we don't + * want to mess with paravirt. + */ + unsigned int eax = 1, ebx, ecx = 0, edx; + + native_cpuid(&eax, &ebx, &ecx, &edx); +} + static int collect_cpu_info_early(struct ucode_cpu_info *uci) { unsigned int val[2]; @@ -393,7 +413,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci) native_wrmsrl(MSR_IA32_UCODE_REV, 0); /* As documented in the SDM: Do a CPUID 1 here */ - sync_core(); + cpuid_1(); /* get the current revision from MSR 0x8B */ native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); @@ -593,7 +613,7 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) native_wrmsrl(MSR_IA32_UCODE_REV, 0); /* As documented in the SDM: Do a CPUID 1 here */ - sync_core(); + cpuid_1(); /* get the current revision from MSR 0x8B */ native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); @@ -805,7 +825,7 @@ static int apply_microcode_intel(int cpu) wrmsrl(MSR_IA32_UCODE_REV, 0); /* As documented in the SDM: Do a CPUID 1 here */ - sync_core(); + cpuid_1(); /* get the current revision from MSR 0x8B */ rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 6c044543545e..f37e02e41a77 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -30,6 +30,7 @@ #include <asm/apic.h> #include <asm/timer.h> #include <asm/reboot.h> +#include <asm/nmi.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); @@ -157,6 +158,26 @@ static unsigned char hv_get_nmi_reason(void) return 0; } +#ifdef CONFIG_X86_LOCAL_APIC +/* + * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes + * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle + * unknown NMI on the first CPU which gets it. 
+ */ +static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) +{ + static atomic_t nmi_cpu = ATOMIC_INIT(-1); + + if (!unknown_nmi_panic) + return NMI_DONE; + + if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1) + return NMI_HANDLED; + + return NMI_DONE; +} +#endif + static void __init ms_hyperv_init_platform(void) { /* @@ -182,6 +203,9 @@ static void __init ms_hyperv_init_platform(void) pr_info("HyperV: LAPIC Timer Frequency: %#x\n", lapic_timer_frequency); } + + register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST, + "hv_nmi_unknown"); #endif if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index d33ef165b1f8..553acbbb4d32 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -68,7 +68,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, #endif default: -patch_default: +patch_default: __maybe_unused ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); break; diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index f4fcf26c9fce..11aaf1eaa0e4 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -80,7 +80,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, #endif default: -patch_default: +patch_default: __maybe_unused ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); break; diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c index 24a50301f150..91271122f0df 100644 --- a/arch/x86/kernel/platform-quirks.c +++ b/arch/x86/kernel/platform-quirks.c @@ -6,6 +6,7 @@ void __init x86_early_init_platform_quirks(void) { + x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT; x86_platform.legacy.rtc = 1; x86_platform.legacy.reserve_bios_regions = 0; x86_platform.legacy.devices.pnpbios = 1; @@ -16,10 +17,14 @@ void __init x86_early_init_platform_quirks(void) break; case X86_SUBARCH_XEN: case X86_SUBARCH_LGUEST: + x86_platform.legacy.devices.pnpbios = 0; + x86_platform.legacy.rtc = 0; + break; case X86_SUBARCH_INTEL_MID: case X86_SUBARCH_CE4100: x86_platform.legacy.devices.pnpbios = 0; x86_platform.legacy.rtc = 0; + x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT; break; } diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index ea7b7f9a3b9e..4443e499f279 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -6,6 +6,37 @@ #define FRAME_HEADER_SIZE (sizeof(long) * 2) +static void unwind_dump(struct unwind_state *state, unsigned long *sp) +{ + static bool dumped_before = false; + bool prev_zero, zero = false; + unsigned long word; + + if (dumped_before) + return; + + dumped_before = true; + + printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n", + state->stack_info.type, state->stack_info.next_sp, + state->stack_mask, state->graph_idx); + + for (sp = state->orig_sp; sp < state->stack_info.end; sp++) { + word = READ_ONCE_NOCHECK(*sp); + + prev_zero = zero; + zero = word == 0; + + if (zero) { + if (!prev_zero) + printk_deferred("%p: %016x ...\n", sp, 0); + continue; + } + + printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word); + } +} + unsigned long unwind_get_return_address(struct unwind_state *state) { unsigned long addr; @@ -20,15 +51,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state) addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p, 
addr_p); - if (!__kernel_text_address(addr)) { - printk_deferred_once(KERN_WARNING - "WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n", - (void *)addr, addr_p, state->task->comm, - state->task->pid); - return 0; - } - - return addr; + return __kernel_text_address(addr) ? addr : 0; } EXPORT_SYMBOL_GPL(unwind_get_return_address); @@ -46,7 +69,14 @@ static bool is_last_task_frame(struct unwind_state *state) unsigned long bp = (unsigned long)state->bp; unsigned long regs = (unsigned long)task_pt_regs(state->task); - return bp == regs - FRAME_HEADER_SIZE; + /* + * We have to check for the last task frame at two different locations + * because gcc can occasionally decide to realign the stack pointer and + * change the offset of the stack frame by a word in the prologue of a + * function called by head/entry code. + */ + return bp == regs - FRAME_HEADER_SIZE || + bp == regs - FRAME_HEADER_SIZE - sizeof(long); } /* @@ -67,6 +97,7 @@ static bool update_stack_state(struct unwind_state *state, void *addr, size_t len) { struct stack_info *info = &state->stack_info; + enum stack_type orig_type = info->type; /* * If addr isn't on the current stack, switch to the next one. @@ -80,6 +111,9 @@ static bool update_stack_state(struct unwind_state *state, void *addr, &state->stack_mask)) return false; + if (!state->orig_sp || info->type != orig_type) + state->orig_sp = addr; + return true; } @@ -178,11 +212,13 @@ bad_address: "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", state->regs, state->task->comm, state->task->pid, next_frame); + unwind_dump(state, (unsigned long *)state->regs); } else { printk_deferred_once(KERN_WARNING "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n", state->bp, state->task->comm, state->task->pid, next_frame); + unwind_dump(state, state->bp); } the_end: state->stack_info.type = STACK_TYPE_UNKNOWN; diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 0bd9f1287f39..11a93f005268 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -89,7 +89,6 @@ struct x86_cpuinit_ops x86_cpuinit = { }; static void default_nmi_init(void) { }; -static int default_i8042_detect(void) { return 1; }; struct x86_platform_ops x86_platform __ro_after_init = { .calibrate_cpu = native_calibrate_cpu, @@ -100,7 +99,6 @@ struct x86_platform_ops x86_platform __ro_after_init = { .is_untracked_pat_range = is_ISA_range, .nmi_init = default_nmi_init, .get_nmi_reason = default_get_nmi_reason, - .i8042_detect = default_i8042_detect, .save_sched_clock_state = tsc_save_sched_clock_state, .restore_sched_clock_state = tsc_restore_sched_clock_state, }; diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index 821cb41f00e6..ce4b06733c09 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -23,11 +23,6 @@ #include <asm/io_apic.h> #include <asm/emergency-restart.h> -static int ce4100_i8042_detect(void) -{ - return 0; -} - /* * The CE4100 platform has an internal 8051 Microcontroller which is * responsible for signaling to the external Power Management Unit the @@ -145,7 +140,6 @@ static void sdv_pci_init(void) void __init x86_ce4100_early_setup(void) { x86_init.oem.arch_setup = sdv_arch_setup; - x86_platform.i8042_detect = ce4100_i8042_detect; x86_init.resources.probe_roms = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; x86_init.mpparse.find_smp_config = x86_init_noop; diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile 
b/arch/x86/platform/intel-mid/device_libs/Makefile index dd6cfa4ad3ac..61b5ed2b7d40 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -19,7 +19,7 @@ obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o # I2C Devices obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o -obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o +obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o # I2C GPIO Expanders diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 7850128f0026..12a272582cdc 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -161,12 +161,6 @@ out: regulator_has_full_constraints(); } -/* MID systems don't have i8042 controller */ -static int intel_mid_i8042_detect(void) -{ - return 0; -} - /* * Moorestown does not have external NMI source nor port 0x61 to report * NMI status. The possible NMI sources are from pmu as a result of NMI @@ -197,7 +191,6 @@ void __init x86_intel_mid_early_setup(void) x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock; x86_platform.calibrate_tsc = intel_mid_calibrate_tsc; - x86_platform.i8042_detect = intel_mid_i8042_detect; x86_init.timers.wallclock_init = intel_mid_rtc_init; x86_platform.get_nmi_reason = intel_mid_get_nmi_reason; diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c index f5bad40936ac..b8f562049cad 100644 --- a/arch/x86/platform/intel-quark/imr_selftest.c +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -25,7 +25,8 @@ * @fmt: format string. * ... variadic argument list. */ -static void __init imr_self_test_result(int res, const char *fmt, ...) +static __printf(2, 3) +void __init imr_self_test_result(int res, const char *fmt, ...) { va_list vlist; diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 0c2fae8d929d..73eb7fd4aec4 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode) die("Segment relocations found but --realmode not specified\n"); /* Order the relocations for more efficient processing */ - sort_relocs(&relocs16); sort_relocs(&relocs32); #if ELF_BITS == 64 sort_relocs(&relocs32neg); sort_relocs(&relocs64); +#else + sort_relocs(&relocs16); #endif /* Print the relocations */ |
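
The x86_platform.legacy.i8042 tri-state added above replaces the removed i8042_detect() platform callback; consumers are expected to check the state directly. A minimal, hypothetical sketch of how a probe path could consult it follows — the helper name i8042_platform_probe() and the exact handling of X86_LEGACY_I8042_FIRMWARE_ABSENT are illustrative assumptions, not the actual Input-tree patch listed in the shortlog above:

/*
 * Hypothetical consumer of the new tri-state: bail out when the platform
 * can never have an i8042, note when firmware reports it absent, and
 * probe normally otherwise.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <asm/x86_init.h>	/* x86_platform, enum x86_legacy_i8042_state */

static int i8042_platform_probe(void)
{
	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
		return -ENODEV;

	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_FIRMWARE_ABSENT)
		pr_debug("i8042: firmware reports no controller, probing anyway\n");

	/* X86_LEGACY_I8042_EXPECTED_PRESENT: go ahead and probe the ports. */
	return 0;
}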
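
The reverted boot-time check and the "Make sync_core() handle missing CPUID on all 32-bit kernels" change both revolve around CPUs that lack CPUID. Whether a 32-bit part implements CPUID is classically detected by trying to toggle the ID bit (bit 21) in EFLAGS, which is what has_eflag(X86_EFLAGS_ID) does in the removed boot code. A rough freestanding sketch of that test, assuming GCC inline asm on 32-bit x86 and shown only as background, not code from this merge:

/*
 * Classic EFLAGS.ID probe: if bit 21 of EFLAGS can be flipped, the CPU
 * implements CPUID; on 486-class parts without CPUID the bit is fixed.
 * 32-bit only, GCC inline asm.
 */
static int cpu_has_cpuid(void)
{
	unsigned long f0, f1;

	asm volatile("pushfl\n\t"		/* save original EFLAGS        */
		     "pushfl\n\t"
		     "popl %0\n\t"		/* f0 = EFLAGS                 */
		     "movl %0, %1\n\t"
		     "xorl %2, %1\n\t"		/* flip the ID bit             */
		     "pushl %1\n\t"
		     "popfl\n\t"		/* try to load the new value   */
		     "pushfl\n\t"
		     "popl %1\n\t"		/* f1 = resulting EFLAGS       */
		     "popfl"			/* restore original EFLAGS     */
		     : "=&r" (f0), "=&r" (f1)
		     : "ir" (1UL << 21));

	return ((f0 ^ f1) & (1UL << 21)) != 0;
}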