author     Linus Torvalds <torvalds@linux-foundation.org>  2024-12-08 11:38:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-12-08 11:38:56 -0800
commit     84262262177b98cf4e57e8c010119576d3c6bc2b (patch)
tree       871311b28a1fae129dff754020864cecb1bcd1f3 /arch/x86/kernel/cpu/cacheinfo.c
parent     553c89ec31746ff96fc5562943fe5b1c9b1e9276 (diff)
parent     492077668fb453b8b16c842fcf3fafc2ebc190e9 (diff)
Merge tag 'x86_urgent_for_v6.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:
- Have the Automatic IBRS setting check on AMD not falsely fire in the
guest when it has already been set on the host
- Make sure memory for the cacheinfo structures is allocated to address
a boot NULL ptr dereference on Intel Meteor Lake, which has differing
numbers of subleaves in its CPUID(4) leaf
- Take care of restoring the GDT on the kexec return path too, as the
kernel expects
- Make sure SMP is not disabled when IO-APIC is disabled on the kernel
cmdline
- Add a PGD flag _PAGE_NOPTISHADOW to instruct the PTI machinery not to
propagate changes to the kernel-mode page tables to the user-mode
portion
- Mark Intel Lunar Lake as affected by an issue where MONITOR wakeups
can get lost, causing user-visible delays
- Make sure PKRU is properly restored with XRSTOR on AMD after a PKRU
write of 0 (WRPKRU), which marks PKRU as being in its init state and
thus loses the actual buffer contents (a minimal user-space sketch of
such a write follows this list)
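
For illustration only (not part of the pull message): a minimal user-space
sketch of the WRPKRU-of-0 operation mentioned in the last item, using the
compiler's PKU intrinsics from <immintrin.h>. It assumes a CPU and kernel
with protection keys enabled (CR4.PKE), building with -mpku, and a made-up
file name; without OSPKE, RDPKRU/WRPKRU fault with #UD.

/*
 * Illustrative sketch, not kernel code: write 0 to PKRU with WRPKRU and
 * read it back with RDPKRU.  Build (assumed): gcc -O2 -mpku pkru_demo.c
 */
#include <stdio.h>
#include <immintrin.h>

int main(void)
{
	/* RDPKRU: read the current per-thread protection-key rights */
	printf("PKRU before: %#x\n", _rdpkru_u32());

	/*
	 * WRPKRU with 0 grants access for every protection key and puts
	 * PKRU into its XSAVE init value (0), the state the fix above has
	 * to handle when the signal frame is written on AMD.
	 */
	_wrpkru(0);

	printf("PKRU after:  %#x\n", _rdpkru_u32());
	return 0;
}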
* tag 'x86_urgent_for_v6.13_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/CPU/AMD: WARN when setting EFER.AUTOIBRS if and only if the WRMSR fails
x86/cacheinfo: Delete global num_cache_leaves
cacheinfo: Allocate memory during CPU hotplug if not done from the primary CPU
x86/kexec: Restore GDT on return from ::preserve_context kexec
x86/cpu/topology: Remove limit of CPUs due to disabled IO/APIC
x86/mm: Add _PAGE_NOPTISHADOW bit to avoid updating userspace page tables
x86/cpu: Add Lunar Lake to list of CPUs with a broken MONITOR implementation
x86/pkeys: Ensure updated PKRU value is XRSTOR'd
x86/pkeys: Change caller of update_pkru_in_sigframe()
Diffstat (limited to 'arch/x86/kernel/cpu/cacheinfo.c')
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c  43
1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 392d09c936d6..e6fa03ed9172 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
 	struct amd_northbridge *nb;
 };
 
-static unsigned short num_cache_leaves;
-
 /* AMD doesn't have CPUID4. Emulate it here to report the same
    information to the user. This makes some assumptions about the machine:
    L2 not shared, no SMT etc. that is currently true on AMD CPUs.
@@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
 
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-		num_cache_leaves = find_num_cache_leaves(c);
+		ci->num_leaves = find_num_cache_leaves(c);
 	} else if (c->extended_cpuid_level >= 0x80000006) {
 		if (cpuid_edx(0x80000006) & 0xf000)
-			num_cache_leaves = 4;
+			ci->num_leaves = 4;
 		else
-			num_cache_leaves = 3;
+			ci->num_leaves = 3;
 	}
 }
 
 void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
 {
-	num_cache_leaves = find_num_cache_leaves(c);
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
+
+	ci->num_leaves = find_num_cache_leaves(c);
 }
 
 void init_intel_cacheinfo(struct cpuinfo_x86 *c)
@@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
 
 	if (c->cpuid_level > 3) {
-		static int is_initialized;
-
-		if (is_initialized == 0) {
-			/* Init num_cache_leaves from boot CPU */
-			num_cache_leaves = find_num_cache_leaves(c);
-			is_initialized++;
-		}
+		/*
+		 * There should be at least one leaf. A non-zero value means
+		 * that the number of leaves has been initialized.
+		 */
+		if (!ci->num_leaves)
+			ci->num_leaves = find_num_cache_leaves(c);
 
 		/*
 		 * Whenever possible use cpuid(4), deterministic cache
 		 * parameters cpuid leaf to find the cache details
 		 */
-		for (i = 0; i < num_cache_leaves; i++) {
+		for (i = 0; i < ci->num_leaves; i++) {
 			struct _cpuid4_info_regs this_leaf = {};
 			int retval;
 
@@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
 	 * trace cache
 	 */
-	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
+	if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2  call */
 		int j, n;
 		unsigned int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
 		int only_trace = 0;
 
-		if (num_cache_leaves != 0 && c->x86 == 15)
+		if (ci->num_leaves && c->x86 == 15)
 			only_trace = 1;
 
 		/* Number of times to iterate */
@@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 
 int init_cache_level(unsigned int cpu)
 {
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
 
-	if (!num_cache_leaves)
+	/* There should be at least one leaf. */
+	if (!ci->num_leaves)
 		return -ENOENT;
-	if (!this_cpu_ci)
-		return -EINVAL;
-	this_cpu_ci->num_levels = 3;
-	this_cpu_ci->num_leaves = num_cache_leaves;
+
 	return 0;
 }
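
A side note on the cacheinfo change above: the number of cache leaves is
found by walking CPUID(4) subleaves until the cache-type field (EAX bits
4:0) reads 0, which is what find_num_cache_leaves() does on Intel and why a
hybrid part like Meteor Lake can report different subleaf counts on
different cores. Below is a minimal user-space sketch of that walk, for
illustration only; the helper name and the loop bound are made up here, and
the GCC/Clang <cpuid.h> helpers are assumed.

/*
 * Illustrative sketch: count CPUID leaf 4 subleaves the same way the
 * kernel derives ci->num_leaves on Intel CPUs.
 */
#include <stdio.h>
#include <cpuid.h>

static unsigned int count_cpuid4_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int i = 0;

	/* Leaf 4, subleaf i: deterministic cache parameters */
	while (i < 16 &&
	       __get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx) &&
	       (eax & 0x1f) != 0)	/* EAX[4:0] == 0 means "no more caches" */
		i++;

	return i;
}

int main(void)
{
	printf("CPUID(4) reports %u cache leaves on this CPU\n",
	       count_cpuid4_leaves());
	return 0;
}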