author	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-19 12:43:11 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-11-19 12:43:11 -0800
commit	364eeb79a213fcf9164208b53764223ad522d6b3 (patch)
tree	63ea70096151ca2a217edf29ba6fe65e6be4be1b /arch/x86
parent	d8d78a90e7fca1ce7c90fa791400b287bc5b42a1 (diff)
parent	3b49a347d751553b1d1be69c8619ae2e85fdc28d (diff)
Merge tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "Lockdep:

   - Enable PROVE_RAW_LOCK_NESTING with PROVE_LOCKING (Sebastian
     Andrzej Siewior)

   - Add lockdep_cleanup_dead_cpu() (David Woodhouse)

  futexes:

   - Use atomic64_inc_return() in get_inode_sequence_number() (Uros
     Bizjak)

   - Use atomic64_try_cmpxchg_relaxed() in get_inode_sequence_number()
     (Uros Bizjak)

  RT locking:

   - Add sparse annotation PREEMPT_RT's locking (Sebastian Andrzej
     Siewior)

  spinlocks:

   - Use atomic_try_cmpxchg_release() in osq_unlock() (Uros Bizjak)

  atomics:

   - x86: Use ALT_OUTPUT_SP() for __alternative_atomic64() (Uros
     Bizjak)

   - x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu() (Uros
     Bizjak)

  KCSAN, seqlocks:

   - Support seqcount_latch_t (Marco Elver)

  <linux/cleanup.h>:

   - Add if_not_guard() conditional guard helper (David Lechner)

   - Adjust scoped_guard() macros to avoid potential warning (Przemek
     Kitszel)

   - Remove address space of returned pointer (Uros Bizjak)

  WW mutexes:

   - locking/ww_mutex: Adjust to lockdep nest_lock requirements (Thomas
     Hellström)

  Rust integration:

   - Fix raw_spin_lock initialization on PREEMPT_RT (Eder Zulian)

  Misc cleanups & fixes:

   - lockdep: Fix wait-type check related warnings (Ahmed Ehab)

   - lockdep: Use info level for initial info messages (Jiri Slaby)

   - spinlocks: Make __raw_* lock ops static (Geert Uytterhoeven)

   - pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
     (Qiuxu Zhuo)

   - iio: magnetometer: Fix if () scoped_guard() formatting (Stephen
     Rothwell)

   - rtmutex: Fix misleading comment (Peter Zijlstra)

   - percpu-rw-semaphores: Fix grammar in percpu-rw-semaphore.rst (Xiu
     Jianfeng)"

* tag 'locking-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  locking/Documentation: Fix grammar in percpu-rw-semaphore.rst
  iio: magnetometer: fix if () scoped_guard() formatting
  rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
  kcsan, seqlock: Fix incorrect assumption in read_seqbegin()
  seqlock, treewide: Switch to non-raw seqcount_latch interface
  kcsan, seqlock: Support seqcount_latch_t
  time/sched_clock: Broaden sched_clock()'s instrumentation coverage
  time/sched_clock: Swap update_clock_read_data() latch writes
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __arch_{,try_}cmpxchg64_emu()
  locking/atomic/x86: Use ALT_OUTPUT_SP() for __alternative_atomic64()
  cleanup: Add conditional guard helper
  cleanup: Adjust scoped_guard() macros to avoid potential warning
  locking/osq_lock: Use atomic_try_cmpxchg_release() in osq_unlock()
  cleanup: Remove address space of returned pointer
  locking/rtmutex: Fix misleading comment
  locking/rt: Annotate unlock followed by lock for sparse.
  locking/rt: Add sparse annotation for RCU.
  locking/rt: Remove one __cond_lock() in RT's spin_trylock_irqsave()
  locking/rt: Add sparse annotation PREEMPT_RT's sleeping locks.
  locking/pvqspinlock: Convert fields of 'enum vcpu_state' to uppercase
  ...
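As a side note on the <linux/cleanup.h> items above: scoped_guard() ties lock ownership to a block, and the new if_not_guard() helper covers the conditional-acquisition case. A minimal kernel-style sketch of both; the names demo_lock, demo_value and demo_read*() are invented for illustration and are not taken from the merged code:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);		/* hypothetical lock */
static int demo_value;			/* hypothetical shared state */

static int demo_read(void)
{
	/* Lock held only for the block; dropped on every exit path. */
	scoped_guard(mutex, &demo_lock) {
		if (demo_value < 0)
			return -EINVAL;
	}
	return 0;
}

static int demo_read_interruptible(void)
{
	/* The body runs only if the interruptible acquisition fails;
	 * otherwise the lock is held until the end of the scope. */
	if_not_guard(mutex_intr, &demo_lock)
		return -EINTR;
	return demo_value;
}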
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/atomic64_32.h	3
-rw-r--r--	arch/x86/include/asm/cmpxchg_32.h	6
-rw-r--r--	arch/x86/kernel/tsc.c	5
3 files changed, 8 insertions, 6 deletions
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 1f650b4dde50..6c6e9b9f98a4 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -51,7 +51,8 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
asm volatile("call %c[func]" \
- : out : [func] "i" (atomic64_##g##_cx8), ## in)
+ : ALT_OUTPUT_SP(out) \
+ : [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
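For context on both x86 hunks in this diff: when an inline asm contains a "call" (here, to the out-of-line atomic64_*_cx8 helpers used on CPUs without CMPXCHG8B), the compiler must be told that the stack pointer is used by the asm, or it may lay out the frame in a way the call corrupts. ALT_OUTPUT_SP() prepends that constraint to the asm's output list; roughly, quoted from memory from arch/x86/include/asm/asm.h, so treat the exact spelling as approximate:

register unsigned long current_stack_pointer asm(_ASM_SP);

/* Mark the stack pointer as an in/out operand of the asm: */
#define ASM_CALL_CONSTRAINT	"+r" (current_stack_pointer)

/* Prepend ASM_CALL_CONSTRAINT to an output-constraint list: */
#define ALT_OUTPUT_SP(...)	ASM_CALL_CONSTRAINT, ## __VA_ARGS__

With that, the patched __alternative_atomic64() above informs the compiler that the "call %c[func]" touches the stack, instead of listing only its real outputs.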
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 62cef2113ca7..fd1282a783dd 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -94,7 +94,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
asm volatile(ALTERNATIVE(_lock_loc \
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
- : "+a" (o.low), "+d" (o.high) \
+ : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high)) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
@@ -123,8 +123,8 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
"call cmpxchg8b_emu", \
_lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
CC_SET(e) \
- : CC_OUT(e) (ret), \
- "+a" (o.low), "+d" (o.high) \
+ : ALT_OUTPUT_SP(CC_OUT(e) (ret), \
+ "+a" (o.low), "+d" (o.high)) \
: "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
: "memory"); \
\
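Not part of this file, but related to the try_cmpxchg conversions in the merge message (get_inode_sequence_number(), osq_unlock()): the try_cmpxchg*() family updates the expected value in place on failure, which removes the extra re-read from classic cmpxchg() loops. A hedged sketch of the pattern; demo_assign_once() and its slot are invented for illustration, not the merged futex code:

#include <linux/atomic.h>
#include <linux/types.h>

/* Install 'new' in *slot only if *slot is still zero; return the
 * value the slot ends up holding. */
static s64 demo_assign_once(atomic64_t *slot, s64 new)
{
	s64 old = 0;

	/* On failure, 'old' is updated to the current value of *slot,
	 * so no separate atomic64_read() is needed to see who won. */
	if (!atomic64_try_cmpxchg_relaxed(slot, &old, new))
		return old;
	return new;
}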
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index dfe6847fd99e..67aeaba4ba9c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -174,10 +174,11 @@ static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long ts
c2n = per_cpu_ptr(&cyc2ns, cpu);
- raw_write_seqcount_latch(&c2n->seq);
+ write_seqcount_latch_begin(&c2n->seq);
c2n->data[0] = data;
- raw_write_seqcount_latch(&c2n->seq);
+ write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
+ write_seqcount_latch_end(&c2n->seq);
}
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
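The hunk above moves the cyc2ns writer to the non-raw seqcount_latch write API from this merge (write_seqcount_latch_begin()/write_seqcount_latch()/write_seqcount_latch_end()), which makes the writer visible to KCSAN. For reference, the matching reader side of a latch follows the pattern below; this is a sketch with an invented container (struct demo_latch, u64 payloads), not the actual tsc.c reader, which open-codes its access:

#include <linux/seqlock.h>
#include <linux/types.h>

struct demo_latch {
	seqcount_latch_t seq;
	u64 data[2];		/* two copies, published alternately */
};

static u64 demo_latch_read(struct demo_latch *l)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_latch(&l->seq);
		/* Even sequence => data[0] is stable; odd => data[1]. */
		val = l->data[seq & 1];
	} while (read_seqcount_latch_retry(&l->seq, seq));

	return val;
}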