From edcd591c77a48da753456f92daf8bb50fe9bac93 Mon Sep 17 00:00:00 2001
From: Jonathan Corbet
Date: Mon, 7 Sep 2015 13:18:03 -0600
Subject: locking/static_keys: Fix a silly typo

Commit: 412758cb2670 ("jump label, locking/static_keys: Update docs")
introduced a typo that might as well get fixed.

Signed-off-by: Jonathan Corbet
Cc: Andrew Morton
Cc: Jason Baron
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20150907131803.54c027e1@lwn.net
Signed-off-by: Ingo Molnar
---
 include/linux/jump_label.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 7f653e8f6690..0684bd3a48fc 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -22,7 +22,7 @@
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
  * static_key_likely()
- * statick_key_unlikely()
+ * static_key_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
--
cgit v1.2.3-70-g09d2

From 43b3f02899f74ae9914a39547cc5492156f0027a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 4 Sep 2015 17:25:23 +0200
Subject: locking/qspinlock/x86: Fix performance regression under unaccelerated VMs

Dave ran into horrible performance on a VM without PARAVIRT_SPINLOCKS
set and Linus noted that the test-and-set implementation was retarded.

One should spin on the variable with a load, not a RMW.

While there, remove 'queued' from the name, as the lock isn't queued
at all, but a simple test-and-set.

Suggested-by: Linus Torvalds
Reported-by: Dave Chinner
Tested-by: Dave Chinner
Signed-off-by: Peter Zijlstra (Intel)
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: stable@vger.kernel.org # v4.2+
Link: http://lkml.kernel.org/r/20150904152523.GR18673@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/qspinlock.h | 16 ++++++++++++----
 include/asm-generic/qspinlock.h  |  4 ++--
 kernel/locking/qspinlock.c       |  2 +-
 3 files changed, 15 insertions(+), 7 deletions(-)

(limited to 'include')

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1cba3..8dde3bdc4a05 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,15 +39,23 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif

-#define virt_queued_spin_lock virt_queued_spin_lock
+#define virt_spin_lock virt_spin_lock

-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;

-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-		cpu_relax();
+	/*
+	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+	 * back to a Test-and-Set spinlock, because fair locks have
+	 * horrible lock 'holder' preemption issues.
+	 */
+
+	do {
+		while (atomic_read(&lock->val) != 0)
+			cpu_relax();
+	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

 	return true;
 }
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf1..e2aadbc7151f 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }

-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
 	return false;
 }
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c8818541d..87e9ce6a63c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (pv_enabled())
 		goto queue;

-	if (virt_queued_spin_lock(lock))
+	if (virt_spin_lock(lock))
 		return;

 	/*
--
cgit v1.2.3-70-g09d2
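The patch above is the classic test-and-set versus test-and-test-and-set
distinction: spinning on atomic_cmpxchg() takes the cache line in exclusive
state on every failed attempt, so multiple waiters keep bouncing the line
between CPUs, while spinning on atomic_read() lets all waiters share the
line read-only until the holder releases the lock. A rough user-space
sketch of the before/after loops, using C11 atomics; the tts_* names are
invented for illustration and are not kernel interfaces:

	#include <stdatomic.h>

	struct tts_lock {
		atomic_int val;
	};

	/* Old loop: every failed cmpxchg still acquires the cache line
	 * exclusively, generating coherence traffic among waiters. */
	static void tts_lock_rmw(struct tts_lock *l)
	{
		int expected = 0;

		while (!atomic_compare_exchange_weak(&l->val, &expected, 1))
			expected = 0;	/* cmpxchg wrote back the observed value */
	}

	/* New loop: spin with a plain load and only attempt the RMW once
	 * the lock looks free (test-and-test-and-set). */
	static void tts_lock(struct tts_lock *l)
	{
		int expected;

		do {
			while (atomic_load_explicit(&l->val, memory_order_relaxed))
				;	/* the kernel calls cpu_relax() here */
			expected = 0;
		} while (!atomic_compare_exchange_weak(&l->val, &expected, 1));
	}

	static void tts_unlock(struct tts_lock *l)
	{
		atomic_store_explicit(&l->val, 0, memory_order_release);
	}

The do/while shape mirrors the patched kernel code: wait with plain loads
first, then attempt the RMW; the cmpxchg can still fail if another waiter
won the race, hence the outer loop.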
From 1975dbc276c6ab62230cf4f9df5ddc9ff0e0e473 Mon Sep 17 00:00:00 2001
From: Jonathan Corbet
Date: Mon, 14 Sep 2015 17:11:05 -0600
Subject: locking/static_keys: Fix up the static keys documentation

Fix a few small mistakes in the static key documentation and delete
an unneeded sentence.

Suggested-by: Jason Baron
Signed-off-by: Jonathan Corbet
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20150914171105.511e1e21@lwn.net
Signed-off-by: Ingo Molnar
---
 Documentation/static-keys.txt |  4 ++--
 include/linux/jump_label.h    | 10 ++++------
 2 files changed, 6 insertions(+), 8 deletions(-)

(limited to 'include')

diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index ec911583f6c5..477927becacb 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -15,8 +15,8 @@
 The updated API replacements are:

 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
-static_key_likely()
-static_key_unlikely()
+static_branch_likely()
+static_branch_unlikely()

 0) Abstract
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0684bd3a48fc..f1094238ab2a 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,8 +21,8 @@
  *
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
- * static_key_likely()
- * static_key_unlikely()
+ * static_branch_likely()
+ * static_branch_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
@@ -45,12 +45,10 @@
  * statement, setting the key to true requires us to patch in a jump
  * to the out-of-line of true branch.
  *
- * In addtion to static_branch_{enable,disable}, we can also reference count
+ * In addition to static_branch_{enable,disable}, we can also reference count
  * the key or branch direction via static_branch_{inc,dec}. Thus,
  * static_branch_inc() can be thought of as a 'make more true' and
- * static_branch_dec() as a 'make more false'. The inc()/dec()
- * interface is meant to be used exclusively from the inc()/dec() for a given
- * key.
+ * static_branch_dec() as a 'make more false'.
  *
  * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
--
cgit v1.2.3-70-g09d2
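To make the corrected API names concrete, a schematic use of static keys
follows. DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and
static_branch_{enable,disable}() are the real <linux/jump_label.h>
interface named in the patches above; the key name and the functions
around it are invented for this sketch.

	#include <linux/jump_label.h>
	#include <linux/types.h>

	/* Invented key and helpers; only the macros are the real API. */
	DEFINE_STATIC_KEY_FALSE(sketch_debug_key);

	static void expensive_debug_stats(void)
	{
		/* ... */
	}

	void sketch_hot_path(void)
	{
		/* Compiles to straight-line code with a no-op until the key
		 * is enabled; no load and no conditional in the fast path. */
		if (static_branch_unlikely(&sketch_debug_key))
			expensive_debug_stats();
	}

	/* Slow path (e.g. a debugfs write): rewrites every branch site,
	 * with machine-wide synchronization as the comment above warns. */
	void sketch_set_debug(bool on)
	{
		if (on)
			static_branch_enable(&sketch_debug_key);
		else
			static_branch_disable(&sketch_debug_key);
	}

Where several independent users may need the branch on at the same time,
static_branch_inc()/static_branch_dec() reference-count the key instead of
toggling it outright, as the corrected comment text describes.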