author     Mauro Carvalho Chehab <mchehab+samsung@kernel.org>   2019-02-18 10:12:12 -0500
committer  Mauro Carvalho Chehab <mchehab+samsung@kernel.org>   2019-02-18 10:12:12 -0500
commit     5f09bc8cc4010a3ea17c5881a62fc45192ebe7b0 (patch)
tree       c8e30ba4eaf357699ef97a786bf56661f4591be8 /kernel/locking/rtmutex.c
parent     6fd369dd1cb65a032f1ab9227033ecb7b759656d (diff)
parent     a3b22b9f11d9fbc48b0291ea92259a5a810e9438 (diff)
Merge tag 'v5.0-rc7' into patchwork
Linux 5.0-rc7

* tag 'v5.0-rc7': (1667 commits)
  Linux 5.0-rc7
  Input: elan_i2c - add ACPI ID for touchpad in Lenovo V330-15ISK
  Input: st-keyscan - fix potential zalloc NULL dereference
  Input: apanel - switch to using brightness_set_blocking()
  powerpc/64s: Fix possible corruption on big endian due to pgd/pud_present()
  efi/arm: Revert "Defer persistent reservations until after paging_init()"
  arm64, mm, efi: Account for GICv3 LPI tables in static memblock reserve table
  sunrpc: fix 4 more call sites that were using stack memory with a scatterlist
  include/linux/module.h: copy __init/__exit attrs to init/cleanup_module
  Compiler Attributes: add support for __copy (gcc >= 9)
  lib/crc32.c: mark crc32_le_base/__crc32c_le_base aliases as __pure
  auxdisplay: ht16k33: fix potential user-after-free on module unload
  x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls
  i2c: bcm2835: Clear current buffer pointers and counts after a transfer
  i2c: cadence: Fix the hold bit setting
  drm: Use array_size() when creating lease
  dm thin: fix bug where bio that overwrites thin block ignores FUA
  Revert "exec: load_script: don't blindly truncate shebang string"
  Revert "gfs2: read journal in large chunks to locate the head"
  net: ethernet: freescale: set FEC ethtool regs version
  ...

Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--   kernel/locking/rtmutex.c   37
1 file changed, 32 insertions(+), 5 deletions(-)
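For orientation before the diff: the rtmutex change picked up by this merge moves the failure-path remove_waiter() call out of __rt_mutex_start_proxy_lock() and into rt_mutex_start_proxy_lock(), adds a lockdep assertion that wait_lock is held, and updates the kerneldoc so callers know the underscore variant leaves the @waiter enqueued on failure. The sketch below illustrates that caller-side contract; it is not code from this merge, the function name proxy_lock_sketch() is made up, and the real PI-futex path carries additional futex and hrtimer bookkeeping that is omitted here.

/* Illustrative sketch only; see the note above for what is assumed. */
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include "rtmutex_common.h"	/* kernel-internal declarations of the proxy-lock API */

static int proxy_lock_sketch(struct rt_mutex *lock, struct task_struct *task,
			     struct hrtimer_sleeper *to)
{
	struct rt_mutex_waiter waiter;
	int ret;

	rt_mutex_init_waiter(&waiter);

	/* Enqueue @task as a waiter; wait_lock must be held (now lockdep-asserted). */
	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, &waiter, task);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (ret > 0)		/* acquired for @task; a real caller would wake @task here */
		return 0;

	if (!ret)		/* blocked on the lock: now actually wait for it */
		ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);

	/*
	 * On failure the waiter is still enqueued, since the underscore
	 * variant no longer removes it; rt_mutex_cleanup_proxy_lock()
	 * dequeues it unless ownership was granted in the meantime.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
		ret = 0;

	return ret;
}

As the new NOTE in the kerneldoc below spells out, every __rt_mutex_start_proxy_lock() call must be followed by either rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock(), while the non-underscore wrapper keeps the old behaviour of dequeuing the waiter itself on failure.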
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 581edcc63c26..978d63a8261c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
rt_mutex_set_owner(lock, NULL);
}
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @task: the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ * 0 - task blocked on lock
+ * 1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
int ret;
+ lockdep_assert_held(&lock->wait_lock);
+
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-
debug_rt_mutex_print_deadlock(waiter);
return ret;
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
*
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
* Returns:
* 0 - task blocked on lock
* 1 - acquired the lock for task, caller should wake it up
* <0 - error
*
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
*/
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
raw_spin_lock_irq(&lock->wait_lock);
ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
* @lock: the rt_mutex we were woken on
* @waiter: the pre-initialized rt_mutex_waiter
*
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
*
* Unless we acquired the lock; we're still enqueued on the wait-list and can
* in fact still be granted ownership until we're removed. Therefore we can