author	Jens Axboe <axboe@kernel.dk>	2023-09-28 07:47:07 -0600
committer	Jens Axboe <axboe@kernel.dk>	2023-09-28 07:47:07 -0600
commit	52e856c38761bae0cea09b25cfbb4d46cd930d45 (patch)
tree	e94c5b41e4ea4c8f95b993c6d875e2e7db78bbf8	/kernel/locking/rwbase_rt.c
parent	73c7e7a91f041f4c2e3c0db1e727163b331c60c9 (diff)
parent	cfa92b6d52071aaa8f27d21affdcb14e7448fbc1 (diff)
Merge branch 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into io_uring-futex
Pull in locking/core from the tip tree, to get the futex2 dependencies
from Peter Zijlstra.

* 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/ww_mutex/test: Make sure we bail out instead of livelock
  locking/ww_mutex/test: Fix potential workqueue corruption
  locking/ww_mutex/test: Use prng instead of rng to avoid hangs at bootup
  futex: Add sys_futex_requeue()
  futex: Add flags2 argument to futex_requeue()
  futex: Propagate flags into get_futex_key()
  futex: Add sys_futex_wait()
  futex: FLAGS_STRICT
  futex: Add sys_futex_wake()
  futex: Validate futex value against futex size
  futex: Flag conversion
  futex: Extend the FUTEX2 flags
  futex: Clarify FUTEX2 flags
  asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()
  futex/pi: Fix recursive rt_mutex waiter state
  locking/rtmutex: Add a lockdep assert to catch potential nested blocking
  locking/rtmutex: Use rt_mutex specific scheduler helpers
  sched: Provide rt_mutex specific scheduler helpers
  sched: Extract __schedule_loop()
  locking/rtmutex: Avoid unconditional slowpath for DEBUG_RT_MUTEXES
  ...
Diffstat (limited to 'kernel/locking/rwbase_rt.c')
-rw-r--r--	kernel/locking/rwbase_rt.c	8
1 file changed, 8 insertions, 0 deletions
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index 25ec0239477c..34a59569db6b 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 	int ret;
 
+	rwbase_pre_schedule();
 	raw_spin_lock_irq(&rtm->wait_lock);
 
 	/*
@@ -125,12 +126,15 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
 	rwbase_rtmutex_unlock(rtm);
 
 	trace_contention_end(rwb, ret);
+	rwbase_post_schedule();
 	return ret;
 }
 
 static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
 					    unsigned int state)
 {
+	lockdep_assert(!current->pi_blocked_on);
+
 	if (rwbase_read_trylock(rwb))
 		return 0;
 
@@ -237,6 +241,8 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	/* Force readers into slow path */
 	atomic_sub(READER_BIAS, &rwb->readers);
 
+	rwbase_pre_schedule();
+
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 	if (__rwbase_write_trylock(rwb))
 		goto out_unlock;
@@ -248,6 +254,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 		if (rwbase_signal_pending_state(state, current)) {
 			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
+			rwbase_post_schedule();
 			trace_contention_end(rwb, -EINTR);
 			return -EINTR;
 		}
@@ -266,6 +273,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+	rwbase_post_schedule();
 	return 0;
 }
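
Note: the pattern this diff applies to both lock paths is a strict bracket: rwbase_pre_schedule() runs once before the task can block on the underlying rtmutex, and rwbase_post_schedule() must run exactly once on every exit path, the -EINTR error return included. Below is a minimal user-space sketch of that balance invariant; the stub names (pre_schedule, post_schedule, write_lock, sched_bracket_depth) are hypothetical stand-ins and do not come from the kernel source.

	#include <stdio.h>
	#include <errno.h>
	#include <stdbool.h>

	/*
	 * Hypothetical stand-ins for the rt_mutex scheduler helpers. The real
	 * helpers flush deferred work before the task may block; here we only
	 * track nesting depth to model the pairing invariant.
	 */
	static int sched_bracket_depth;

	static void pre_schedule(void)  { sched_bracket_depth++; }
	static void post_schedule(void) { sched_bracket_depth--; }

	/*
	 * Models the shape of rwbase_write_lock(): every return path that
	 * follows pre_schedule() must call post_schedule() exactly once,
	 * including the early -EINTR return.
	 */
	static int write_lock(bool interrupted)
	{
		pre_schedule();

		if (interrupted) {       /* the signal-pending path        */
			post_schedule(); /* balanced before early return   */
			return -EINTR;
		}

		post_schedule();         /* the out_unlock path            */
		return 0;
	}

	int main(void)
	{
		write_lock(false);
		write_lock(true);
		/* prints 0 if every pre_schedule() was paired */
		printf("unbalanced depth: %d\n", sched_bracket_depth);
		return sched_bracket_depth != 0;
	}

In the kernel diff itself, the same balance is visible directly: one rwbase_pre_schedule() per slow path, matched by a rwbase_post_schedule() on the normal return, and an extra one inserted on the -EINTR path of rwbase_write_lock().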