author		Jens Axboe <axboe@kernel.dk>	2023-09-28 07:47:07 -0600
committer	Jens Axboe <axboe@kernel.dk>	2023-09-28 07:47:07 -0600
commit		52e856c38761bae0cea09b25cfbb4d46cd930d45 (patch)
tree		e94c5b41e4ea4c8f95b993c6d875e2e7db78bbf8 /kernel/sched/core.c
parent		73c7e7a91f041f4c2e3c0db1e727163b331c60c9 (diff)
parent		cfa92b6d52071aaa8f27d21affdcb14e7448fbc1 (diff)
Merge branch 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into io_uring-futex
Pull in locking/core from the tip tree, to get the futex2 dependencies from Peter Zijlstra.

* 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/ww_mutex/test: Make sure we bail out instead of livelock
  locking/ww_mutex/test: Fix potential workqueue corruption
  locking/ww_mutex/test: Use prng instead of rng to avoid hangs at bootup
  futex: Add sys_futex_requeue()
  futex: Add flags2 argument to futex_requeue()
  futex: Propagate flags into get_futex_key()
  futex: Add sys_futex_wait()
  futex: FLAGS_STRICT
  futex: Add sys_futex_wake()
  futex: Validate futex value against futex size
  futex: Flag conversion
  futex: Extend the FUTEX2 flags
  futex: Clarify FUTEX2 flags
  asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()
  futex/pi: Fix recursive rt_mutex waiter state
  locking/rtmutex: Add a lockdep assert to catch potential nested blocking
  locking/rtmutex: Use rt_mutex specific scheduler helpers
  sched: Provide rt_mutex specific scheduler helpers
  sched: Extract __schedule_loop()
  locking/rtmutex: Avoid unconditional slowpath for DEBUG_RT_MUTEXES
  ...
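Of these, the pieces that matter for this file are the rt_mutex specific scheduler helpers and the __schedule_loop() extraction, both visible in the kernel/sched/core.c diff below. As a rough, illustrative sketch of how a blocking lock slowpath is expected to bracket its wait loop with the new helpers -- the lock type and try-acquire function here are placeholders, not kernel API; only the rt_mutex_*_schedule() calls come from this merge:

	/* Illustrative only: example_lock / example_try_acquire() are placeholders. */
	static int example_slowlock(struct example_lock *lock)
	{
		/* Flush per-task submission state (block plug, worker hooks) once, before blocking. */
		rt_mutex_pre_schedule();

		while (!example_try_acquire(lock)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			/* Block without re-running sched_submit_work(). */
			rt_mutex_schedule();
		}
		__set_current_state(TASK_RUNNING);

		/* Restore the worker bookkeeping normally done by schedule() itself. */
		rt_mutex_post_schedule();
		return 0;
	}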
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	62
1 file changed, 50 insertions(+), 12 deletions(-)
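The sched_submit_work() hunk below also wraps the submission path in a LD_WAIT_CONFIG wait-override lockdep map, so that any blocking primitive reached from that path (which would recurse back into schedule()) is flagged by lockdep. A minimal sketch of the acquire/release pairing it relies on, assuming the standard lockdep map helpers and a placeholder work function:

	static DEFINE_WAIT_OVERRIDE_MAP(example_map, LD_WAIT_CONFIG);

	static void example_non_blocking_section(void)
	{
		/*
		 * Everything between the try-acquire and the release is annotated
		 * as an LD_WAIT_CONFIG section: taking a sleeping lock in here is
		 * reported by lockdep instead of silently recursing into schedule().
		 */
		lock_map_acquire_try(&example_map);
		do_non_blocking_work();		/* placeholder */
		lock_map_release(&example_map);
	}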
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 802551e0009b..9b075b541865 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6720,10 +6720,14 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
+ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
unsigned int task_flags;
- if (task_is_running(tsk))
- return;
+ /*
+ * Establish LD_WAIT_CONFIG context to ensure none of the code called
+ * will use a blocking primitive -- which would lead to recursion.
+ */
+ lock_map_acquire_try(&sched_map);
task_flags = tsk->flags;
/*
@@ -6749,6 +6753,8 @@ static inline void sched_submit_work(struct task_struct *tsk)
* make sure to submit it to avoid deadlocks.
*/
blk_flush_plug(tsk->plug, true);
+
+ lock_map_release(&sched_map);
}
static void sched_update_worker(struct task_struct *tsk)
@@ -6761,16 +6767,26 @@ static void sched_update_worker(struct task_struct *tsk)
}
}
-asmlinkage __visible void __sched schedule(void)
+static __always_inline void __schedule_loop(unsigned int sched_mode)
{
- struct task_struct *tsk = current;
-
- sched_submit_work(tsk);
do {
preempt_disable();
- __schedule(SM_NONE);
+ __schedule(sched_mode);
sched_preempt_enable_no_resched();
} while (need_resched());
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+ struct task_struct *tsk = current;
+
+#ifdef CONFIG_RT_MUTEXES
+ lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+ if (!task_is_running(tsk))
+ sched_submit_work(tsk);
+ __schedule_loop(SM_NONE);
sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
@@ -6834,11 +6850,7 @@ void __sched schedule_preempt_disabled(void)
#ifdef CONFIG_PREEMPT_RT
void __sched notrace schedule_rtlock(void)
{
- do {
- preempt_disable();
- __schedule(SM_RTLOCK_WAIT);
- sched_preempt_enable_no_resched();
- } while (need_resched());
+ __schedule_loop(SM_RTLOCK_WAIT);
}
NOKPROBE_SYMBOL(schedule_rtlock);
#endif
@@ -7034,6 +7046,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
#ifdef CONFIG_RT_MUTEXES
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function we get to compare
+ * notes.
+ */
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+ lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+ sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+ lockdep_assert(current->sched_rt_mutex);
+ __schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+ sched_update_worker(current);
+ lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
{
if (pi_task)