author		Linus Torvalds <torvalds@linux-foundation.org>	2024-12-09 10:28:55 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-12-09 10:28:55 -0800
commit		df9e2102de839c587eb50b2a5d1d19b07a6caaf5 (patch)
tree		0e2d8aedc4dc6c9b432a45d14aad33aadb8ad99b /kernel
parent		aeb68937614f4aeceaaa762bd7f0212ce842b797 (diff)
parent		82f9cc094975240885c93effbca7f4603f5de1bf (diff)
Merge tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Remove wrong enqueueing of a task for a later wakeup when a task
   blocks on a RT mutex

 - Do not setup a new deadline entity on a boosted task as that has
   happened already

 - Update preempt= kernel command line param

 - Prevent needless softirqd wakeups in the idle task's context

 - Detect the case where the idle load balancer CPU becomes busy and
   avoid unnecessary load balancing invocation

 - Remove an unnecessary load balancing need_resched() call in
   nohz_csd_func()

 - Allow for raising of SCHED_SOFTIRQ softirq type on RT but retain the
   warning to catch any other cases

 - Remove a wrong warning when a cpuset update makes the task affinity
   no longer a subset of the cpuset

* tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking: rtmutex: Fix wake_q logic in task_blocks_on_rt_mutex
  sched/deadline: Fix warning in migrate_enable for boosted tasks
  sched/core: Update kernel boot parameters for LAZY preempt.
  sched/core: Prevent wakeup of ksoftirqd during idle load balance
  sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy
  sched/core: Remove the unnecessary need_resched() check in nohz_csd_func()
  softirq: Allow raising SCHED_SOFTIRQ from SMP-call-function on RT kernel
  sched: fix warning in sched_setaffinity
  sched/deadline: Fix replenish_dl_new_period dl_server condition
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/rtmutex.c    3
-rw-r--r--  kernel/sched/core.c         4
-rw-r--r--  kernel/sched/deadline.c     3
-rw-r--r--  kernel/sched/fair.c         2
-rw-r--r--  kernel/sched/syscalls.c     2
-rw-r--r--  kernel/softirq.c           15
6 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index ac1365afcc4a..e858de203eb6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1248,10 +1248,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
/* Check whether the waiter should back out immediately */
rtm = container_of(lock, struct rt_mutex, rtmutex);
- preempt_disable();
res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
- wake_up_q(wake_q);
- preempt_enable();
if (res) {
raw_spin_lock(&task->pi_lock);
rt_mutex_dequeue(lock, waiter);
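The hunk above drops the early wake_up_q() flush; the caller-supplied wake_q is
left to be flushed by the caller once the lock handling is done. A minimal sketch
of the usual deferred-wakeup pattern (illustrative fragment assuming kernel
headers; "example_lock" and "waiter_task" are placeholders, not the rtmutex code):

    /* Wakeups queued under the lock are only issued after it is dropped. */
    static void example_release(raw_spinlock_t *example_lock,
                                struct task_struct *waiter_task)
    {
            DEFINE_WAKE_Q(wake_q);

            raw_spin_lock(example_lock);
            /* ... decide which waiters to wake while the state is stable ... */
            wake_q_add(&wake_q, waiter_task);
            raw_spin_unlock(example_lock);

            wake_up_q(&wake_q);     /* actual wakeups happen outside the lock */
    }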
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95e40895a519..c6d8232ad9ee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1283,9 +1283,9 @@ static void nohz_csd_func(void *info)
WARN_ON(!(flags & NOHZ_KICK_MASK));
rq->idle_balance = idle_cpu(cpu);
- if (rq->idle_balance && !need_resched()) {
+ if (rq->idle_balance) {
rq->nohz_idle_balance = flags;
- raise_softirq_irqoff(SCHED_SOFTIRQ);
+ __raise_softirq_irqoff(SCHED_SOFTIRQ);
}
}
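Switching from raise_softirq_irqoff() to __raise_softirq_irqoff() avoids waking
ksoftirqd from the idle task's context: the plain variant may wake the thread when
called from task context, while the "__" variant only marks the softirq pending.
A simplified sketch of that distinction (paraphrased, not verbatim kernel source):

    void raise_softirq_irqoff(unsigned int nr)
    {
            __raise_softirq_irqoff(nr);     /* mark softirq 'nr' pending */

            /* Not in hard/soft interrupt context: nothing will run the
             * softirq on return, so wake ksoftirqd to process it soon.
             * This is exactly the wakeup nohz_csd_func() now skips. */
            if (!in_interrupt() && should_wake_ksoftirqd())
                    wakeup_softirqd();
    }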
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d9d5a702f1a6..db47f33cb7d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
* If it is a deferred reservation, and the server
* is not handling an starvation case, defer it.
*/
- if (dl_se->dl_defer & !dl_se->dl_defer_running) {
+ if (dl_se->dl_defer && !dl_se->dl_defer_running) {
dl_se->dl_throttled = 1;
dl_se->dl_defer_armed = 1;
}
@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se);
} else if ((flags & ENQUEUE_RESTORE) &&
+ !is_dl_boosted(dl_se) &&
dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
setup_new_dl_entity(dl_se);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a59ae2e23daf..aa0238ee4857 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12574,7 +12574,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
* work being done for other CPUs. Next load
* balancing owner will pick it up.
*/
- if (need_resched()) {
+ if (!idle_cpu(this_cpu) && need_resched()) {
if (flags & NOHZ_STATS_KICK)
has_blocked_load = true;
if (flags & NOHZ_NEXT_KICK)
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0d71fcbaf1e3..ff0e5ab4e37c 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1200,7 +1200,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
bool empty = !cpumask_and(new_mask, new_mask,
ctx->user_mask);
- if (WARN_ON_ONCE(empty))
+ if (empty)
cpumask_copy(new_mask, cpus_allowed);
}
__set_cpus_allowed_ptr(p, ctx);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8b41bd13cc3d..4dae6ac2e83f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
}
+#define SCHED_SOFTIRQ_MASK BIT(SCHED_SOFTIRQ)
+
/*
* flush_smp_call_function_queue() can raise a soft interrupt in a function
- * call. On RT kernels this is undesired and the only known functionality
- * in the block layer which does this is disabled on RT. If soft interrupts
- * get raised which haven't been raised before the flush, warn so it can be
+ * call. On RT kernels this is undesired and the only known functionalities
+ * are in the block layer which is disabled on RT, and in the scheduler for
+ * idle load balancing. If soft interrupts get raised which haven't been
+ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
* investigated.
*/
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
- if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+ unsigned int is_pending = local_softirq_pending();
+
+ if (unlikely(was_pending != is_pending)) {
+ WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
invoke_softirq();
+ }
}
#else /* CONFIG_PREEMPT_RT */
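The masked comparison lets a SCHED_SOFTIRQ raised during the flush pass silently
while still flagging anything else. A short worked example of the bit arithmetic
(values chosen purely for illustration):

    /* Suppose TIMER_SOFTIRQ was pending before the flush and the flush itself
     * raised SCHED_SOFTIRQ (the idle-load-balance case this change permits). */
    unsigned int was_pending = BIT(TIMER_SOFTIRQ);
    unsigned int is_pending  = BIT(TIMER_SOFTIRQ) | BIT(SCHED_SOFTIRQ);

    /* Masking SCHED_SOFTIRQ out of the new value restores the old one, so the
     * WARN_ON_ONCE() stays quiet and invoke_softirq() handles the pending work. */
    WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));

    /* Had any other softirq (say TASKLET_SOFTIRQ) been raised instead, the
     * masked value would still differ from was_pending and the warning fires. */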