-rw-r--r--   Documentation/admin-guide/kernel-parameters.txt |  5
-rw-r--r--   kernel/locking/rtmutex.c                        |  3
-rw-r--r--   kernel/sched/core.c                             |  4
-rw-r--r--   kernel/sched/deadline.c                         |  3
-rw-r--r--   kernel/sched/fair.c                             |  2
-rw-r--r--   kernel/sched/syscalls.c                         |  2
-rw-r--r--   kernel/softirq.c                                | 15
7 files changed, 22 insertions, 12 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index dc663c0ca670..3872bc6ec49d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4822,6 +4822,11 @@
 			       can be preempted anytime. Tasks will also yield
 			       contended spinlocks (if the critical section isn't
 			       explicitly preempt disabled beyond the lock itself).
+			lazy - Scheduler controlled. Similar to full but instead
+			       of preempting the task immediately, the task gets
+			       one HZ tick time to yield itself before the
+			       preemption will be forced. One preemption is when the
+			       task returns to user space.
 
 	print-fatal-signals=
 			[KNL] debug: print fatal signals
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index ac1365afcc4a..e858de203eb6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1248,10 +1248,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 
 		/* Check whether the waiter should back out immediately */
 		rtm = container_of(lock, struct rt_mutex, rtmutex);
-		preempt_disable();
 		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
-		wake_up_q(wake_q);
-		preempt_enable();
 		if (res) {
 			raw_spin_lock(&task->pi_lock);
 			rt_mutex_dequeue(lock, waiter);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95e40895a519..c6d8232ad9ee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1283,9 +1283,9 @@
 	WARN_ON(!(flags & NOHZ_KICK_MASK));
 
 	rq->idle_balance = idle_cpu(cpu);
-	if (rq->idle_balance && !need_resched()) {
+	if (rq->idle_balance) {
 		rq->nohz_idle_balance = flags;
-		raise_softirq_irqoff(SCHED_SOFTIRQ);
+		__raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d9d5a702f1a6..db47f33cb7d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
 	 * If it is a deferred reservation, and the server
 	 * is not handling an starvation case, defer it.
 	 */
-	if (dl_se->dl_defer & !dl_se->dl_defer_running) {
+	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
 		dl_se->dl_throttled = 1;
 		dl_se->dl_defer_armed = 1;
 	}
@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se);
 	} else if ((flags & ENQUEUE_RESTORE) &&
+		   !is_dl_boosted(dl_se) &&
 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
 		setup_new_dl_entity(dl_se);
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a59ae2e23daf..aa0238ee4857 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12574,7 +12574,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
		 * work being done for other CPUs. Next load
		 * balancing owner will pick it up.
		 */
-		if (need_resched()) {
+		if (!idle_cpu(this_cpu) && need_resched()) {
			if (flags & NOHZ_STATS_KICK)
				has_blocked_load = true;
			if (flags & NOHZ_NEXT_KICK)
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0d71fcbaf1e3..ff0e5ab4e37c 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1200,7 +1200,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		bool empty = !cpumask_and(new_mask, new_mask,
 					  ctx->user_mask);
 
-		if (WARN_ON_ONCE(empty))
+		if (empty)
 			cpumask_copy(new_mask, cpus_allowed);
 	}
 	__set_cpus_allowed_ptr(p, ctx);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8b41bd13cc3d..4dae6ac2e83f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
 		wakeup_softirqd();
 }
 
+#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
+
 /*
  * flush_smp_call_function_queue() can raise a soft interrupt in a function
- * call. On RT kernels this is undesired and the only known functionality
- * in the block layer which does this is disabled on RT. If soft interrupts
- * get raised which haven't been raised before the flush, warn so it can be
+ * call. On RT kernels this is undesired and the only known functionalities
+ * are in the block layer which is disabled on RT, and in the scheduler for
+ * idle load balancing. If soft interrupts get raised which haven't been
+ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
  * investigated.
  */
 void do_softirq_post_smp_call_flush(unsigned int was_pending)
 {
-	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+	unsigned int is_pending = local_softirq_pending();
+
+	if (unlikely(was_pending != is_pending)) {
+		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
 		invoke_softirq();
+	}
 }
 
 #else /* CONFIG_PREEMPT_RT */
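
Note on the deadline.c hunk: the switch from '&' to '&&' matters because '!dl_se->dl_defer_running'
collapses to 0 or 1 before a bitwise AND is applied to it. The standalone userspace sketch below
(not kernel code; the flag value 0x2 is invented for illustration) shows how the two operators can
disagree when a flag is not stored in bit 0.

/*
 * Sketch only: demonstrates 'flag & !other' vs 'flag && !other'.
 * Build with any C compiler, e.g. cc demo.c && ./a.out
 */
#include <stdio.h>

int main(void)
{
	unsigned int defer = 0x2;	/* hypothetical flag stored in bit 1 */
	unsigned int running = 0;

	/* bitwise: !running becomes 1, and 0x2 & 0x1 == 0, so the test fails */
	printf("defer &  !running = %u\n", defer & !running);
	/* logical: both conditions hold, so the test succeeds */
	printf("defer && !running = %d\n", defer && !running);
	return 0;
}

If the flags happen to be single-bit fields the two forms give the same result, but the logical
operator states the intended "both conditions hold" test directly, which is what the hunk restores.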
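Note on the softirq.c hunk: SCHED_SOFTIRQ_MASK lets do_softirq_post_smp_call_flush() tolerate a
SCHED_SOFTIRQ raised for idle load balancing during the flush while still warning about any other
newly raised softirq. A minimal standalone sketch of that masking check follows (not kernel code;
check_post_flush() and the concrete bit numbers are assumptions made for illustration only).

/*
 * Sketch only: compare a pre-flush snapshot of pending bits against the
 * post-flush state, ignoring the SCHED_SOFTIRQ bit in the comparison.
 */
#include <stdio.h>

#define BIT(nr)			(1u << (nr))
#define SCHED_SOFTIRQ		7	/* assumed bit number, for illustration */
#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)

static void check_post_flush(unsigned int was_pending, unsigned int is_pending)
{
	if (was_pending != is_pending) {
		/* Only a non-SCHED softirq raised during the flush is unexpected. */
		if (was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK))
			printf("warn: unexpected softirq raised during flush\n");
		else
			printf("ok: only SCHED_SOFTIRQ raised, no warning\n");
	}
}

int main(void)
{
	unsigned int before = BIT(3);	/* some softirq already pending beforehand */

	check_post_flush(before, before | SCHED_SOFTIRQ_MASK);	/* tolerated */
	check_post_flush(before, before | BIT(4));		/* warned about */
	return 0;
}

The masked comparison keeps the diagnostic value of the warning for genuinely unexpected softirqs
while allowing the scheduler's own SCHED_SOFTIRQ, which this series now raises in that path.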