Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	 4
-rw-r--r--	kernel/sched/deadline.c	 3
-rw-r--r--	kernel/sched/fair.c	14
-rw-r--r--	kernel/sched/syscalls.c	 2
4 files changed, 15 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95e40895a519..c6d8232ad9ee 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1283,9 +1283,9 @@ static void nohz_csd_func(void *info)
 	WARN_ON(!(flags & NOHZ_KICK_MASK));
 
 	rq->idle_balance = idle_cpu(cpu);
-	if (rq->idle_balance && !need_resched()) {
+	if (rq->idle_balance) {
 		rq->nohz_idle_balance = flags;
-		raise_softirq_irqoff(SCHED_SOFTIRQ);
+		__raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
 }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d9d5a702f1a6..db47f33cb7d2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
 	 * If it is a deferred reservation, and the server
 	 * is not handling an starvation case, defer it.
 	 */
-	if (dl_se->dl_defer & !dl_se->dl_defer_running) {
+	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
 		dl_se->dl_throttled = 1;
 		dl_se->dl_defer_armed = 1;
 	}
@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se);
 	} else if ((flags & ENQUEUE_RESTORE) &&
+		   !is_dl_boosted(dl_se) &&
 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
 		setup_new_dl_entity(dl_se);
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fbdca89c677f..aa0238ee4857 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3399,10 +3399,16 @@ retry_pids:
 
 		/* Initialise new per-VMA NUMAB state. */
 		if (!vma->numab_state) {
-			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
-				GFP_KERNEL);
-			if (!vma->numab_state)
+			struct vma_numab_state *ptr;
+
+			ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+			if (!ptr)
+				continue;
+
+			if (cmpxchg(&vma->numab_state, NULL, ptr)) {
+				kfree(ptr);
 				continue;
+			}
 
 			vma->numab_state->start_scan_seq = mm->numa_scan_seq;
 
@@ -12568,7 +12574,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
 		 * work being done for other CPUs. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
+		if (!idle_cpu(this_cpu) && need_resched()) {
 			if (flags & NOHZ_STATS_KICK)
 				has_blocked_load = true;
 			if (flags & NOHZ_NEXT_KICK)
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0d71fcbaf1e3..ff0e5ab4e37c 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1200,7 +1200,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		bool empty = !cpumask_and(new_mask, new_mask,
 					  ctx->user_mask);
 
-		if (WARN_ON_ONCE(empty))
+		if (empty)
 			cpumask_copy(new_mask, cpus_allowed);
 	}
 	__set_cpus_allowed_ptr(p, ctx);
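For illustration only, not part of the patch: the fair.c hunk switches vma->numab_state to an allocate-then-publish pattern, where each racing task allocates privately and a single cmpxchg() decides whose allocation becomes visible; losers free their copy. Below is a minimal userspace sketch of that same pattern using C11 atomics in place of the kernel's cmpxchg()/kzalloc()/kfree(); the names numab_state and numab_state_get are hypothetical stand-ins, not kernel identifiers.

/*
 * Sketch of the publish-or-free initialisation pattern (userspace analogue).
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <stdatomic.h>
#include <stdlib.h>

struct numab_state {
	int start_scan_seq;
};

/* Shared pointer that many threads may try to initialise concurrently. */
static _Atomic(struct numab_state *) numab_state;

/* Returns the installed state, or NULL if allocation failed. */
static struct numab_state *numab_state_get(int scan_seq)
{
	struct numab_state *ptr, *expected = NULL;

	/* Allocate a private candidate first; no lock is held. */
	ptr = calloc(1, sizeof(*ptr));
	if (!ptr)
		return NULL;
	ptr->start_scan_seq = scan_seq;

	/* Only the first caller installs its candidate; losers free theirs. */
	if (!atomic_compare_exchange_strong(&numab_state, &expected, ptr)) {
		free(ptr);
		return expected;	/* pointer that was already installed */
	}
	return ptr;
}

int main(void)
{
	struct numab_state *s = numab_state_get(42);
	return s ? 0 : 1;
}

The design choice mirrors the hunk above: rather than serialising the slow path with a lock, the loser simply discards its allocation, which also fixes the leak where a second writer could overwrite an already-installed vma->numab_state.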