| Field | Value | Date |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2020-02-24 11:36:09 +0100 |
| committer | Ingo Molnar <mingo@kernel.org> | 2020-02-24 11:36:09 +0100 |
| commit | 546121b65f47384e11ec1fa2e55449fc9f4846b2 (patch) | |
| tree | 8f18470ec7c0c77b0f48eb1b2338e591b0b0aaff /kernel/sched/fair.c | |
| parent | 000619680c3714020ce9db17eef6a4a7ce2dc28b (diff) | |
| parent | f8788d86ab28f61f7b46eb6be375f8a726783636 (diff) | |
Merge tag 'v5.6-rc3' into sched/core, to pick up fixes and dependent patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/fair.c | 15 |

1 file changed, 14 insertions, 1 deletion
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a7e11b1bb64c..f38ff5a335d3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3516,7 +3516,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
- * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -5970,6 +5969,20 @@ symmetric:
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)))
 		return prev;
 
+	/*
+	 * Allow a per-cpu kthread to stack with the wakee if the
+	 * kworker thread and the tasks previous CPUs are the same.
+	 * The assumption is that the wakee queued work for the
+	 * per-cpu kthread that is now complete and the wakeup is
+	 * essentially a sync wakeup. An obvious example of this
+	 * pattern is IO completions.
+	 */
+	if (is_per_cpu_kthread(current) &&
+	    prev == smp_processor_id() &&
+	    this_rq()->nr_running <= 1) {
+		return prev;
+	}
+
 	/* Check a recently used CPU as a potential idle candidate: */
 	recent_used_cpu = p->recent_used_cpu;
 	if (recent_used_cpu != prev &&
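The second hunk lets the wakee stack on its previous CPU only when three conditions hold at once: the waker is a per-CPU kthread (e.g. an IO-completion kworker), that kthread is running on the wakee's previous CPU, and that runqueue has nothing else runnable. The standalone C sketch below models just that decision; all names in it (`wake_ctx`, `allow_stacking`, the sample values) are hypothetical and only the three conditions mirror the kernel check above, so treat it as an illustration rather than kernel code.

```c
/*
 * Standalone model of the wakeup-stacking heuristic added by this patch.
 * Hypothetical names; only the three tested conditions follow the hunk.
 */
#include <stdbool.h>
#include <stdio.h>

struct wake_ctx {
	bool waker_is_per_cpu_kthread;	/* models is_per_cpu_kthread(current) */
	int  prev_cpu;			/* wakee's previous CPU */
	int  this_cpu;			/* models smp_processor_id() of the waker */
	int  this_rq_nr_running;	/* models this_rq()->nr_running */
};

/* Return true if the wakee may stack on its previous CPU. */
static bool allow_stacking(const struct wake_ctx *c)
{
	return c->waker_is_per_cpu_kthread &&
	       c->prev_cpu == c->this_cpu &&
	       c->this_rq_nr_running <= 1;
}

int main(void)
{
	/* e.g. an IO-completion kworker waking the task that queued the work */
	struct wake_ctx io_completion = {
		.waker_is_per_cpu_kthread = true,
		.prev_cpu = 3,
		.this_cpu = 3,
		.this_rq_nr_running = 1,
	};

	printf("stack on prev CPU: %s\n",
	       allow_stacking(&io_completion) ? "yes" : "no");
	return 0;
}
```

The `nr_running <= 1` term is what keeps this from becoming a general "always wake local" policy: if anything besides the kworker is already queued on that CPU, the fast path falls through to the usual idle-CPU search.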