author     Linus Torvalds <torvalds@linux-foundation.org>  2018-11-11 16:33:00 -0600
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-11-11 16:33:00 -0600
commit     024d4d4c0cf486dee5240183125edbddc6cf2d55 (patch)
tree       59bdfb85bb91e7dc97580d0e3bd1f24fdf6e4d73
parent     1acf93ca6c53144f5ffd408f1820ef5431656eb7 (diff)
parent     e1ff516a56ad56c476b47795d3811eef79d25fbe (diff)
Merge branch 'sched/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
"Two small scheduler fixes:
- Take hotplug lock in sched_init_smp(). Technically not really
required, but lockdep will complain otherwise.
- Trivial comment fix in sched/fair"
* 'sched/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Fix a comment in task_numa_fault()
sched/core: Take the hotplug lock in sched_init_smp()
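
The first of the two fixes wraps the scheduler's domain setup in the CPU hotplug
read lock so that lockdep's held-lock assertion in the callee is satisfied. Below
is a minimal kernel-style C sketch of that pattern; build_domains(), domains_init()
and domains_mutex are illustrative stand-ins, not the scheduler's actual symbols:

#include <linux/cpu.h>		/* cpus_read_lock(), lockdep_assert_cpus_held() */
#include <linux/init.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(domains_mutex);

/* Illustrative callee: documents, and under lockdep enforces, that the
 * CPU hotplug lock is held while the CPU masks are walked. */
static void build_domains(void)
{
	lockdep_assert_cpus_held();
	/* ... build domains from the now-stable cpu_active_mask ... */
}

static void __init domains_init(void)
{
	/*
	 * At this point in boot there is no userspace, so no hotplug
	 * operation can race with us. The lock is taken purely to keep
	 * the assertion above happy; it is uncontended here.
	 */
	cpus_read_lock();
	mutex_lock(&domains_mutex);
	build_domains();
	mutex_unlock(&domains_mutex);
	cpus_read_unlock();
}

This mirrors the ordering in the hunk below: the hotplug read lock is taken
outside sched_domains_mutex and released after it, keeping the lock order
consistent for lockdep.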
 kernel/sched/core.c | 5 ++++-
 kernel/sched/fair.c | 4 ++--
 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..091e089063be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5851,11 +5851,14 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen.
+	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+	 * but there won't be any contention on it.
 	 */
+	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
+	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb661cc..3648d0300fdf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 		local = 1;
 
 	/*
-	 * Retry task to preferred node migration periodically, in case it
-	 * case it previously failed, or the scheduler moved us.
+	 * Retry to migrate task to preferred node periodically, in case it
+	 * previously failed, or the scheduler moved us.
 	 */
 	if (time_after(jiffies, p->numa_migrate_retry)) {
 		task_numa_placement(p);