From 443378f0664a78756c3e3aeaab92750fe1e05735 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 30 Nov 2021 17:00:30 -0800
Subject: workqueue: Upgrade queue_work_on() comment

The current queue_work_on() docbook comment says that the caller must
ensure that the specified CPU can't go away, but does not spell out the
consequences, which turn out to be quite mild.  Therefore expand this
comment to explicitly say that the penalty for failing to nail down the
specified CPU is that the workqueue handler might find itself executing
on some other CPU.

Cc: Tejun Heo
Cc: Lai Jiangshan
Signed-off-by: Paul E. McKenney
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 613917bbc4e7..332361cf215f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1531,7 +1531,8 @@ out:
  * @work: work to queue
  *
  * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
+ * can't go away. Callers that fail to ensure that the specified
+ * CPU cannot go away will execute on a randomly chosen CPU.
  *
  * Return: %false if @work was already on a queue, %true otherwise.
  */
-- cgit v1.2.3-70-g09d2
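To see what the reworded comment means in practice: a caller that really needs the
handler to run on the chosen CPU has to hold off CPU hotplug around the queueing
itself; otherwise the work item simply runs on whichever CPU the workqueue picks.
Below is a minimal, illustrative sketch of such a caller. The helper and work names
are invented for the example; only queue_work_on(), cpu_online() and
cpus_read_lock()/cpus_read_unlock() are the real kernel APIs.

  #include <linux/cpu.h>
  #include <linux/printk.h>
  #include <linux/smp.h>
  #include <linux/workqueue.h>

  static void my_work_fn(struct work_struct *work)
  {
          /* Normally the CPU it was queued on, unless that CPU went away. */
          pr_info("work running on CPU %d\n", raw_smp_processor_id());
  }

  static DECLARE_WORK(my_work, my_work_fn);

  /* Hypothetical helper: queue @my_work on @cpu while holding off hotplug. */
  static bool queue_my_work_on(int cpu)
  {
          bool queued = false;

          cpus_read_lock();               /* keep @cpu from going offline while queueing */
          if (cpu_online(cpu))
                  queued = queue_work_on(cpu, system_wq, &my_work);
          cpus_read_unlock();

          /*
           * If @cpu goes offline after this point, the handler may still be
           * migrated and execute on another CPU, which is the mild consequence
           * the updated comment documents.
           */
          return queued;
  }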
From 07edfece8bcb0580a1828d939e6f8d91a8603eb2 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 1 Dec 2021 16:19:44 +0100
Subject: workqueue: Fix unbind_workers() VS wq_worker_running() race

At CPU-hotplug time, unbind_workers() may preempt a worker while it is
waking up. In that case the following scenario can happen:

    unbind_workers()                     wq_worker_running()
    --------------                       -------------------
                                         if (!(worker->flags & WORKER_NOT_RUNNING))
                                             //PREEMPTED by unbind_workers
    worker->flags |= WORKER_UNBOUND;
    [...]
    atomic_set(&pool->nr_running, 0);
                                             //resume to worker
                                             atomic_inc(&worker->pool->nr_running);

After unbind_workers() resets pool->nr_running, the value is expected to
remain 0 until the pool is rebound, should cpu_up() later be called on
the target CPU. But here the race leaves pool->nr_running with a value
of 1, triggering the following warning when the worker goes idle:

    WARNING: CPU: 3 PID: 34 at kernel/workqueue.c:1823 worker_enter_idle+0x95/0xc0
    Modules linked in:
    CPU: 3 PID: 34 Comm: kworker/3:0 Not tainted 5.16.0-rc1+ #34
    Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014
    Workqueue: 0x0 (rcu_par_gp)
    RIP: 0010:worker_enter_idle+0x95/0xc0
    Code: 04 85 f8 ff ff ff 39 c1 7f 09 48 8b 43 50 48 85 c0 74 1b 83 e2 04 75 99 8b 43 34 39 43 30 75 91 8b 83 00 03 00 00 85 c0 74 87 <0f> 0b 5b c3 48 8b 35 70 f1 37 01 48 8d 7b 48 48 81 c6 e0 93 0
    RSP: 0000:ffff9b7680277ed0 EFLAGS: 00010086
    RAX: 00000000ffffffff RBX: ffff93465eae9c00 RCX: 0000000000000000
    RDX: 0000000000000000 RSI: ffff9346418a0000 RDI: ffff934641057140
    RBP: ffff934641057170 R08: 0000000000000001 R09: ffff9346418a0080
    R10: ffff9b768027fdf0 R11: 0000000000002400 R12: ffff93465eae9c20
    R13: ffff93465eae9c20 R14: ffff93465eae9c70 R15: ffff934641057140
    FS: 0000000000000000(0000) GS:ffff93465eac0000(0000) knlGS:0000000000000000
    CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
    CR2: 0000000000000000 CR3: 000000001cc0c000 CR4: 00000000000006e0
    DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
    DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
    Call Trace:
     worker_thread+0x89/0x3d0
     ? process_one_work+0x400/0x400
     kthread+0x162/0x190
     ? set_kthread_struct+0x40/0x40
     ret_from_fork+0x22/0x30

Also, due to this incorrect "nr_running == 1", further queued work may
end up not being served, because no worker is woken up at work
insertion time. This causes rcutorture writer stalls, for example.

Fix this by disabling preemption in the right place in
wq_worker_running().

It's worth noting that if the worker migrates and runs concurrently
with unbind_workers(), it is guaranteed to see the WORKER_UNBOUND flag
update due to set_cpus_allowed_ptr() acquiring/releasing rq->lock.

Fixes: 6d25be5782e4 ("sched/core, workqueues: Distangle worker accounting from rq lock")
Reviewed-by: Lai Jiangshan
Tested-by: Paul E. McKenney
Acked-by: Peter Zijlstra (Intel)
Signed-off-by: Frederic Weisbecker
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Sebastian Andrzej Siewior
Cc: Daniel Bristot de Oliveira
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 332361cf215f..5094573e8b45 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -868,8 +868,17 @@ void wq_worker_running(struct task_struct *task)
 
 	if (!worker->sleeping)
 		return;
+
+	/*
+	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
+	 * and the nr_running increment below, we may ruin the nr_running reset
+	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
+	 * pool. Protect against such race.
+	 */
+	preempt_disable();
 	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(&worker->pool->nr_running);
+	preempt_enable();
 	worker->sleeping = 0;
 }
 
-- cgit v1.2.3-70-g09d2
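The essence of the fix above is that the WORKER_NOT_RUNNING test and the nr_running
increment must appear atomic to anything that can preempt the worker on its own CPU,
which preempt_disable() guarantees. A stripped-down, illustrative sketch of that
pattern follows; the type and names are invented for the example rather than taken
from the kernel.

  #include <linux/atomic.h>
  #include <linux/preempt.h>

  /* Illustrative stand-in for the pool/worker state involved in the race. */
  struct example_pool {
          unsigned int    flags;          /* may gain EXAMPLE_NOT_RUNNING from a preempting task */
          atomic_t        nr_running;     /* reset to 0 by that same preempting task */
  };

  #define EXAMPLE_NOT_RUNNING     0x1

  static void example_mark_running(struct example_pool *pool)
  {
          /*
           * Without the preempt_disable()/preempt_enable() pair, the unbinding
           * code could run on this CPU between the flag test and the increment,
           * reset nr_running to 0, and the increment below would then leave the
           * counter at an unexpected 1.
           */
          preempt_disable();
          if (!(pool->flags & EXAMPLE_NOT_RUNNING))
                  atomic_inc(&pool->nr_running);
          preempt_enable();
  }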
From 45c753f5f24d2d4717acb38ce35e604ff9abcb50 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 1 Dec 2021 16:19:45 +0100
Subject: workqueue: Fix unbind_workers() VS wq_worker_sleeping() race

At CPU-hotplug time, unbind_workers() may preempt a worker while it is
going to sleep. In that case the following scenario can happen:

    unbind_workers()                     wq_worker_sleeping()
    --------------                       -------------------
                                         if (worker->flags & WORKER_NOT_RUNNING)
                                             return;
                                         //PREEMPTED by unbind_workers
    worker->flags |= WORKER_UNBOUND;
    [...]
    atomic_set(&pool->nr_running, 0);
                                         //resume to worker
                                         atomic_dec_and_test(&pool->nr_running);

After unbind_workers() resets pool->nr_running, the value is expected to
remain 0 until the pool is rebound, should cpu_up() later be called on
the target CPU. But here the race leaves pool->nr_running with a value
of -1, triggering the following warning when the worker goes idle:

    WARNING: CPU: 3 PID: 34 at kernel/workqueue.c:1823 worker_enter_idle+0x95/0xc0
    Modules linked in:
    CPU: 3 PID: 34 Comm: kworker/3:0 Not tainted 5.16.0-rc1+ #34
    Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014
    Workqueue: 0x0 (rcu_par_gp)
    RIP: 0010:worker_enter_idle+0x95/0xc0
    Code: 04 85 f8 ff ff ff 39 c1 7f 09 48 8b 43 50 48 85 c0 74 1b 83 e2 04 75 99 8b 43 34 39 43 30 75 91 8b 83 00 03 00 00 85 c0 74 87 <0f> 0b 5b c3 48 8b 35 70 f1 37 01 48 8d 7b 48 48 81 c6 e0 93 0
    RSP: 0000:ffff9b7680277ed0 EFLAGS: 00010086
    RAX: 00000000ffffffff RBX: ffff93465eae9c00 RCX: 0000000000000000
    RDX: 0000000000000000 RSI: ffff9346418a0000 RDI: ffff934641057140
    RBP: ffff934641057170 R08: 0000000000000001 R09: ffff9346418a0080
    R10: ffff9b768027fdf0 R11: 0000000000002400 R12: ffff93465eae9c20
    R13: ffff93465eae9c20 R14: ffff93465eae9c70 R15: ffff934641057140
    FS: 0000000000000000(0000) GS:ffff93465eac0000(0000) knlGS:0000000000000000
    CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
    CR2: 0000000000000000 CR3: 000000001cc0c000 CR4: 00000000000006e0
    DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
    DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
    Call Trace:
     worker_thread+0x89/0x3d0
     ? process_one_work+0x400/0x400
     kthread+0x162/0x190
     ? set_kthread_struct+0x40/0x40
     ret_from_fork+0x22/0x30

Also, due to this incorrect "nr_running == -1", all sorts of hazards can
happen, starting with queued works being ignored because no workers are
woken up at insert_work() time.

Fix this by checking the worker flags again while pool->lock is held.

Fixes: b945efcdd07d ("sched: Remove pointless preemption disable in sched_submit_work()")
Reviewed-by: Lai Jiangshan
Tested-by: Paul E. McKenney
Acked-by: Peter Zijlstra (Intel)
Signed-off-by: Frederic Weisbecker
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Paul E. McKenney
Cc: Sebastian Andrzej Siewior
Cc: Daniel Bristot de Oliveira
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5094573e8b45..5557d19ea81c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -912,6 +912,16 @@ void wq_worker_sleeping(struct task_struct *task)
 	worker->sleeping = 1;
 	raw_spin_lock_irq(&pool->lock);
 
+	/*
+	 * Recheck in case unbind_workers() preempted us. We don't
+	 * want to decrement nr_running after the worker is unbound
+	 * and nr_running has been reset.
+	 */
+	if (worker->flags & WORKER_NOT_RUNNING) {
+		raw_spin_unlock_irq(&pool->lock);
+		return;
+	}
+
 	/*
 	 * The counterpart of the following dec_and_test, implied mb,
 	 * worklist not empty test sequence is in insert_work().
-- cgit v1.2.3-70-g09d2
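The sleep-side race above is closed differently: the flags are simply re-read once
pool->lock is held, because unbind_workers() only sets WORKER_UNBOUND and zeroes
nr_running under that lock. An illustrative sketch of this lockless pre-check
followed by a recheck under the lock, again with names invented for the example:

  #include <linux/atomic.h>
  #include <linux/spinlock.h>

  struct example_pool {
          raw_spinlock_t  lock;
          unsigned int    flags;          /* EXAMPLE_NOT_RUNNING set only under @lock */
          atomic_t        nr_running;     /* reset to 0 only under @lock */
  };

  #define EXAMPLE_NOT_RUNNING     0x1

  static void example_mark_sleeping(struct example_pool *pool)
  {
          if (pool->flags & EXAMPLE_NOT_RUNNING)  /* lockless fast path */
                  return;

          raw_spin_lock_irq(&pool->lock);
          /*
           * We may have been preempted after the lockless test above by the
           * code that sets EXAMPLE_NOT_RUNNING and zeroes nr_running, so the
           * flag has to be rechecked now that the lock is held.
           */
          if (pool->flags & EXAMPLE_NOT_RUNNING) {
                  raw_spin_unlock_irq(&pool->lock);
                  return;
          }
          atomic_dec(&pool->nr_running);
          raw_spin_unlock_irq(&pool->lock);
  }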
From ccf45156fd167a234baf038c11c1f367c7ccabd4 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:37 +0800
Subject: workqueue: Remove the outdated comment before wq_worker_sleeping()

wq_worker_sleeping() is no longer called with preemption disabled, so
the comment about it is outdated.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 613917bbc4e7..2964dbb783fe 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -878,8 +878,7 @@ void wq_worker_running(struct task_struct *task)
  * @task: task going to sleep
  *
  * This function is called from schedule() when a busy worker is
- * going to sleep. Preemption needs to be disabled to protect ->sleeping
- * assignment.
+ * going to sleep.
  */
 void wq_worker_sleeping(struct task_struct *task)
 {
-- cgit v1.2.3-70-g09d2

From 3e5f39ea33b1189ccaa4ae2a9de2bce07753d2e0 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:38 +0800
Subject: workqueue: Remove the advanced kicking of the idle workers in rebind_workers()

Commit 6d25be5782e4 ("sched/core, workqueues: Distangle worker
accounting from rq lock") changed the schedule callbacks for workqueue
and removed the local-wake-up functionality.

Workers are now woken up in the normal fashion, and workers in a
concurrency-managed pool that have not yet migrated to the associated
CPU can also be woken up by workers that are already bound to that CPU.
So the advance kicking of idle workers to migrate them to the
associated CPU is no longer needed.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2964dbb783fe..f7f4a5fc7736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5057,17 +5057,6 @@ static void rebind_workers(struct worker_pool *pool)
 	for_each_pool_worker(worker, pool) {
 		unsigned int worker_flags = worker->flags;
 
-		/*
-		 * A bound idle worker should actually be on the runqueue
-		 * of the associated CPU for local wake-ups targeting it to
-		 * work. Kick all idle workers so that they migrate to the
-		 * associated CPU. Doing this in the same loop as
-		 * replacing UNBOUND with REBOUND is safe as no worker will
-		 * be bound before @pool->lock is released.
-		 */
-		if (worker_flags & WORKER_IDLE)
-			wake_up_process(worker->task);
-
 		/*
 		 * We want to clear UNBOUND but can't directly call
 		 * worker_clr_flags() or adjust nr_running. Atomically
-- cgit v1.2.3-70-g09d2
From 11b45b0bf402b53c94c86737a440363fc36f03cd Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:39 +0800
Subject: workqueue: Remove outdated comment about exceptional workers in unbind_workers()

Long ago, workers were not all bound after CPU_ONLINE; they could still
be running on other CPUs before rebinding themselves. But commit
a9ab775bcadf ("workqueue: directly restore CPU affinity of workers from
CPU_ONLINE") made rebind_workers() bind all of them, so all workers are
on the CPU before the CPU goes down.

The comment in unbind_workers() that refers to workers "which are still
executing works from before the last CPU down" is therefore outdated;
just remove it.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f7f4a5fc7736..ae58c6ace23f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4978,9 +4978,7 @@ static void unbind_workers(int cpu)
 		/*
 		 * We've blocked all attach/detach operations. Make all workers
 		 * unbound and set DISASSOCIATED. Before this, all workers
-		 * except for the ones which are still executing works from
-		 * before the last CPU down must be on the cpu. After
-		 * this, they may become diasporas.
+		 * must be on the cpu. After this, they may become diasporas.
 		 */
 		for_each_pool_worker(worker, pool)
 			worker->flags |= WORKER_UNBOUND;
-- cgit v1.2.3-70-g09d2

From b4ac9384ac057c5bf035fbe82fc162fa2f7b15a9 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:40 +0800
Subject: workqueue: Remove schedule() in unbind_workers()

Commit 6d25be5782e4 ("sched/core, workqueues: Distangle worker
accounting from rq lock") changed the schedule callbacks for workqueue
and moved the schedule callback from the wakeup code to the end of
schedule() in the worker's process context.

This means that the callback wq_worker_running() is guaranteed to see
the %WORKER_UNBOUND flag after being scheduled, since unbind_workers()
runs on the same CPU that all the pool's workers are bound to, so the
schedule() call in unbind_workers() is no longer needed.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ae58c6ace23f..499a264183ef 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4979,6 +4979,9 @@ static void unbind_workers(int cpu)
 		 * We've blocked all attach/detach operations. Make all workers
 		 * unbound and set DISASSOCIATED. Before this, all workers
 		 * must be on the cpu. After this, they may become diasporas.
+		 * And the preemption disabled section in their sched callbacks
+		 * are guaranteed to see WORKER_UNBOUND since the code here
+		 * is on the same cpu.
 		 */
 		for_each_pool_worker(worker, pool)
 			worker->flags |= WORKER_UNBOUND;
@@ -4994,14 +4997,6 @@ static void unbind_workers(int cpu)
 
 		mutex_unlock(&wq_pool_attach_mutex);
 
-		/*
-		 * Call schedule() so that we cross rq->lock and thus can
-		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
-		 * This is necessary as scheduler callbacks may be invoked
-		 * from other cpus.
-		 */
-		schedule();
-
 		/*
 		 * Sched callbacks are disabled now. Zap nr_running.
 		 * After this, nr_running stays zero and need_more_worker()
-- cgit v1.2.3-70-g09d2
From 989442d73757868118a73b92732b549a73c9ce35 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:41 +0800
Subject: workqueue: Move the code of waking a worker up in unbind_workers()

In unbind_workers(), there are two pool->lock held sections separated
by the code that zaps nr_running. wake_up_worker() needs to be in a
pool->lock held section and after nr_running is zapped, and zapping
nr_running had to come after schedule() while the local wake-up
functionality was still in use.

Now that the call to schedule() has been removed along with the local
wake-up functionality, the code can be merged into the same pool->lock
held section.

The diffstat shows other code being moved down only because the diff
tools cannot express merging two lock sections by swapping two code
blocks.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 499a264183ef..403387e9a924 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1810,14 +1810,8 @@ static void worker_enter_idle(struct worker *worker)
 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
-	/*
-	 * Sanity check nr_running. Because unbind_workers() releases
-	 * pool->lock between setting %WORKER_UNBOUND and zapping
-	 * nr_running, the warning may trigger spuriously. Check iff
-	 * unbind is not in progress.
-	 */
-	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
-		     pool->nr_workers == pool->nr_idle &&
+	/* Sanity check nr_running. */
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
 		     atomic_read(&pool->nr_running));
 }
 
@@ -4988,21 +4982,12 @@ static void unbind_workers(int cpu)
 
 		pool->flags |= POOL_DISASSOCIATED;
 
-		raw_spin_unlock_irq(&pool->lock);
-
-		for_each_pool_worker(worker, pool) {
-			kthread_set_per_cpu(worker->task, -1);
-			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-		}
-
-		mutex_unlock(&wq_pool_attach_mutex);
-
 		/*
-		 * Sched callbacks are disabled now. Zap nr_running.
-		 * After this, nr_running stays zero and need_more_worker()
-		 * and keep_working() are always true as long as the
-		 * worklist is not empty. This pool now behaves as an
-		 * unbound (in terms of concurrency management) pool which
+		 * The handling of nr_running in sched callbacks are disabled
+		 * now. Zap nr_running. After this, nr_running stays zero and
+		 * need_more_worker() and keep_working() are always true as
+		 * long as the worklist is not empty. This pool now behaves as
+		 * an unbound (in terms of concurrency management) pool which
 		 * are served by workers tied to the pool.
 		 */
 		atomic_set(&pool->nr_running, 0);
@@ -5012,9 +4997,16 @@ static void unbind_workers(int cpu)
 		 * worker blocking could lead to lengthy stalls. Kick off
 		 * unbound chain execution of currently pending work items.
 		 */
-		raw_spin_lock_irq(&pool->lock);
 		wake_up_worker(pool);
+
 		raw_spin_unlock_irq(&pool->lock);
+
+		for_each_pool_worker(worker, pool) {
+			kthread_set_per_cpu(worker->task, -1);
+			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+		}
+
+		mutex_unlock(&wq_pool_attach_mutex);
 	}
 }
 
-- cgit v1.2.3-70-g09d2

From 84f91c62d675480ffd3d870ee44c07965cbd8b21 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Tue, 7 Dec 2021 15:35:42 +0800
Subject: workqueue: Remove the cacheline_aligned for nr_running

nr_running is never modified remotely now that the schedule callback in
the wakeup path has been removed.

Rather, nr_running is usually accessed together with other fields in
the pool, so the cacheline alignment for nr_running is no longer
needed.

Signed-off-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 403387e9a924..b583141c5481 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -154,6 +154,9 @@ struct worker_pool {
 
 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 
+	/* The current concurrency level. */
+	atomic_t		nr_running;
+
 	struct list_head	worklist;	/* L: list of pending works */
 
 	int			nr_workers;	/* L: total number of workers */
@@ -177,19 +180,12 @@ struct worker_pool {
 	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
 	int			refcnt;		/* PL: refcnt for unbound pools */
 
-	/*
-	 * The current concurrency level. As it's likely to be accessed
-	 * from other CPUs during try_to_wake_up(), put it in a separate
-	 * cacheline.
-	 */
-	atomic_t		nr_running ____cacheline_aligned_in_smp;
-
 	/*
 	 * Destruction of pool is RCU protected to allow dereferences
 	 * from get_work_pool().
 	 */
 	struct rcu_head		rcu;
-} ____cacheline_aligned_in_smp;
+};
 
 /*
  * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
-- cgit v1.2.3-70-g09d2
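As background for the annotation dropped above: ____cacheline_aligned_in_smp pads a
field out to its own cache line so that writes from other CPUs do not false-share
with neighbouring fields. A generic, illustrative sketch (not the workqueue code) of
the kind of layout the annotation is meant for, and why it stops paying off once
nothing writes the field remotely any more:

  #include <linux/atomic.h>
  #include <linux/cache.h>
  #include <linux/spinlock.h>

  struct example_stats {
          /* Fields touched only by the owning CPU, under @lock. */
          spinlock_t      lock;
          unsigned long   local_events;

          /*
           * A counter hammered from other CPUs: giving it its own cache line
           * avoids false sharing with the fields above. Once nothing writes
           * it remotely any more, the padding buys nothing and the annotation
           * can simply be dropped, as done for nr_running in this series.
           */
          atomic_t        remote_events ____cacheline_aligned_in_smp;
  };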