diff options
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 22 ++++++++++++++-------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9880b6c0e272..894bb885b40b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1849,18 +1849,17 @@ static void worker_attach_to_pool(struct worker *worker,  	mutex_lock(&wq_pool_attach_mutex);  	/* -	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any -	 * online CPUs.  It'll be re-applied when any of the CPUs come up. -	 */ -	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); - -	/*  	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains  	 * stable across this function.  See the comments above the flag  	 * definition for details.  	 */  	if (pool->flags & POOL_DISASSOCIATED)  		worker->flags |= WORKER_UNBOUND; +	else +		kthread_set_per_cpu(worker->task, pool->cpu); + +	if (worker->rescue_wq) +		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);  	list_add_tail(&worker->node, &pool->workers);  	worker->pool = pool; @@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)  	mutex_lock(&wq_pool_attach_mutex); +	kthread_set_per_cpu(worker->task, -1);  	list_del(&worker->node);  	worker->pool = NULL; @@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)  		raw_spin_unlock_irq(&pool->lock); -		for_each_pool_worker(worker, pool) -			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0); +		for_each_pool_worker(worker, pool) { +			kthread_set_per_cpu(worker->task, -1); +			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); +		}  		mutex_unlock(&wq_pool_attach_mutex); @@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)  	 * of all workers first and then clear UNBOUND.  As we're called  	 * from CPU_ONLINE, the following shouldn't fail.  	 
*/ -	for_each_pool_worker(worker, pool) +	for_each_pool_worker(worker, pool) { +		kthread_set_per_cpu(worker->task, pool->cpu);  		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,  						  pool->attrs->cpumask) < 0); +	}  	raw_spin_lock_irq(&pool->lock);  | 
