diff options
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 54 | 
1 file changed, 24 insertions, 30 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ddee541ea97a..56180c9286f5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -841,43 +841,32 @@ static void wake_up_worker(struct worker_pool *pool)  }  /** - * wq_worker_waking_up - a worker is waking up + * wq_worker_running - a worker is running again   * @task: task waking up - * @cpu: CPU @task is waking up to   * - * This function is called during try_to_wake_up() when a worker is - * being awoken. - * - * CONTEXT: - * spin_lock_irq(rq->lock) + * This function is called when a worker returns from schedule()   */ -void wq_worker_waking_up(struct task_struct *task, int cpu) +void wq_worker_running(struct task_struct *task)  {  	struct worker *worker = kthread_data(task); -	if (!(worker->flags & WORKER_NOT_RUNNING)) { -		WARN_ON_ONCE(worker->pool->cpu != cpu); +	if (!worker->sleeping) +		return; +	if (!(worker->flags & WORKER_NOT_RUNNING))  		atomic_inc(&worker->pool->nr_running); -	} +	worker->sleeping = 0;  }  /**   * wq_worker_sleeping - a worker is going to sleep   * @task: task going to sleep   * - * This function is called during schedule() when a busy worker is - * going to sleep.  Worker on the same cpu can be woken up by - * returning pointer to its task. - * - * CONTEXT: - * spin_lock_irq(rq->lock) - * - * Return: - * Worker task on @cpu to wake up, %NULL if none. + * This function is called from schedule() when a busy worker is + * going to sleep.   */ -struct task_struct *wq_worker_sleeping(struct task_struct *task) +void wq_worker_sleeping(struct task_struct *task)  { -	struct worker *worker = kthread_data(task), *to_wakeup = NULL; +	struct worker *next, *worker = kthread_data(task);  	struct worker_pool *pool;  	/* @@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)  	 * checking NOT_RUNNING.  	 
*/  	if (worker->flags & WORKER_NOT_RUNNING) -		return NULL; +		return;  	pool = worker->pool; -	/* this can only happen on the local cpu */ -	if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) -		return NULL; +	if (WARN_ON_ONCE(worker->sleeping)) +		return; + +	worker->sleeping = 1; +	spin_lock_irq(&pool->lock);  	/*  	 * The counterpart of the following dec_and_test, implied mb, @@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)  	 * lock is safe.  	 */  	if (atomic_dec_and_test(&pool->nr_running) && -	    !list_empty(&pool->worklist)) -		to_wakeup = first_idle_worker(pool); -	return to_wakeup ? to_wakeup->task : NULL; +	    !list_empty(&pool->worklist)) { +		next = first_idle_worker(pool); +		if (next) +			wake_up_process(next->task); +	} +	spin_unlock_irq(&pool->lock);  }  /** @@ -4929,7 +4923,7 @@ static void rebind_workers(struct worker_pool *pool)  		 *  		 * WRITE_ONCE() is necessary because @worker->flags may be  		 * tested without holding any lock in -		 * wq_worker_waking_up().  Without it, NOT_RUNNING test may +		 * wq_worker_running().  Without it, NOT_RUNNING test may  		 * fail incorrectly leading to premature concurrency  		 * management operations.  		 */  | 
