| author | Tejun Heo <tj@kernel.org> | 2013-03-12 11:30:04 -0700 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-03-12 11:30:04 -0700 |
| commit | c9178087acd71b4ea010ea48e147cf66952d2da9 (patch) | |
| tree | 0b226a810036ee110d0f894c821df50df64db29b | |
| parent | 75ccf5950f828d53aebfd3a852283a00abf2c5bf (diff) | |
workqueue: perform non-reentrancy test when queueing to unbound workqueues too
Because per-cpu workqueues have multiple pwqs (pool_workqueues) to
serve the CPUs, to guarantee that a single work item isn't queued on
one pwq while still executing on another, __queue_work() takes a look
at the previous pool the target work item ran on and, if it's still
executing there, queues the work item on that pool.
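Condensed from the code this patch removes (visible in the diff below), the
per-cpu side of that test looks roughly like this; a sketch, not the verbatim
function:

```c
/* Old flow in __queue_work() (condensed): the non-reentrancy test
 * runs only for per-cpu workqueues. */
if (!(wq->flags & WQ_UNBOUND)) {
        struct worker_pool *last_pool;

        pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
        last_pool = get_work_pool(work);        /* pool @work last ran on */
        if (last_pool && last_pool != pwq->pool) {
                struct worker *worker;

                spin_lock(&last_pool->lock);
                worker = find_worker_executing_work(last_pool, work);
                if (worker && worker->current_pwq->wq == wq) {
                        /* still executing there: reuse that CPU's pwq */
                        pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
                } else {
                        spin_unlock(&last_pool->lock);
                        spin_lock(&pwq->pool->lock);
                }
        } else {
                spin_lock(&pwq->pool->lock);
        }
}
```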
To support changing workqueue_attrs on the fly, unbound workqueues too
will have multiple pwqs and thus need the non-reentrancy test when
queueing.  This patch modifies __queue_work() such that the reentrancy
test is performed regardless of the workqueue type.
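A condensed view of the restructured flow (again a sketch, taken from the
diff below): the candidate pwq is chosen first for either workqueue type,
then the same test runs unconditionally:

```c
/* New flow in __queue_work() (condensed): pick a candidate pwq,
 * then apply the non-reentrancy test regardless of type. */
if (!(wq->flags & WQ_UNBOUND))
        pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
else
        pwq = first_pwq(wq);

last_pool = get_work_pool(work);
if (last_pool && last_pool != pwq->pool) {
        struct worker *worker;

        spin_lock(&last_pool->lock);
        worker = find_worker_executing_work(last_pool, work);
        if (worker && worker->current_pwq->wq == wq) {
                pwq = worker->current_pwq;      /* works for unbound pwqs too */
        } else {
                spin_unlock(&last_pool->lock);
                spin_lock(&pwq->pool->lock);
        }
} else {
        spin_lock(&pwq->pool->lock);
}
```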
per_cpu_ptr(wq->cpu_pwqs, cpu) was previously used to determine the
matching pwq for the last pool.  This can't be used for unbound
workqueues and is replaced with worker->current_pwq, which also
happens to be simpler.
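The reason worker->current_pwq is the right answer in both cases: it is the
pwq the found worker is currently executing @work from, and its ->wq was just
checked against @wq, so by construction it is the matching pwq for last_pool:

```c
if (worker && worker->current_pwq->wq == wq)
        /* worker->current_pwq is the pwq @work is executing from and
         * belongs to @wq, so it is the matching pwq for last_pool,
         * per-cpu or unbound alike. */
        pwq = worker->current_pwq;
```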
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
| -rw-r--r-- | kernel/workqueue.c | 42 | 
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e933979678e..16fb6747276 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1209,6 +1209,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
+	struct worker_pool *last_pool;
 	struct list_head *worklist;
 	unsigned int work_flags;
 	unsigned int req_cpu = cpu;
@@ -1228,41 +1229,36 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
-	/* determine the pwq to use */
+	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND)) {
-		struct worker_pool *last_pool;
-
 		if (cpu == WORK_CPU_UNBOUND)
 			cpu = raw_smp_processor_id();
-
-		/*
-		 * It's multi cpu.  If @work was previously on a different
-		 * cpu, it might still be running there, in which case the
-		 * work needs to be queued on that cpu to guarantee
-		 * non-reentrancy.
-		 */
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-		last_pool = get_work_pool(work);
+	} else {
+		pwq = first_pwq(wq);
+	}
 
-		if (last_pool && last_pool != pwq->pool) {
-			struct worker *worker;
+	/*
+	 * If @work was previously on a different pool, it might still be
+	 * running there, in which case the work needs to be queued on that
+	 * pool to guarantee non-reentrancy.
+	 */
+	last_pool = get_work_pool(work);
+	if (last_pool && last_pool != pwq->pool) {
+		struct worker *worker;
 
-			spin_lock(&last_pool->lock);
+		spin_lock(&last_pool->lock);
 
-			worker = find_worker_executing_work(last_pool, work);
+		worker = find_worker_executing_work(last_pool, work);
 
-			if (worker && worker->current_pwq->wq == wq) {
-				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
-			} else {
-				/* meh... not running there, queue here */
-				spin_unlock(&last_pool->lock);
-				spin_lock(&pwq->pool->lock);
-			}
+		if (worker && worker->current_pwq->wq == wq) {
+			pwq = worker->current_pwq;
 		} else {
+			/* meh... not running there, queue here */
+			spin_unlock(&last_pool->lock);
 			spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		pwq = first_pwq(wq);
 		spin_lock(&pwq->pool->lock);
 	}