Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	51
1 file changed, 29 insertions, 22 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811e..b48cd597145 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;

 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);

-	return ret;
+	return ret < 0 ? ret : 0;
 }

 /*
@@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 		spin_unlock_irq(&pool->lock);

 		mutex_unlock(&pool->assoc_mutex);
-	}

-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();

-	/*
-	 * Sched callbacks are disabled now.  Zap nr_running.  After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty.  Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
+		/*
+		 * Sched callbacks are disabled now.  Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty.  This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
 		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls.  Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }

 /*
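For reference, below is a sketch of how worker_pool_assign_id() reads after the first hunk, assembled from the lines added above; worker_pool_idr and worker_pool_idr_mutex are defined elsewhere in kernel/workqueue.c and are assumed here.

/*
 * Sketch of the post-patch worker_pool_assign_id(), reconstructed from
 * the hunk above.  idr_alloc() replaces the old idr_pre_get() +
 * idr_get_new() two-step: it allocates and returns the new id in one
 * call, or a negative errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	mutex_lock(&worker_pool_idr_mutex);
	/* third/fourth args: allocate any id >= 0 with no upper bound */
	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	if (ret >= 0)
		pool->id = ret;
	mutex_unlock(&worker_pool_idr_mutex);

	/* callers only need success/failure, not the allocated id */
	return ret < 0 ? ret : 0;
}

Because idr_alloc() returns the id itself rather than filling it in through a pointer, the function stores the value into pool->id on success and normalizes its own return value with "ret < 0 ? ret : 0" so callers continue to see 0 on success and -errno on failure.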