Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r-- | net/sunrpc/sched.c | 33
1 files changed, 20 insertions, 13 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427beda..ff50a054686 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 	rpc_clear_queued(task);
 	if (rpc_test_and_set_running(task))
 		return;
-	/* We might have raced */
-	if (RPC_IS_QUEUED(task)) {
-		rpc_clear_running(task);
-		return;
-	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
  */
 static void __rpc_execute(struct rpc_task *task)
 {
-	int		status = 0;
+	struct rpc_wait_queue *queue;
+	int task_is_async = RPC_IS_ASYNC(task);
+	int status = 0;
 
 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
-		rpc_clear_running(task);
-		if (RPC_IS_ASYNC(task)) {
-			/* Careful! we may have raced... */
-			if (RPC_IS_QUEUED(task))
-				return;
-			if (rpc_test_and_set_running(task))
-				return;
+		/*
+		 * The queue->lock protects against races with
+		 * rpc_make_runnable().
+		 *
+		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+		 * rpc_task, rpc_make_runnable() can assign it to a
+		 * different workqueue. We therefore cannot assume that the
+		 * rpc_task pointer may still be dereferenced.
+		 */
+		queue = task->tk_waitqueue;
+		spin_lock_bh(&queue->lock);
+		if (!RPC_IS_QUEUED(task)) {
+			spin_unlock_bh(&queue->lock);
 			continue;
 		}
+		rpc_clear_running(task);
+		spin_unlock_bh(&queue->lock);
+		if (task_is_async)
+			return;
 
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
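To make the locking change concrete, here is a minimal userspace sketch of the pattern the patch introduces: the wakeup path and the executor both take the wait queue's lock, and the executor re-checks the QUEUED bit under that lock before clearing RUNNING. This is plain C with pthreads, not the real SUNRPC code; every type, field and function name below is a simplified stand-in invented for illustration.

/*
 * Hypothetical sketch of the lock-protected "queued/running" handshake.
 * wait_queue.lock plays the role of rpc_wait_queue's queue->lock,
 * task.queued/running play the role of RPC_TASK_QUEUED/RPC_TASK_RUNNING.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wait_queue {
	pthread_mutex_t lock;		/* stand-in for queue->lock */
};

struct task {
	struct wait_queue *waitqueue;	/* stand-in for tk_waitqueue */
	bool queued;			/* stand-in for RPC_TASK_QUEUED */
	bool running;			/* stand-in for RPC_TASK_RUNNING */
};

/*
 * Executor side: decide whether the task really goes to sleep.  The lock
 * ensures that if a waker already cleared "queued" and marked the task
 * runnable, we do not clear "running" underneath it.
 */
static bool task_goes_to_sleep(struct task *t)
{
	struct wait_queue *q = t->waitqueue;

	pthread_mutex_lock(&q->lock);
	if (!t->queued) {
		/* Woken up again before we got here: keep running. */
		pthread_mutex_unlock(&q->lock);
		return false;
	}
	t->running = false;
	pthread_mutex_unlock(&q->lock);

	/*
	 * For an async task the caller must not touch *t past this point:
	 * a waker may hand it to another worker immediately.
	 */
	return true;
}

/* Waker side: make a queued task runnable again, under the same lock. */
static void make_runnable(struct task *t)
{
	struct wait_queue *q = t->waitqueue;

	pthread_mutex_lock(&q->lock);
	t->queued = false;
	t->running = true;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct wait_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task t = { .waitqueue = &q, .queued = true, .running = true };

	printf("sleeps: %d\n", task_goes_to_sleep(&t));	/* 1: still queued */
	make_runnable(&t);
	printf("sleeps: %d\n", task_goes_to_sleep(&t));	/* 0: already woken */
	return 0;
}

Because both sides serialize on the same lock, the old "careful, we may have raced" re-check and back-off in rpc_make_runnable() is no longer needed, which is why the patch can delete it.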