-rw-r--r--  block/blk-cgroup.c          2
-rw-r--r--  block/blk-core.c            2
-rw-r--r--  block/blk-ioc.c            92
-rw-r--r--  block/cfq-iosched.c         2
-rw-r--r--  fs/ioprio.c                 2
-rw-r--r--  include/linux/blkdev.h      3
-rw-r--r--  include/linux/iocontext.h   5
-rw-r--r--  kernel/fork.c               2
8 files changed, 18 insertions(+), 92 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f2630944..75642a352a8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 		if (ioc) {
 			ioc_cgroup_changed(ioc);
-			put_io_context(ioc, NULL);
+			put_io_context(ioc);
 		}
 	}
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 63670257511..532b3a21b38 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 7490b6da245..9884fd7427f 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release.  As the ioc's can't be the same, this is okay but
- * makes lockdep whine.  Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
+	if (et->ops.elevator_exit_icq_fn)
 		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -149,81 +131,29 @@ static void ioc_release_fn(struct work_struct *work)
 
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.  If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q.  This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc.  This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages.  The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency.  This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse order double locking and we may
+	 * already be holding a queue_lock.  Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			/* spin_trylock() always successes in UP case */
-			if (this_q != locked_q &&
-			    !spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
-
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
-		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -238,7 +168,7 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	put_io_context(ioc);
 }
 
 /**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da21c24dbed..5684df6848b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1794,7 +1794,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+		put_io_context(cfqd->active_cic->icq.ioc);
 		cfqd->active_cic = NULL;
 	}
 }
diff --git a/fs/ioprio.c b/fs/ioprio.c
index f84b380d65e..0f1b9515213 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
 		ioc_ioprio_changed(ioc, ioprio);
-		put_io_context(ioc, NULL);
+		put_io_context(ioc);
 	}
 
 	return err;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6c6a1f00806..606cf339bb5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -399,9 +399,6 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
-#ifdef CONFIG_LOCKDEP
-	int			ioc_release_depth;
-#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 7e1371c4bcc..119773eebe3 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -133,7 +133,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 struct task_struct;
 
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
+void put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
@@ -141,8 +141,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc,
-				  struct request_queue *locked_q) { }
+static inline void put_io_context(struct io_context *ioc) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c..c574aefa8d1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -890,7 +890,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;
 
 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc, NULL);
+		put_io_context(new_ioc);
 	}
 #endif
 	return 0;
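The pattern the patch settles on: the final put never tears the io_context down inline, because the caller may already hold a queue_lock that the teardown path also needs; it only schedules ioc->release_work when icqs remain. The program below is a minimal userspace sketch of that pattern, not kernel code; every name in it (ioc_demo, demo_put_io_context, demo_release_worker) is hypothetical, and a pthread stands in for the workqueue.

/*
 * Userspace sketch (hypothetical names throughout) of "drop the ref,
 * defer the teardown": put() only decrements; if the last reference
 * goes away while resources are still attached, actual release runs
 * in a separate context that holds no locks and so can acquire them
 * in the correct (reverse) order.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ioc_demo {
	atomic_long refcount;
	bool has_icqs;	/* stands in for !hlist_empty(&ioc->icq_list) */
};

/* Plays the role of ioc_release_fn() running off a workqueue. */
static void *demo_release_worker(void *arg)
{
	struct ioc_demo *ioc = arg;

	printf("worker: exiting icqs, freeing ioc\n");
	free(ioc);
	return NULL;
}

static void demo_put_io_context(struct ioc_demo *ioc)
{
	pthread_t worker;

	if (ioc == NULL)
		return;

	/* atomic_fetch_sub() returns the old value; 1 means last ref */
	if (atomic_fetch_sub(&ioc->refcount, 1) != 1)
		return;

	if (ioc->has_icqs) {
		/* Last ref with icqs attached: defer rather than free
		 * inline.  A real workqueue is async; the join here just
		 * keeps the demo deterministic. */
		pthread_create(&worker, NULL, demo_release_worker, ioc);
		pthread_join(worker, NULL);
	} else {
		free(ioc);
	}
}

int main(void)
{
	struct ioc_demo *ioc = malloc(sizeof(*ioc));

	atomic_init(&ioc->refcount, 1);
	ioc->has_icqs = true;
	demo_put_io_context(ioc);	/* drops the last reference */
	return 0;
}

The point of the indirection is lock ordering: teardown must take each queue_lock outside ioc->lock, and a fresh worker context starts with no locks held, so it can always lock in that order without the trylock games the removed fast path needed.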