| author | Tony Lindgren <tony@atomide.com> | 2011-03-10 18:54:14 -0800 | 
|---|---|---|
| committer | Tony Lindgren <tony@atomide.com> | 2011-03-10 18:54:14 -0800 | 
| commit | 94a06b74e724caabcf0464c81527cfbcae0c8aff (patch) | |
| tree | 3570b6a627382a5eb5c8328b4959f615544d8e62 /block/blk-core.c | |
| parent | 0dde52a9f5330eec240660191a94b51bd911ffcd (diff) | |
| parent | 9062511097683b4422f023d181b4a8b2db1a7a72 (diff) | |
| download | olio-linux-3.10-94a06b74e724caabcf0464c81527cfbcae0c8aff.tar.xz olio-linux-3.10-94a06b74e724caabcf0464c81527cfbcae0c8aff.zip  | |
Merge branch 'for_2.6.39/pm-misc' of ssh://master.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into omap-for-linus
Diffstat (limited to 'block/blk-core.c')
| -rw-r--r-- | block/blk-core.c | 18 |
|---|---|---|
1 files changed, 6 insertions, 12 deletions
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a2..518dd423a5f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-			struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
```
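
The change above gives `__blk_run_queue()` a second argument, `force_kblockd`, which defers the `->request_fn` invocation to the kblockd workqueue instead of calling it directly. Below is a minimal sketch of how a caller might use the new flag; the helper name `my_kick_queue()` and its calling context are illustrative assumptions, not part of this commit.

```c
/*
 * Illustrative sketch only (not from this commit): a caller that must
 * not recurse into ->request_fn, e.g. deep in a completion path, can
 * ask __blk_run_queue() to punt the work to kblockd.
 */
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void my_kick_queue(struct request_queue *q)	/* hypothetical helper */
{
	unsigned long flags;

	/*
	 * Per the kerneldoc in the diff, __blk_run_queue() must be
	 * called with the queue lock held and interrupts disabled.
	 */
	spin_lock_irqsave(q->queue_lock, flags);

	/* true = don't run q->request_fn() here; let kblockd do it. */
	__blk_run_queue(q, true);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
```

The in-tree callers touched by this patch all pass `false`, preserving the old behavior of running the queue directly when it is safe to do so.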