Diffstat (limited to 'mm/filemap.c')
-rw-r--r--   mm/filemap.c   67
1 file changed, 6 insertions(+), 61 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 83a45d35468..380776c2a9a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -155,45 +155,15 @@ void remove_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(remove_from_page_cache);
 
-static int sync_page(void *word)
+static int sleep_on_page(void *word)
 {
-	struct address_space *mapping;
-	struct page *page;
-
-	page = container_of((unsigned long *)word, struct page, flags);
-
-	/*
-	 * page_mapping() is being called without PG_locked held.
-	 * Some knowledge of the state and use of the page is used to
-	 * reduce the requirements down to a memory barrier.
-	 * The danger here is of a stale page_mapping() return value
-	 * indicating a struct address_space different from the one it's
-	 * associated with when it is associated with one.
-	 * After smp_mb(), it's either the correct page_mapping() for
-	 * the page, or an old page_mapping() and the page's own
-	 * page_mapping() has gone NULL.
-	 * The ->sync_page() address_space operation must tolerate
-	 * page_mapping() going NULL. By an amazing coincidence,
-	 * this comes about because none of the users of the page
-	 * in the ->sync_page() methods make essential use of the
-	 * page_mapping(), merely passing the page down to the backing
-	 * device's unplug functions when it's non-NULL, which in turn
-	 * ignore it for all cases but swap, where only page_private(page) is
-	 * of interest. When page_mapping() does go NULL, the entire
-	 * call stack gracefully ignores the page and returns.
-	 * -- wli
-	 */
-	smp_mb();
-	mapping = page_mapping(page);
-	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
-		mapping->a_ops->sync_page(page);
 	io_schedule();
 	return 0;
 }
 
-static int sync_page_killable(void *word)
+static int sleep_on_page_killable(void *word)
 {
-	sync_page(word);
+	sleep_on_page(word);
 	return fatal_signal_pending(current) ? -EINTR : 0;
 }
 
@@ -479,12 +449,6 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
-static int __sleep_on_page_lock(void *word)
-{
-	io_schedule();
-	return 0;
-}
-
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
@@ -512,7 +476,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
 	if (test_bit(bit_nr, &page->flags))
-		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
+		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -576,17 +540,12 @@ EXPORT_SYMBOL(end_page_writeback);
 /**
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @page: the page to lock
- *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
- * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
- * chances are that on the second loop, the block layer's plug list is empty,
- * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
 void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -596,24 +555,10 @@ int __lock_page_killable(struct page *page)
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
 	return __wait_on_bit_lock(page_waitqueue(page), &wait,
-					sync_page_killable, TASK_KILLABLE);
+					sleep_on_page_killable, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
-/**
- * __lock_page_nosync - get a lock on the page, without calling sync_page()
- * @page: the page to lock
- *
- * Variant of lock_page that does not require the caller to hold a reference
- * on the page's mapping.
- */
-void __lock_page_nosync(struct page *page)
-{
-	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
-	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
-							TASK_UNINTERRUPTIBLE);
-}
-
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
 {
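
For readers following the mechanics, the pattern this patch leaves behind is the generic bit-waitqueue one: __wait_on_bit_lock() tries to atomically set the bit, and while it is held by someone else it invokes the supplied "action" callback to sleep. The following is a condensed sketch of the post-patch code, assembled from the diff above (not the complete file):

/*
 * Sketch of the post-patch wait path, condensed from the diff above.
 * The "action" callback passed to the bit-wait helpers no longer prods
 * the backing device via ->sync_page(); it simply sleeps until
 * unlock_page()/end_page_writeback() wakes the page's waitqueue.
 */
static int sleep_on_page(void *word)
{
	io_schedule();	/* give up the CPU until we are woken */
	return 0;	/* 0 == not interrupted; the caller re-tests the bit */
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	/* let a fatal signal abort the wait; propagated as -EINTR */
	return fatal_signal_pending(current) ? -EINTR : 0;
}

void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	/* sleep via sleep_on_page() until PG_locked can be taken */
	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}

With the ->sync_page() call gone, no driver code runs inside the wait loop that could flip the task back to TASK_RUNNING, so the busywait hazard described in the deleted comment disappears. That is also why __lock_page_nosync() and __sleep_on_page_lock() lose their reason to exist and are removed above: every lock_page() variant now sleeps the same way.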