author    Tony Lindgren <tony@atomide.com>  2012-11-09 14:58:01 -0800
committer Tony Lindgren <tony@atomide.com>  2012-11-09 14:58:01 -0800
commit    edf8dde393f879fc2d8c22d4bc01ff8d37b80e1a (patch)
tree      61425adc8b2059a9c12a3ce66ba7361fea1b0bb3 /mm/rmap.c
parent    6ba54ab4a49bbad736b0254aa6bdf0cb83013815 (diff)
parent    3d70f8c617a436c7146ecb81df2265b4626dfe89 (diff)
download  olio-linux-3.10-edf8dde393f879fc2d8c22d4bc01ff8d37b80e1a.tar.xz
          olio-linux-3.10-edf8dde393f879fc2d8c22d4bc01ff8d37b80e1a.zip
Merge branch 'linus' into omap-for-v3.8/cleanup-headers-prepare-multiplatform-v3
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 7df7984d476..2ee1ef0f317 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
 
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
 
 	if (page_mapped(page)) {
 		struct address_space *mapping = page_mapping(page);
-		if (mapping) {
+		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-				ret = 1;
-		}
 	}
 
 	return ret;
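
For reference, page_mkclean() after this hunk reduces to the following
(reconstructed from the hunk plus its unchanged context; the BUG_ON line
is recalled from the surrounding v3.7-era source and should be treated as
an assumption). The point of the hunk is that the s390 storage-key probe
is gone from the cleaning path; the hardware dirty bit is instead caught
at final unmap in page_remove_rmap() below:

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));	/* unchanged context, assumed */

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}

	return ret;
}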
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	bool anon = PageAnon(page);
 	bool locked;
 	unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
 	 * this if the page is anon, so about to be freed; but perhaps
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
+	 *
+	 * And we can skip it on file pages, so long as the filesystem
+	 * participates in dirty tracking; but need to catch shm and tmpfs
+	 * and ramfs pages which have been modified since creation by read
+	 * fault.
+	 *
+	 * Note that mapping must be decided above, before decrementing
+	 * mapcount (which luckily provides a barrier): once page is unmapped,
+	 * it could be truncated and page->mapping reset to NULL at any moment.
+	 * Note also that we are relying on page_mapping(page) to set mapping
+	 * to &swapper_space when PageSwapCache(page).
 	 */
-	if ((!anon || PageSwapCache(page)) &&
+	if (mapping && !mapping_cap_account_dirty(mapping) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
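
The rewritten guard depends on mapping_cap_account_dirty(), which is why
the first hunk adds <linux/backing-dev.h>. A minimal sketch of how that
predicate resolved in kernels of this era (simplified from memory of
include/linux/backing-dev.h around v3.7; treat the exact capability flag
name as an assumption):

/* Sketch, assuming the v3.7-era backing-dev.h definitions. */
static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	/* dirty accounting is on unless the BDI explicitly opts out */
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

shm, tmpfs, ramfs and swapcache mappings all use BDIs that opt out of
dirty accounting, so for exactly the pages the comment singles out the
condition holds and the s390 storage-key test still runs; for
dirty-accounting filesystems, and for pages already truncated
(mapping == NULL), it is skipped.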