| field | value | date |
|---|---|---|
| author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2007-10-16 01:26:11 -0700 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 09:43:02 -0700 |
| commit | a5d76b54a3f3a40385d7f76069a2feac9f1bad63 (patch) | |
| tree | f58c432a4224b3be032bd4a4afa79dfa55d198a6 /mm/page_isolation.c | |
| parent | 75884fb1c6388f3713ddcca662f3647b3129aaeb (diff) | |
memory unplug: page isolation
Implement a generic chunk-of-pages isolation method by using the page grouping ops.

This patch adds MIGRATE_ISOLATE to MIGRATE_TYPES. As a result:
 - MIGRATE_TYPES increases.
 - the bitmap for the migratetype is enlarged.

Pages of the MIGRATE_ISOLATE migratetype will not be allocated even if they are
free. This lets you isolate *freed* pages from users. How to free pages is not
the purpose of this patch; you may use the reclaim and migrate code for that.
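For reference, after this patch the migratetype constants in include/linux/mmzone.h look roughly as below. This is a sketch from memory of the 2.6.23-mm era, not the exact hunk; in particular, whether MIGRATE_RESERVE is present depends on which sibling patches are applied:

```c
/* include/linux/mmzone.h (sketch, not the exact hunk): MIGRATE_ISOLATE
 * is appended just below MIGRATE_TYPES, so MIGRATE_TYPES grows by one
 * and the per-pageblock migratetype bitmap needs the extra range. */
#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3	/* assumption: from a sibling patch */
#define MIGRATE_ISOLATE       4	/* can't allocate from here */
#define MIGRATE_TYPES         5
```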
If start_isolate_page_range(start, end) is called:
 - the migratetype of the range becomes MIGRATE_ISOLATE if its current
   type is MIGRATE_MOVABLE. (*) This check can be updated as other
   memory-reclaim work makes progress.
 - MIGRATE_ISOLATE is not on the migratetype fallback list.
 - all free pages and will-be-freed pages are isolated.

To check whether all pages in the range are isolated, use test_pages_isolated().
To cancel the isolation, use undo_isolate_page_range().
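A hypothetical caller, e.g. a memory-hotremove path, would combine the three entry points roughly as follows. Only start_isolate_page_range(), test_pages_isolated() and undo_isolate_page_range() come from this patch; offline_range() and the reclaim/migration step are illustrative assumptions:

```c
/* Sketch of the intended calling sequence (hypothetical caller). */
int offline_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* From here on, freed pages in the range land on the isolate
	 * freelist and cannot be handed out again. */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;	/* some pageblock was not MIGRATE_MOVABLE */

	/* ... free the still-in-use pages via reclaim/migration;
	 * deliberately out of scope for this patch ... */

	/* 0 only if every page in the range is free or isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn);
	if (ret)
		undo_isolate_page_range(start_pfn, end_pfn);
	return ret;
}
```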
Changes V6 -> V7
 - removed unnecessary #ifdef
There is still HOLES_IN_ZONE handling code... I'd be glad if we could remove it.
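The HOLES_IN_ZONE handling in question is the pfn_valid_within() checks in the new file; as defined in include/linux/mmzone.h of this era, they compile away on most configurations:

```c
/* include/linux/mmzone.h: only architectures with holes inside a
 * MAX_ORDER-sized block pay for the check; everywhere else it is
 * constant 1 and the loops in page_isolation.c lose nothing. */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
```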
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_isolation.c')
| -rw-r--r-- | mm/page_isolation.c | 138 | 
1 file changed, 138 insertions, 0 deletions
```diff
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
new file mode 100644
index 00000000000..8f92a29695c
--- /dev/null
+++ b/mm/page_isolation.c
@@ -0,0 +1,138 @@
+/*
+ * linux/mm/page_isolation.c
+ */
+
+#include <stddef.h>
+#include <linux/mm.h>
+#include <linux/page-isolation.h>
+#include <linux/pageblock-flags.h>
+#include "internal.h"
+
+static inline struct page *
+__first_valid_page(unsigned long pfn, unsigned long nr_pages)
+{
+	int i;
+	for (i = 0; i < nr_pages; i++)
+		if (pfn_valid_within(pfn + i))
+			break;
+	if (unlikely(i == nr_pages))
+		return NULL;
+	return pfn_to_page(pfn + i);
+}
+
+/*
+ * start_isolate_page_range() -- set the page-allocation-type of a range
+ * of pages to MIGRATE_ISOLATE.
+ * @start_pfn: The lower PFN of the range to be isolated.
+ * @end_pfn: The upper PFN of the range to be isolated.
+ *
+ * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
+ * the range will never be allocated. Any free pages and pages freed in the
+ * future will not be allocated again.
+ *
+ * start_pfn/end_pfn must be aligned to pageblock_order.
+ * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
+ */
+int
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long pfn;
+	unsigned long undo_pfn;
+	struct page *page;
+
+	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
+	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
+
+	for (pfn = start_pfn;
+	     pfn < end_pfn;
+	     pfn += pageblock_nr_pages) {
+		page = __first_valid_page(pfn, pageblock_nr_pages);
+		if (page && set_migratetype_isolate(page)) {
+			undo_pfn = pfn;
+			goto undo;
+		}
+	}
+	return 0;
+undo:
+	for (pfn = start_pfn;
+	     pfn <= undo_pfn;
+	     pfn += pageblock_nr_pages)
+		unset_migratetype_isolate(pfn_to_page(pfn));
+
+	return -EBUSY;
+}
+
+/*
+ * Make isolated pages available again.
+ */
+int
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long pfn;
+	struct page *page;
+	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
+	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
+	for (pfn = start_pfn;
+	     pfn < end_pfn;
+	     pfn += pageblock_nr_pages) {
+		page = __first_valid_page(pfn, pageblock_nr_pages);
+		if (!page || get_pageblock_flags(page) != MIGRATE_ISOLATE)
+			continue;
+		unset_migratetype_isolate(page);
+	}
+	return 0;
+}
+/*
+ * Test whether all pages in the range are free (i.e. isolated).
+ * All pages in [start_pfn...end_pfn) must be in the same zone.
+ * zone->lock must be held before calling this.
+ *
+ * Returns 1 if all pages in the range are isolated, 0 otherwise.
+ */
+static int
+__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
+{
+	struct page *page;
+
+	while (pfn < end_pfn) {
+		if (!pfn_valid_within(pfn)) {
+			pfn++;
+			continue;
+		}
+		page = pfn_to_page(pfn);
+		if (PageBuddy(page))
+			pfn += 1 << page_order(page);
+		else if (page_count(page) == 0 &&
+				page_private(page) == MIGRATE_ISOLATE)
+			pfn += 1;
+		else
+			break;
+	}
+	if (pfn < end_pfn)
+		return 0;
+	return 1;
+}
+
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long pfn;
+	struct page *page;
+
+	pfn = start_pfn;
+	/*
+	 * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages
+	 * is not necessarily aligned to pageblock_nr_pages.
+	 * Check each pageblock's migratetype first.
+	 */
+	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+		page = __first_valid_page(pfn, pageblock_nr_pages);
+		if (page && get_pageblock_flags(page) != MIGRATE_ISOLATE)
+			break;
+	}
+	if (pfn < end_pfn)
+		return -EBUSY;
+	/* Check all pages are free or marked as ISOLATED */
+	if (__test_page_isolated_in_pageblock(start_pfn, end_pfn))
+		return 0;
+	return -EBUSY;
+}
```
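The set_migratetype_isolate()/unset_migratetype_isolate() helpers called above are added to mm/page_alloc.c by the same series and therefore fall outside this diffstat. From memory of that companion hunk, they look roughly like this (a sketch, not the exact code):

```c
/* mm/page_alloc.c (sketch of the companion hunk): flip a pageblock's
 * migratetype and move its free pages between freelists under zone->lock. */
int set_migratetype_isolate(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&zone->lock, flags);
	/* For now only MIGRATE_MOVABLE pageblocks may be isolated. */
	if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	move_freepages_block(zone, page, MIGRATE_ISOLATE);
	ret = 0;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

void unset_migratetype_isolate(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	move_freepages_block(zone, page, MIGRATE_MOVABLE);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
```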