| author | Minchan Kim <minchan@kernel.org> | 2012-07-31 16:43:50 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-31 18:42:45 -0700 |
| commit | ee6f509c3274014d1f52e7a7a10aee9f85393c5e (patch) | |
| tree | d8e5c816de0752b70b63f5de50ed52808ef3be9f /mm/page_isolation.c | |
| parent | 876aafbfd9ba5bb352f1b14622c27f3fe9a99013 (diff) | |
| download | olio-linux-3.10-ee6f509c3274014d1f52e7a7a10aee9f85393c5e.tar.xz, olio-linux-3.10-ee6f509c3274014d1f52e7a7a10aee9f85393c5e.zip | |
mm: factor out memory isolate functions
mm/page_alloc.c contains some memory isolation functions, but they are used only
when CONFIG_{CMA|MEMORY_HOTPLUG|MEMORY_FAILURE} is enabled.  So let's put them
behind a new CONFIG_MEMORY_ISOLATION option: this reduces binary size, and
callers can test the single CONFIG_MEMORY_ISOLATION symbol instead of
`if defined CONFIG_{CMA|MEMORY_HOTPLUG|MEMORY_FAILURE}`.
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
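
For illustration (not part of this patch, whose hunk below only touches mm/page_isolation.c): the simplification the changelog describes is that guarded code and build rules can key off a single symbol once the three features select it. A minimal, hypothetical sketch of the guard change:

```c
/*
 * Hypothetical sketch of the changelog's point, not code from this patch.
 * Before, anything conditional on the isolation helpers had to test all
 * three features:
 */
#if defined(CONFIG_CMA) || defined(CONFIG_MEMORY_HOTPLUG) || \
	defined(CONFIG_MEMORY_FAILURE)
/* ... declarations or calls that need page isolation ... */
#endif

/*
 * After this series, those features can select the new symbol, so a single
 * check is enough:
 */
#ifdef CONFIG_MEMORY_ISOLATION
/* ... the same declarations or calls ... */
#endif
```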
Diffstat (limited to 'mm/page_isolation.c')
| -rw-r--r-- | mm/page_isolation.c | 71 | 
1 file changed, 71 insertions, 0 deletions
```diff
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b..fb482cf438d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,79 @@
 #include <linux/mm.h>
 #include <linux/page-isolation.h>
 #include <linux/pageblock-flags.h>
+#include <linux/memory.h>
 #include "internal.h"
 
+int set_migratetype_isolate(struct page *page)
+{
+	struct zone *zone;
+	unsigned long flags, pfn;
+	struct memory_isolate_notify arg;
+	int notifier_ret;
+	int ret = -EBUSY;
+
+	zone = page_zone(page);
+
+	spin_lock_irqsave(&zone->lock, flags);
+
+	pfn = page_to_pfn(page);
+	arg.start_pfn = pfn;
+	arg.nr_pages = pageblock_nr_pages;
+	arg.pages_found = 0;
+
+	/*
+	 * It may be possible to isolate a pageblock even if the
+	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
+	 * notifier chain is used by balloon drivers to return the
+	 * number of pages in a range that are held by the balloon
+	 * driver to shrink memory. If all the pages are accounted for
+	 * by balloons, are free, or on the LRU, isolation can continue.
+	 * Later, for example, when memory hotplug notifier runs, these
+	 * pages reported as "can be isolated" should be isolated(freed)
+	 * by the balloon driver through the memory notifier chain.
+	 */
+	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+	notifier_ret = notifier_to_errno(notifier_ret);
+	if (notifier_ret)
+		goto out;
+	/*
+	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+	 * We just check MOVABLE pages.
+	 */
+	if (!has_unmovable_pages(zone, page, arg.pages_found))
+		ret = 0;
+
+	/*
+	 * immobile means "not-on-lru" paes. If immobile is larger than
+	 * removable-by-driver pages reported by notifier, we'll fail.
+	 */
+
+out:
+	if (!ret) {
+		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+		move_freepages_block(zone, page, MIGRATE_ISOLATE);
+	}
+
+	spin_unlock_irqrestore(&zone->lock, flags);
+	if (!ret)
+		drain_all_pages();
+	return ret;
+}
+
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+{
+	struct zone *zone;
+	unsigned long flags;
+	zone = page_zone(page);
+	spin_lock_irqsave(&zone->lock, flags);
+	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+		goto out;
+	set_pageblock_migratetype(page, migratetype);
+	move_freepages_block(zone, page, migratetype);
+out:
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
 static inline struct page *
 __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 {
```
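
The two helpers moved here each isolate (or un-isolate) a single pageblock under the zone lock. A minimal sketch, in the spirit of start_isolate_page_range() in this same file, of how a range-based caller might drive them pageblock by pageblock and roll back on failure; it is simplified and assumes a pageblock-aligned, fully valid pfn range and the 3.5-era signatures shown in the diff:

```c
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>

/* Sketch only: mirrors the start_isolate_page_range() pattern, simplified. */
static int isolate_range_sketch(unsigned long start_pfn, unsigned long end_pfn,
				unsigned migratetype)
{
	unsigned long pfn, undo_pfn;

	/* Walk the range one pageblock at a time. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		/* set_migratetype_isolate() returns -EBUSY on unmovable pages. */
		if (set_migratetype_isolate(pfn_to_page(pfn)))
			goto undo;
	}
	return 0;

undo:
	/* Restore the original migratetype of everything isolated so far. */
	for (undo_pfn = start_pfn; undo_pfn < pfn; undo_pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(undo_pfn), migratetype);
	return -EBUSY;
}
```

The rollback path is why unset_migratetype_isolate() takes the caller's original migratetype: only the caller knows whether the pageblock was MIGRATE_MOVABLE, MIGRATE_CMA, and so on before isolation.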