Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	78
1 file changed, 38 insertions(+), 40 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bab8e3bc420..6092f331b32 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 	if (unlikely(page_group_by_mobility_disabled))
 		migratetype = MIGRATE_UNMOVABLE;
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-static int move_freepages_block(struct zone *zone, struct page *page,
-				int migratetype)
+int move_freepages_block(struct zone *zone, struct page *page,
+			 int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -4300,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
-	if (HPAGE_SHIFT > PAGE_SHIFT)
-		return HUGETLB_PAGE_ORDER;
-
-	return MAX_ORDER-1;
-}
-
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
 {
+	unsigned int order;
+
 	/* Check that pageblock_nr_pages has not already been setup */
 	if (pageblock_order)
 		return;
 
+	if (HPAGE_SHIFT > PAGE_SHIFT)
+		order = HUGETLB_PAGE_ORDER;
+	else
+		order = MAX_ORDER - 1;
+
 	/*
 	 * Assume the largest contiguous order of interest is a huge page.
-	 * This value may be variable depending on boot parameters on IA64
+	 * This value may be variable depending on boot parameters on IA64 and
+	 * powerpc.
 	 */
 	pageblock_order = order;
 }
@@ -4326,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
 
 /*
  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
  */
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
 {
-	return MAX_ORDER-1;
 }
-#define set_pageblock_order(x)	do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -4361,7 +4358,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
-		enum lru_list lru;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4411,18 +4407,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		for_each_lru(lru)
-			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
-		zone->reclaim_stat.recent_rotated[0] = 0;
-		zone->reclaim_stat.recent_rotated[1] = 0;
-		zone->reclaim_stat.recent_scanned[0] = 0;
-		zone->reclaim_stat.recent_scanned[1] = 0;
+		lruvec_init(&zone->lruvec, zone);
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
 			continue;
 
-		set_pageblock_order(pageblock_default_order());
+		set_pageblock_order();
 		setup_usemap(pgdat, zone, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
@@ -4815,7 +4806,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	find_zone_movable_pfns_for_nodes();
 
 	/* Print out the zone ranges */
-	printk("Zone PFN ranges:\n");
+	printk("Zone ranges:\n");
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
@@ -4824,22 +4815,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 				arch_zone_highest_possible_pfn[i])
 			printk(KERN_CONT "empty\n");
 		else
-			printk(KERN_CONT "%0#10lx -> %0#10lx\n",
-				arch_zone_lowest_possible_pfn[i],
-				arch_zone_highest_possible_pfn[i]);
+			printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+				arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+				(arch_zone_highest_possible_pfn[i]
+					<< PAGE_SHIFT) - 1);
 	}
 
 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
-	printk("Movable zone start PFN for each node\n");
+	printk("Movable zone start for each node\n");
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (zone_movable_pfn[i])
-			printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+			printk("  Node %d: %#010lx\n", i,
+			       zone_movable_pfn[i] << PAGE_SHIFT);
 	}
 
 	/* Print out the early_node_map[] */
-	printk("Early memory PFN ranges\n");
+	printk("Early memory node ranges\n");
 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+		printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
+		       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
@@ -5657,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
 		.nr_migratepages = 0,
 		.order = -1,
 		.zone = page_zone(pfn_to_page(start)),
-		.sync = true,
+		.mode = COMPACT_SYNC,
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -5938,7 +5932,7 @@ bool is_free_buddy_page(struct page *page)
 }
 #endif
 
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_locked,		"locked"	},
 	{1UL << PG_error,		"error"		},
 	{1UL << PG_referenced,		"referenced"	},
@@ -5973,7 +5967,9 @@ static struct trace_print_flags pageflag_names[] = {
 #ifdef CONFIG_MEMORY_FAILURE
 	{1UL << PG_hwpoison,		"hwpoison"	},
 #endif
-	{-1UL,				NULL		},
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	{1UL << PG_compound_lock,	"compound_lock"	},
+#endif
 };
 
 static void dump_page_flags(unsigned long flags)
@@ -5982,12 +5978,14 @@ static void dump_page_flags(unsigned long flags)
 	unsigned long mask;
 	int i;
 
+	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
 	printk(KERN_ALERT "page flags: %#lx(", flags);
 
 	/* remove zone id */
 	flags &= (1UL << NR_PAGEFLAGS) - 1;
 
-	for (i = 0; pageflag_names[i].name && flags; i++) {
+	for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
 		mask = pageflag_names[i].mask;
 
 		if ((flags & mask) != mask)
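
Note on the last two hunks: the patch drops the {-1UL, NULL} sentinel from pageflag_names[], walks the table with ARRAY_SIZE() instead, and adds a BUILD_BUG_ON() so the table can never silently fall out of sync with the page-flag enum. The following is a minimal standalone sketch of that pattern, not part of the patch; the names (demo_flags, flag_names, dump_flags) and the userspace ARRAY_SIZE/BUILD_BUG_ON macros are illustrative stand-ins for the kernel's.

#include <stdio.h>

#define ARRAY_SIZE(arr)     (sizeof(arr) / sizeof((arr)[0]))
/* negative array size forces a compile error when cond is true */
#define BUILD_BUG_ON(cond)  ((void)sizeof(char[1 - 2 * !!(cond)]))

enum demo_flags { DF_LOCKED, DF_ERROR, DF_REFERENCED, __NR_DEMO_FLAGS };

static const struct { unsigned long mask; const char *name; } flag_names[] = {
	{ 1UL << DF_LOCKED,	"locked"     },
	{ 1UL << DF_ERROR,	"error"      },
	{ 1UL << DF_REFERENCED,	"referenced" },
};

static void dump_flags(unsigned long flags)
{
	size_t i;

	/* table must describe every defined flag, or the build fails */
	BUILD_BUG_ON(ARRAY_SIZE(flag_names) != __NR_DEMO_FLAGS);

	printf("flags: %#lx(", flags);
	for (i = 0; i < ARRAY_SIZE(flag_names) && flags; i++) {
		unsigned long mask = flag_names[i].mask;

		if ((flags & mask) != mask)
			continue;

		/* clear the bits we just named so the loop can stop early */
		flags &= ~mask;
		printf("%s%s", flag_names[i].name, flags ? "|" : "");
	}
	printf(")\n");
}

int main(void)
{
	/* prints: flags: 0x5(locked|referenced) */
	dump_flags((1UL << DF_LOCKED) | (1UL << DF_REFERENCED));
	return 0;
}

Bounding the loop by ARRAY_SIZE() rather than by a NULL sentinel is what lets the table carry conditional entries (the #ifdef CONFIG_TRANSPARENT_HUGEPAGE hunk) without a terminator, while the compile-time check keeps the table and the flag count tied together.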