author    Haicheng Li <haicheng.li@linux.intel.com>        2010-05-24 14:32:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-25 08:07:01 -0700
commit    1f522509c77a5dea8dc384b735314f03908a6415
tree      4b848527b90877a8a64c46e8e2d76723405c319d
parent    319774e25fa4b7641bdc3b0a464dd84e62103347
mem-hotplug: avoid multiple zones sharing same boot strapping boot_pageset
For each newly populated zone of a hot-added node, its pagesets must be
updated with dynamically allocated per_cpu_pageset structs for all possible
CPUs:
    1) Detach zone->pageset from the shared boot_pageset
       at the end of __build_all_zonelists().
    2) Use a mutex to protect zone->pageset while it is still
       shared in onlined_pages() (see the sketch after this list).
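To make the hazard and the handoff concrete, here is a minimal userspace
sketch, not kernel code: the struct definitions, the NR_CPUS value, and the
online_zone() helper are simplified stand-ins, and only boot_pageset,
setup_zone_pageset() and zone_pageset_mutex are named after their kernel
counterparts. It models zones starting out on one shared bootstrap pageset
and each being handed its own per-CPU pagesets under the mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4  /* illustrative; the kernel sizes this per-config */

/* Simplified stand-in for the kernel's per-CPU page lists. */
struct per_cpu_pageset { int count; };

struct zone {
	const char *name;
	struct per_cpu_pageset *pageset;  /* one entry per CPU */
};

/* One bootstrap pageset per CPU, shared by every zone early in boot. */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/* Counterpart of the patch's zone_pageset_mutex: serializes the handoff. */
static pthread_mutex_t zone_pageset_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Detach from boot_pageset: give the zone its own per-CPU state. */
static void setup_zone_pageset(struct zone *zone)
{
	zone->pageset = calloc(NR_CPUS, sizeof(*zone->pageset));
}

/* Illustrative helper, not a kernel function. */
static void online_zone(struct zone *zone)
{
	pthread_mutex_lock(&zone_pageset_mutex);
	if (zone->pageset == boot_pageset)  /* still bootstrap-shared? */
		setup_zone_pageset(zone);
	pthread_mutex_unlock(&zone_pageset_mutex);
}

int main(void)
{
	/* Two zones on different nodes, both bootstrapping off boot_pageset:
	 * without the handoff they would mutate the same per-CPU state. */
	struct zone a = { "node0/ZONE_NORMAL", boot_pageset };
	struct zone b = { "node1/ZONE_NORMAL", boot_pageset };

	online_zone(&a);
	online_zone(&b);
	printf("%s: %p, %s: %p, boot_pageset: %p\n",
	       a.name, (void *)a.pageset, b.name, (void *)b.pageset,
	       (void *)boot_pageset);
	return 0;
}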
Otherwise, multiple zones on different nodes would share the same bootstrap
boot_pageset for the same CPU, which eventually causes the kernel panic below:
  ------------[ cut here ]------------
  kernel BUG at mm/page_alloc.c:1239!
  invalid opcode: 0000 [#1] SMP
  ...
  Call Trace:
   [<ffffffff811300c1>] __alloc_pages_nodemask+0x131/0x7b0
   [<ffffffff81162e67>] alloc_pages_current+0x87/0xd0
   [<ffffffff81128407>] __page_cache_alloc+0x67/0x70
   [<ffffffff811325f0>] __do_page_cache_readahead+0x120/0x260
   [<ffffffff81132751>] ra_submit+0x21/0x30
   [<ffffffff811329c6>] ondemand_readahead+0x166/0x2c0
   [<ffffffff81132ba0>] page_cache_async_readahead+0x80/0xa0
   [<ffffffff8112a0e4>] generic_file_aio_read+0x364/0x670
   [<ffffffff81266cfa>] nfs_file_read+0xca/0x130
   [<ffffffff8117b20a>] do_sync_read+0xfa/0x140
   [<ffffffff8117bf75>] vfs_read+0xb5/0x1a0
   [<ffffffff8117c151>] sys_read+0x51/0x80
   [<ffffffff8103c032>] system_call_fastpath+0x16/0x1b
  RIP  [<ffffffff8112ff13>] get_page_from_freelist+0x883/0x900
   RSP <ffff88000d1e78a8>
  ---[ end trace 4bda28328b9990db ]---
[akpm@linux-foundation.org: merge fix]
Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <andi.kleen@intel.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/mmzone.h |  2
 init/main.c            |  2
 kernel/cpu.c           |  2
 mm/memory_hotplug.c    | 18
 mm/page_alloc.c        | 17
 5 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f6f2c505fa7..a367ed5bb3f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -652,7 +652,7 @@ typedef struct pglist_data {
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
-void build_all_zonelists(void);
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
diff --git a/init/main.c b/init/main.c
index 22881b5e95e..3bdb152f412 100644
--- a/init/main.c
+++ b/init/main.c
@@ -567,7 +567,7 @@ asmlinkage void __init start_kernel(void)
 	setup_per_cpu_areas();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
-	build_all_zonelists();
+	build_all_zonelists(NULL);
 	page_alloc_init();
 
 	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a3fbcc0a0ab..3e8b3ba2717 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -358,7 +358,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	}
 
 	if (pgdat->node_zonelists->_zonerefs->zone == NULL)
-		build_all_zonelists();
+		build_all_zonelists(NULL);
 #endif
 
 	cpu_maps_update_begin();
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 85eb4d342ac..089cc97aed3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -389,6 +389,11 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	int nid;
 	int ret;
 	struct memory_notify arg;
+	/*
+	 * mutex to protect zone->pageset when it's still shared
+	 * in onlined_pages()
+	 */
+	static DEFINE_MUTEX(zone_pageset_mutex);
 
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
@@ -415,12 +420,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	 * This means the page allocator ignores this zone.
 	 * So, zonelist must be updated after online.
 	 */
+	mutex_lock(&zone_pageset_mutex);
 	if (!populated_zone(zone))
 		need_zonelists_rebuild = 1;
 
 	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
 		online_pages_range);
 	if (ret) {
+		mutex_unlock(&zone_pageset_mutex);
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 			nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
@@ -429,8 +436,12 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
 
+	if (need_zonelists_rebuild)
+		build_all_zonelists(zone);
+	else
+		zone_pcp_update(zone);
 
-	zone_pcp_update(zone);
+	mutex_unlock(&zone_pageset_mutex);
 	setup_per_zone_wmarks();
 	calculate_zone_inactive_ratio(zone);
 	if (onlined_pages) {
@@ -438,10 +449,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
 	}
 
-	if (need_zonelists_rebuild)
-		build_all_zonelists();
-	else
-		vm_total_pages = nr_free_pagecache_pages();
+	vm_total_pages = nr_free_pagecache_pages();
 
 	writeback_set_ratelimit();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 595d0ac211e..21c52d2d862 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2572,7 +2572,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 				NUMA_ZONELIST_ORDER_LEN);
 			user_zonelist_order = oldval;
 		} else if (oldval != user_zonelist_order)
-			build_all_zonelists();
+			build_all_zonelists(NULL);
 	}
 out:
 	mutex_unlock(&zl_order_mutex);
@@ -2922,9 +2922,10 @@ static void build_zonelist_cache(pg_data_t *pgdat)
  */
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static void setup_zone_pageset(struct zone *zone);
 
 /* return values int ....just for stop_machine() */
-static int __build_all_zonelists(void *dummy)
+static __init_refok int __build_all_zonelists(void *data)
 {
 	int nid;
 	int cpu;
@@ -2939,6 +2940,14 @@ static int __build_all_zonelists(void *dummy)
 		build_zonelist_cache(pgdat);
 	}
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* Setup real pagesets for the new zone */
+	if (data) {
+		struct zone *zone = data;
+		setup_zone_pageset(zone);
+	}
+#endif
+
 	/*
 	 * Initialize the boot_pagesets that are going to be used
 	 * for bootstrapping processors. The real pagesets for
@@ -2958,7 +2967,7 @@ static int __build_all_zonelists(void *dummy)
 	return 0;
 }
 
-void build_all_zonelists(void)
+void build_all_zonelists(void *data)
 {
 	set_zonelist_order();
 
@@ -2969,7 +2978,7 @@ void build_all_zonelists(void)
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
-		stop_machine(__build_all_zonelists, NULL, NULL);
+		stop_machine(__build_all_zonelists, data, NULL);
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();
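As a usage note, here is a hedged sketch of the caller convention the widened
signature establishes. The struct zone stub and the printf bodies are
illustrative stand-ins; only the NULL-versus-zone argument shapes and the
caller sites named in the comments come from the patch itself:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct zone. */
struct zone { const char *name; };

/*
 * NULL  -> rebuild zonelists only (start_kernel(), cpu_up(),
 *          the numa_zonelist_order sysctl handler).
 * !NULL -> additionally give that zone real per-CPU pagesets
 *          (online_pages(), called under zone_pageset_mutex).
 */
static void build_all_zonelists(void *data)
{
	struct zone *zone = data;

	if (zone)
		printf("setup_zone_pageset(%s), then rebuild zonelists\n",
		       zone->name);
	else
		printf("rebuild zonelists only\n");
}

int main(void)
{
	struct zone newly_onlined = { "node1/ZONE_NORMAL" };

	build_all_zonelists(NULL);            /* boot / cpu_up / sysctl */
	build_all_zonelists(&newly_onlined);  /* memory hot-add */
	return 0;
}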