diff options
| author | Olof Johansson <olof@lixom.net> | 2012-11-30 09:12:33 -0800 | 
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2012-11-30 09:12:33 -0800 | 
| commit | 5c1af2a7011bf719807de360cb64c2f610269a38 (patch) | |
| tree | fc4e6657d30c6eb48367c40e50c6b7428f96aaea /mm/memcontrol.c | |
| parent | ef7848683f4de4903376638b69e6b4ac729b3ead (diff) | |
| parent | 573e5bbe653d01dc0f27e2d97754db9246b501c8 (diff) | |
| download | olio-linux-3.10-5c1af2a7011bf719807de360cb64c2f610269a38.tar.xz olio-linux-3.10-5c1af2a7011bf719807de360cb64c2f610269a38.zip  | |
Merge branch 'next/pm-samsung' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung into next/pm
From Kukjin Kim:
* 'next/pm-samsung' of git://git.kernel.org/pub/scm/linux/kernel/git/kgene/linux-samsung:
  ARM: EXYNOS: Add flush_cache_all in suspend finisher
  ARM: EXYNOS: Remove scu_enable from cpuidle
  ARM: EXYNOS: Fix soft reboot hang after suspend/resume
  ARM: EXYNOS: Add support for rtc wakeup
  ARM: EXYNOS: fix the hotplug for Cortex-A15
  + Linux 3.7-rc6
Diffstat (limited to 'mm/memcontrol.c')
| -rw-r--r-- | mm/memcontrol.c | 67 | 
1 file changed, 50 insertions, 17 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7acf43bf04a..dd39ba000b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 				      struct mem_cgroup *memcg)
 {
 	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /**
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
 	u64 limit;
-	u64 memsw;
 
 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-	limit += total_swap_pages << PAGE_SHIFT;
-	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
 
 	/*
-	 * If memsw is finite and limits the amount of swap space available
-	 * to this memcg, return that limit.
+	 * Do not consider swap space if we cannot swap due to swappiness
 	 */
-	return min(limit, memsw);
+	if (mem_cgroup_swappiness(memcg)) {
+		u64 memsw;
+
+		limit += total_swap_pages << PAGE_SHIFT;
+		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+		/*
+		 * If memsw is finite and limits the amount of swap space
+		 * available to this memcg, return that limit.
+		 */
+		limit = min(limit, memsw);
+	}
+
+	return limit;
 }
 
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 				int node, int zid, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 	unsigned long flags, loop;
 	struct list_head *list;
 	struct page *busy;
 	struct zone *zone;
 
 	zone = &NODE_DATA(node)->node_zones[zid];
-	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lruvec.lists[lru];
+	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+	list = &lruvec->lists[lru];
 
-	loop = mz->lru_size[lru];
+	loop = mem_cgroup_get_lru_size(lruvec, lru);
 	/* give some margin against EBUSY etc...*/
 	loop += 256;
 	busy = NULL;
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+		lruvec_init(&mz->lruvec);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->memcg = memcg;