| field | value | date |
|---|---|---|
| author | Olof Johansson <olof@lixom.net> | 2012-11-25 21:34:34 -0800 |
| committer | Olof Johansson <olof@lixom.net> | 2012-11-25 21:34:34 -0800 |
| commit | 0f9cb211ba5db93d488fe6b154138231fdd0e22d (patch) | |
| tree | 293871b042e9ebc49b1d783f1b110eef541ddc97 /mm/memcontrol.c | |
| parent | 007108a2279123ad6639b6c653ad1a731febb60f (diff) | |
| parent | 9489e9dcae718d5fde988e4a684a0f55b5f94d17 (diff) | |
Merge tag 'v3.7-rc7' into next/cleanup
Merging in mainline back to next/cleanup since it has collected a few
conflicts between fixes going upstream and some of the cleanup patches.
Git doesn't auto-resolve some of them, and they're mostly noise so let's
take care of it locally.
Conflicts are in:
	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
	arch/arm/plat-omap/i2c.c
	drivers/video/omap2/dss/dss.c
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'mm/memcontrol.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/memcontrol.c | 67 |

1 file changed, 50 insertions(+), 17 deletions(-)
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7acf43bf04a..dd39ba000b3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 				      struct mem_cgroup *memcg)
 {
 	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /**
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
 	u64 limit;
-	u64 memsw;
 
 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-	limit += total_swap_pages << PAGE_SHIFT;
 
-	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
 	/*
-	 * If memsw is finite and limits the amount of swap space available
-	 * to this memcg, return that limit.
+	 * Do not consider swap space if we cannot swap due to swappiness
 	 */
-	return min(limit, memsw);
+	if (mem_cgroup_swappiness(memcg)) {
+		u64 memsw;
+
+		limit += total_swap_pages << PAGE_SHIFT;
+		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+		/*
+		 * If memsw is finite and limits the amount of swap space
+		 * available to this memcg, return that limit.
+		 */
+		limit = min(limit, memsw);
+	}
+
+	return limit;
 }
 
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 				int node, int zid, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 	unsigned long flags, loop;
 	struct list_head *list;
 	struct page *busy;
 	struct zone *zone;
 
 	zone = &NODE_DATA(node)->node_zones[zid];
-	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lruvec.lists[lru];
+	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+	list = &lruvec->lists[lru];
 
-	loop = mz->lru_size[lru];
+	loop = mem_cgroup_get_lru_size(lruvec, lru);
 	/* give some margin against EBUSY etc...*/
 	loop += 256;
 	busy = NULL;
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+		lruvec_init(&mz->lruvec);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->memcg = memcg;
```
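The fixup repeated in both lruvec lookups above, `if (unlikely(lruvec->zone != zone)) lruvec->zone = zone;`, lazily (re)binds a per-memcg lruvec to its zone: a node can come online after the mem_cgroup was created, or go offline and come back, so the back-pointer is verified on every lookup rather than trusted from allocation time. Below is a minimal user-space sketch of that lazy back-pointer pattern, assuming simplified stand-in types; the `struct zone`/`struct lruvec` layouts and the `lookup_lruvec()` helper are invented for illustration and are not the kernel's definitions.

```c
/*
 * User-space sketch (not kernel code) of the lazy back-pointer
 * (re)initialization pattern the patch applies to lruvec->zone.
 * All names below are illustrative stand-ins.
 */
#include <stdio.h>

struct zone {
	int id;
};

struct lruvec {
	struct zone *zone;	/* may be NULL or stale until the next lookup */
	int nr_pages;
};

static struct lruvec *lookup_lruvec(struct lruvec *lruvec, struct zone *zone)
{
	/* Re-point the back-reference whenever it does not match the zone
	 * we were asked about (analogue of the unlikely() check above). */
	if (lruvec->zone != zone)
		lruvec->zone = zone;
	return lruvec;
}

int main(void)
{
	struct zone z0 = { .id = 0 }, z1 = { .id = 1 };
	struct lruvec lv = { .zone = NULL, .nr_pages = 0 };

	/* First lookup initializes the back-pointer lazily. */
	lookup_lruvec(&lv, &z0);
	printf("lruvec now belongs to zone %d\n", lv.zone->id);

	/* If the vector is later looked up for a different zone
	 * (e.g. after an offline/online cycle), it is fixed up again. */
	lookup_lruvec(&lv, &z1);
	printf("lruvec now belongs to zone %d\n", lv.zone->id);
	return 0;
}
```

One way to read the design choice: keeping the check on the lookup path costs a single, usually well-predicted comparison, and avoids having to re-walk every lruvec from a memory-hotplug callback.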
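The other functional change folded in above is in `mem_cgroup_get_limit()`: swap space is only added to the OOM budget when the group's swappiness is non-zero, and a finite memsw limit still caps the result. The sketch below reproduces that arithmetic in plain C with invented types and byte units; `effective_limit()` is a hypothetical helper for illustration, not a kernel function.

```c
/*
 * User-space sketch (not kernel code) of the limit calculation in the
 * patched mem_cgroup_get_limit(): swap only counts toward the budget
 * when swappiness is non-zero, and memsw caps the total when finite.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t effective_limit(uint64_t mem_limit, uint64_t memsw_limit,
				uint64_t total_swap_bytes, int swappiness)
{
	uint64_t limit = mem_limit;

	/* With swappiness == 0 the group cannot swap, so swap space
	 * must not inflate the amount of reclaimable memory. */
	if (swappiness) {
		limit += total_swap_bytes;
		/* A finite memory+swap limit caps the total. */
		if (memsw_limit < limit)
			limit = memsw_limit;
	}
	return limit;
}

int main(void)
{
	/* 512 MiB memory limit, 1 GiB memory+swap limit, 2 GiB of swap. */
	uint64_t mem = 512ull << 20, memsw = 1ull << 30, swap = 2ull << 30;

	printf("swappiness 60: %llu MiB\n",
	       (unsigned long long)(effective_limit(mem, memsw, swap, 60) >> 20));
	printf("swappiness  0: %llu MiB\n",
	       (unsigned long long)(effective_limit(mem, memsw, swap, 0) >> 20));
	return 0;
}
```

With these sample numbers, swappiness 60 yields min(512 MiB + 2 GiB, 1 GiB) = 1024 MiB, while swappiness 0 leaves the plain 512 MiB memory limit.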