Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--   mm/memcontrol.c   115
1 file changed, 43 insertions, 72 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36896f3eb7f..c0cbd7790c5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -32,6 +32,7 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
+#include <linux/mm_inline.h>
 
 #include <asm/uaccess.h>
 
@@ -85,22 +86,13 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 /*
  * per-zone information in memory controller.
  */
-
-enum mem_cgroup_zstat_index {
-	MEM_CGROUP_ZSTAT_ACTIVE,
-	MEM_CGROUP_ZSTAT_INACTIVE,
-
-	NR_MEM_CGROUP_ZSTAT,
-};
-
 struct mem_cgroup_per_zone {
 	/*
 	 * spin_lock to protect the per cgroup LRU
 	 */
 	spinlock_t		lru_lock;
-	struct list_head	active_list;
-	struct list_head	inactive_list;
-	unsigned long count[NR_MEM_CGROUP_ZSTAT];
+	struct list_head	lists[NR_LRU_LISTS];
+	unsigned long		count[NR_LRU_LISTS];
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
 
@@ -227,7 +219,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
 }
 
 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
-					enum mem_cgroup_zstat_index idx)
+					enum lru_list idx)
 {
 	int nid, zid;
 	struct mem_cgroup_per_zone *mz;
@@ -297,11 +289,9 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 			struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = !!from;
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
 	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
 	list_del(&pc->lru);
@@ -310,37 +300,35 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
 static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
 				struct page_cgroup *pc)
 {
-	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+	int lru = LRU_INACTIVE;
+
+	if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		lru += LRU_ACTIVE;
+
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_add(&pc->lru, &mz->lists[lru]);
 
-	if (!to) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-		list_add(&pc->lru, &mz->inactive_list);
-	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-		list_add(&pc->lru, &mz->active_list);
-	}
 	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
 
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
-	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
 	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+	int lru = LRU_INACTIVE;
 
-	if (from)
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
-	else
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+	if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+		lru += LRU_ACTIVE;
 
-	if (active) {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+
+	if (active)
 		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->active_list);
-	} else {
-		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+	else
 		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &mz->inactive_list);
-	}
+
+	lru = !!active;
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_move(&pc->lru, &mz->lists[lru]);
 }
 
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -412,8 +400,8 @@ long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
 {
 	unsigned long active, inactive;
 	/* active and inactive are the number of pages. 'long' is ok.*/
-	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
-	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	active = mem_cgroup_get_all_zonestat(mem, LRU_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, LRU_INACTIVE);
 	return (long) (active / (inactive + 1));
 }
 
@@ -444,28 +432,17 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
  * (see include/linux/mmzone.h)
  */
 
-long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
-				   struct zone *zone, int priority)
+long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+					int priority, enum lru_list lru)
 {
-	long nr_active;
+	long nr_pages;
 	int nid = zone->zone_pgdat->node_id;
 	int zid = zone_idx(zone);
 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
-	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
-	return (nr_active >> priority);
-}
-
-long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
-					struct zone *zone, int priority)
-{
-	long nr_inactive;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
 
-	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-	return (nr_inactive >> priority);
+	return (nr_pages >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -484,14 +461,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	int nid = z->zone_pgdat->node_id;
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
+	int lru = !!active;
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	if (active)
-		src = &mz->active_list;
-	else
-		src = &mz->inactive_list;
-
+	src = &mz->lists[lru];
 	spin_lock(&mz->lru_lock);
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -863,7 +837,7 @@ int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
 #define FORCE_UNCHARGE_BATCH	(128)
 static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 			    struct mem_cgroup_per_zone *mz,
-			    int active)
+			    enum lru_list lru)
 {
 	struct page_cgroup *pc;
 	struct page *page;
@@ -871,10 +845,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 	unsigned long flags;
 	struct list_head *list;
 
-	if (active)
-		list = &mz->active_list;
-	else
-		list = &mz->inactive_list;
+	list = &mz->lists[lru];
 
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	while (!list_empty(list)) {
@@ -922,11 +893,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 		for_each_node_state(node, N_POSSIBLE)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 				struct mem_cgroup_per_zone *mz;
+				enum lru_list l;
 				mz = mem_cgroup_zoneinfo(mem, node, zid);
-				/* drop all page_cgroup in active_list */
-				mem_cgroup_force_empty_list(mem, mz, 1);
-				/* drop all page_cgroup in inactive_list */
-				mem_cgroup_force_empty_list(mem, mz, 0);
+				for_each_lru(l)
+					mem_cgroup_force_empty_list(mem, mz, l);
 			}
 	}
 	ret = 0;
@@ -1015,9 +985,9 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 		unsigned long active, inactive;
 
 		inactive = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_INACTIVE);
+						LRU_INACTIVE);
 		active = mem_cgroup_get_all_zonestat(mem_cont,
-						MEM_CGROUP_ZSTAT_ACTIVE);
+						LRU_ACTIVE);
 		cb->fill(cb, "active", (active) * PAGE_SIZE);
 		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
 	}
@@ -1062,6 +1032,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
 	struct mem_cgroup_per_node *pn;
 	struct mem_cgroup_per_zone *mz;
+	enum lru_list l;
 	int zone, tmp = node;
 	/*
 	 * This routine is called against possible nodes.
@@ -1082,9 +1053,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		INIT_LIST_HEAD(&mz->active_list);
-		INIT_LIST_HEAD(&mz->inactive_list);
 		spin_lock_init(&mz->lru_lock);
+		for_each_lru(l)
+			INIT_LIST_HEAD(&mz->lists[l]);
 	}
 	return 0;
 }
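
The whole patch turns on one convention: LRU_INACTIVE and LRU_ACTIVE (pulled in via linux/mm_inline.h) are plain array indices, so each old if/else over active_list and inactive_list collapses into a lists[lru] or count[lru] lookup, "lru = !!active" converts the boolean flag directly into an index, and for_each_lru() walks every list in one loop. The standalone sketch below shows the same indexing pattern in miniature; it is not kernel code: struct per_zone, zone_add_page and zone_move_page are made-up stand-ins, and the enum and macro are re-declared here only for illustration, assuming LRU_INACTIVE == 0 and LRU_ACTIVE == 1, as the patch's use of !!active implies.

/*
 * Standalone sketch of the indexing convention used by the patch.
 * The enum values mirror the assumption behind "lru = !!active" and
 * "lru += LRU_ACTIVE": LRU_INACTIVE is 0 and LRU_ACTIVE is 1, so a
 * boolean "active" flag doubles as an array index.
 */
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE,	/* == 0 */
	LRU_ACTIVE,	/* == 1 */
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

/* simplified stand-in for struct mem_cgroup_per_zone */
struct per_zone {
	unsigned long count[NR_LRU_LISTS];
};

/* account a page on one of the two lists, as __mem_cgroup_add_list does */
static void zone_add_page(struct per_zone *mz, int active_flag)
{
	enum lru_list lru = !!active_flag;	/* 0 -> LRU_INACTIVE, nonzero -> LRU_ACTIVE */

	mz->count[lru] += 1;
}

/* move a page between the lists, as __mem_cgroup_move_lists does */
static void zone_move_page(struct per_zone *mz, int was_active, int now_active)
{
	mz->count[!!was_active] -= 1;
	mz->count[!!now_active] += 1;
}

int main(void)
{
	struct per_zone mz = { {0, 0} };
	enum lru_list l;

	zone_add_page(&mz, 0);		/* inactive page */
	zone_add_page(&mz, 1);		/* active page */
	zone_move_page(&mz, 1, 0);	/* deactivate the active one */

	/* one loop replaces the old duplicated per-list code */
	for_each_lru(l)
		printf("list %d: %lu pages\n", (int)l, mz.count[l]);
	return 0;
}

Built with any C compiler, it prints two pages on list 0 (inactive) and none on list 1 (active), mirroring how a single for_each_lru() loop now replaces the duplicated per-list paths in mem_cgroup_force_empty() and alloc_mem_cgroup_per_zone_info().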