diff options
Diffstat (limited to 'include/linux/mmzone.h')
| -rw-r--r-- | include/linux/mmzone.h | 105 | 
1 file changed, 97 insertions, 8 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 428328a05fa..35a7b5e1946 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -81,21 +81,31 @@ struct zone_padding {  enum zone_stat_item {  	/* First 128 byte cacheline (assuming 64 bit words) */  	NR_FREE_PAGES, -	NR_INACTIVE, -	NR_ACTIVE, +	NR_LRU_BASE, +	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ +	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */ +	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */ +	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */ +#ifdef CONFIG_UNEVICTABLE_LRU +	NR_UNEVICTABLE,		/*  "     "     "   "       "         */ +	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */ +#else +	NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */ +	NR_MLOCK = NR_ACTIVE_FILE, +#endif  	NR_ANON_PAGES,	/* Mapped anonymous pages */  	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.  			   only modified from process context */  	NR_FILE_PAGES,  	NR_FILE_DIRTY,  	NR_WRITEBACK, -	/* Second 128 byte cacheline */  	NR_SLAB_RECLAIMABLE,  	NR_SLAB_UNRECLAIMABLE,  	NR_PAGETABLE,		/* used for pagetables */  	NR_UNSTABLE_NFS,	/* NFS unstable pages */  	NR_BOUNCE,  	NR_VMSCAN_WRITE, +	/* Second 128 byte cacheline */  	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */  #ifdef CONFIG_NUMA  	NUMA_HIT,		/* allocated in intended node */ @@ -107,6 +117,55 @@ enum zone_stat_item {  #endif  	NR_VM_ZONE_STAT_ITEMS }; +/* + * We do arithmetic on the LRU lists in various places in the code, + * so it is important to keep the active lists LRU_ACTIVE higher in + * the array than the corresponding inactive lists, and to keep + * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. 
+ * + * This has to be kept in sync with the statistics in zone_stat_item + * above and the descriptions in vmstat_text in mm/vmstat.c + */ +#define LRU_BASE 0 +#define LRU_ACTIVE 1 +#define LRU_FILE 2 + +enum lru_list { +	LRU_INACTIVE_ANON = LRU_BASE, +	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, +	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, +	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, +#ifdef CONFIG_UNEVICTABLE_LRU +	LRU_UNEVICTABLE, +#else +	LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */ +#endif +	NR_LRU_LISTS +}; + +#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) + +#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++) + +static inline int is_file_lru(enum lru_list l) +{ +	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); +} + +static inline int is_active_lru(enum lru_list l) +{ +	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); +} + +static inline int is_unevictable_lru(enum lru_list l) +{ +#ifdef CONFIG_UNEVICTABLE_LRU +	return (l == LRU_UNEVICTABLE); +#else +	return 0; +#endif +} +  struct per_cpu_pages {  	int count;		/* number of pages in the list */  	int high;		/* high watermark, emptying needed */ @@ -251,10 +310,22 @@ struct zone {  	/* Fields commonly accessed by the page reclaim scanner */  	spinlock_t		lru_lock;	 -	struct list_head	active_list; -	struct list_head	inactive_list; -	unsigned long		nr_scan_active; -	unsigned long		nr_scan_inactive; +	struct { +		struct list_head list; +		unsigned long nr_scan; +	} lru[NR_LRU_LISTS]; + +	/* +	 * The pageout code in vmscan.c keeps track of how many of the +	 * mem/swap backed and file backed pages are referenced. +	 * The higher the rotated/scanned ratio, the more valuable +	 * that cache is. 
+	 * +	 * The anon LRU stats live in [0], file LRU stats in [1] +	 */ +	unsigned long		recent_rotated[2]; +	unsigned long		recent_scanned[2]; +  	unsigned long		pages_scanned;	   /* since last reclaim */  	unsigned long		flags;		   /* zone flags, see below */ @@ -276,6 +347,12 @@ struct zone {  	 */  	int prev_priority; +	/* +	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on +	 * this zone's LRU.  Maintained by the pageout code. +	 */ +	unsigned int inactive_ratio; +  	ZONE_PADDING(_pad2_)  	/* Rarely used or read-mostly fields */ @@ -524,8 +601,11 @@ typedef struct pglist_data {  	struct zone node_zones[MAX_NR_ZONES];  	struct zonelist node_zonelists[MAX_ZONELISTS];  	int nr_zones; -#ifdef CONFIG_FLAT_NODE_MEM_MAP +#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */  	struct page *node_mem_map; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +	struct page_cgroup *node_page_cgroup; +#endif  #endif  	struct bootmem_data *bdata;  #ifdef CONFIG_MEMORY_HOTPLUG @@ -854,6 +934,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)  #endif  struct page; +struct page_cgroup;  struct mem_section {  	/*  	 * This is, logically, a pointer to an array of struct @@ -871,6 +952,14 @@ struct mem_section {  	/* See declaration of similar field in struct zone */  	unsigned long *pageblock_flags; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +	/* +	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use +	 * section. (see memcontrol.h/page_cgroup.h about this.) +	 */ +	struct page_cgroup *page_cgroup; +	unsigned long pad; +#endif  };  #ifdef CONFIG_SPARSEMEM_EXTREME  |