diff options
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/backing-dev.h | 13 | ||||
| -rw-r--r-- | include/linux/memcontrol.h | 2 | ||||
| -rw-r--r-- | include/linux/mm_inline.h | 50 | ||||
| -rw-r--r-- | include/linux/mmzone.h | 47 | ||||
| -rw-r--r-- | include/linux/pagevec.h | 29 | ||||
| -rw-r--r-- | include/linux/swap.h | 20 | ||||
| -rw-r--r-- | include/linux/vmstat.h | 10 | 
7 files changed, 140 insertions, 31 deletions
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 0a24d5550eb..bee52abb8a4 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -175,6 +175,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);   * BDI_CAP_READ_MAP:       Can be mapped for reading   * BDI_CAP_WRITE_MAP:      Can be mapped for writing   * BDI_CAP_EXEC_MAP:       Can be mapped for execution + * + * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.   */  #define BDI_CAP_NO_ACCT_DIRTY	0x00000001  #define BDI_CAP_NO_WRITEBACK	0x00000002 @@ -184,6 +186,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);  #define BDI_CAP_WRITE_MAP	0x00000020  #define BDI_CAP_EXEC_MAP	0x00000040  #define BDI_CAP_NO_ACCT_WB	0x00000080 +#define BDI_CAP_SWAP_BACKED	0x00000100  #define BDI_CAP_VMFLAGS \  	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP) @@ -248,6 +251,11 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)  				      BDI_CAP_NO_WRITEBACK));  } +static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi) +{ +	return bdi->capabilities & BDI_CAP_SWAP_BACKED; +} +  static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)  {  	return bdi_cap_writeback_dirty(mapping->backing_dev_info); @@ -258,4 +266,9 @@ static inline bool mapping_cap_account_dirty(struct address_space *mapping)  	return bdi_cap_account_dirty(mapping->backing_dev_info);  } +static inline bool mapping_cap_swap_backed(struct address_space *mapping) +{ +	return bdi_cap_swap_backed(mapping->backing_dev_info); +} +  #endif		/* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index a6ac0d491fe..8d8f05c1515 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -44,7 +44,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,  					unsigned long *scanned, int order,  					int mode, 
struct zone *z,  					struct mem_cgroup *mem_cont, -					int active); +					int active, int file);  extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);  int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 96e970485b6..2eb599465d5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -5,7 +5,7 @@   * page_is_file_cache - should the page be on a file LRU or anon LRU?   * @page: the page to test   * - * Returns !0 if @page is page cache page backed by a regular filesystem, + * Returns LRU_FILE if @page is page cache page backed by a regular filesystem,   * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.   * Used by functions that manipulate the LRU lists, to sort a page   * onto the right LRU list. @@ -20,7 +20,7 @@ static inline int page_is_file_cache(struct page *page)  		return 0;  	/* The page is page cache backed by a normal filesystem. 
*/ -	return 1; +	return LRU_FILE;  }  static inline void @@ -38,39 +38,64 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)  }  static inline void -add_page_to_active_list(struct zone *zone, struct page *page) +add_page_to_inactive_anon_list(struct zone *zone, struct page *page)  { -	add_page_to_lru_list(zone, page, LRU_ACTIVE); +	add_page_to_lru_list(zone, page, LRU_INACTIVE_ANON);  }  static inline void -add_page_to_inactive_list(struct zone *zone, struct page *page) +add_page_to_active_anon_list(struct zone *zone, struct page *page)  { -	add_page_to_lru_list(zone, page, LRU_INACTIVE); +	add_page_to_lru_list(zone, page, LRU_ACTIVE_ANON);  }  static inline void -del_page_from_active_list(struct zone *zone, struct page *page) +add_page_to_inactive_file_list(struct zone *zone, struct page *page)  { -	del_page_from_lru_list(zone, page, LRU_ACTIVE); +	add_page_to_lru_list(zone, page, LRU_INACTIVE_FILE);  }  static inline void -del_page_from_inactive_list(struct zone *zone, struct page *page) +add_page_to_active_file_list(struct zone *zone, struct page *page)  { -	del_page_from_lru_list(zone, page, LRU_INACTIVE); +	add_page_to_lru_list(zone, page, LRU_ACTIVE_FILE); +} + +static inline void +del_page_from_inactive_anon_list(struct zone *zone, struct page *page) +{ +	del_page_from_lru_list(zone, page, LRU_INACTIVE_ANON); +} + +static inline void +del_page_from_active_anon_list(struct zone *zone, struct page *page) +{ +	del_page_from_lru_list(zone, page, LRU_ACTIVE_ANON); +} + +static inline void +del_page_from_inactive_file_list(struct zone *zone, struct page *page) +{ +	del_page_from_lru_list(zone, page, LRU_INACTIVE_FILE); +} + +static inline void +del_page_from_active_file_list(struct zone *zone, struct page *page) +{ +	del_page_from_lru_list(zone, page, LRU_ACTIVE_FILE);  }  static inline void  del_page_from_lru(struct zone *zone, struct page *page)  { -	enum lru_list l = LRU_INACTIVE; +	enum lru_list l = LRU_BASE;  	
list_del(&page->lru);  	if (PageActive(page)) {  		__ClearPageActive(page); -		l = LRU_ACTIVE; +		l += LRU_ACTIVE;  	} +	l += page_is_file_cache(page);  	__dec_zone_state(zone, NR_LRU_BASE + l);  } @@ -87,6 +112,7 @@ static inline enum lru_list page_lru(struct page *page)  	if (PageActive(page))  		lru += LRU_ACTIVE; +	lru += page_is_file_cache(page);  	return lru;  } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 156e18f3919..59a4c8fd6eb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -82,21 +82,23 @@ enum zone_stat_item {  	/* First 128 byte cacheline (assuming 64 bit words) */  	NR_FREE_PAGES,  	NR_LRU_BASE, -	NR_INACTIVE = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ -	NR_ACTIVE,	/*  "     "     "   "       "         */ +	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ +	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */ +	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */ +	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */  	NR_ANON_PAGES,	/* Mapped anonymous pages */  	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.  			   only modified from process context */  	NR_FILE_PAGES,  	NR_FILE_DIRTY,  	NR_WRITEBACK, -	/* Second 128 byte cacheline */  	NR_SLAB_RECLAIMABLE,  	NR_SLAB_UNRECLAIMABLE,  	NR_PAGETABLE,		/* used for pagetables */  	NR_UNSTABLE_NFS,	/* NFS unstable pages */  	NR_BOUNCE,  	NR_VMSCAN_WRITE, +	/* Second 128 byte cacheline */  	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */  #ifdef CONFIG_NUMA  	NUMA_HIT,		/* allocated in intended node */ @@ -108,17 +110,36 @@ enum zone_stat_item {  #endif  	NR_VM_ZONE_STAT_ITEMS }; +/* + * We do arithmetic on the LRU lists in various places in the code, + * so it is important to keep the active lists LRU_ACTIVE higher in + * the array than the corresponding inactive lists, and to keep + * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. 
+ * + * This has to be kept in sync with the statistics in zone_stat_item + * above and the descriptions in vmstat_text in mm/vmstat.c + */ +#define LRU_BASE 0 +#define LRU_ACTIVE 1 +#define LRU_FILE 2 +  enum lru_list { -	LRU_BASE, -	LRU_INACTIVE=LRU_BASE,	/* must match order of NR_[IN]ACTIVE */ -	LRU_ACTIVE,		/*  "     "     "   "       "        */ +	LRU_INACTIVE_ANON = LRU_BASE, +	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, +	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, +	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,  	NR_LRU_LISTS };  #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) +static inline int is_file_lru(enum lru_list l) +{ +	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); +} +  static inline int is_active_lru(enum lru_list l)  { -	return (l == LRU_ACTIVE); +	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);  }  struct per_cpu_pages { @@ -269,6 +290,18 @@ struct zone {  		struct list_head list;  		unsigned long nr_scan;  	} lru[NR_LRU_LISTS]; + +	/* +	 * The pageout code in vmscan.c keeps track of how many of the +	 * mem/swap backed and file backed pages are referenced. +	 * The higher the rotated/scanned ratio, the more valuable +	 * that cache is. 
+	 * +	 * The anon LRU stats live in [0], file LRU stats in [1] +	 */ +	unsigned long		recent_rotated[2]; +	unsigned long		recent_scanned[2]; +  	unsigned long		pages_scanned;	   /* since last reclaim */  	unsigned long		flags;		   /* zone flags, see below */ diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index fea3a982ee5..5fc96a4e760 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -81,20 +81,37 @@ static inline void pagevec_free(struct pagevec *pvec)  		__pagevec_free(pvec);  } -static inline void __pagevec_lru_add(struct pagevec *pvec) +static inline void __pagevec_lru_add_anon(struct pagevec *pvec)  { -	____pagevec_lru_add(pvec, LRU_INACTIVE); +	____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);  } -static inline void __pagevec_lru_add_active(struct pagevec *pvec) +static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)  { -	____pagevec_lru_add(pvec, LRU_ACTIVE); +	____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);  } -static inline void pagevec_lru_add(struct pagevec *pvec) +static inline void __pagevec_lru_add_file(struct pagevec *pvec) +{ +	____pagevec_lru_add(pvec, LRU_INACTIVE_FILE); +} + +static inline void __pagevec_lru_add_active_file(struct pagevec *pvec) +{ +	____pagevec_lru_add(pvec, LRU_ACTIVE_FILE); +} + + +static inline void pagevec_lru_add_file(struct pagevec *pvec) +{ +	if (pagevec_count(pvec)) +		__pagevec_lru_add_file(pvec); +} + +static inline void pagevec_lru_add_anon(struct pagevec *pvec)  {  	if (pagevec_count(pvec)) -		__pagevec_lru_add(pvec); +		__pagevec_lru_add_anon(pvec);  }  #endif /* _LINUX_PAGEVEC_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 833be56ad83..7d09d79997a 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -184,14 +184,24 @@ extern void swap_setup(void);   * lru_cache_add: add a page to the page lists   * @page: the page to add   */ -static inline void lru_cache_add(struct page *page) +static inline void lru_cache_add_anon(struct page *page)  { -	
__lru_cache_add(page, LRU_INACTIVE); +	__lru_cache_add(page, LRU_INACTIVE_ANON);  } -static inline void lru_cache_add_active(struct page *page) +static inline void lru_cache_add_active_anon(struct page *page)  { -	__lru_cache_add(page, LRU_ACTIVE); +	__lru_cache_add(page, LRU_ACTIVE_ANON); +} + +static inline void lru_cache_add_file(struct page *page) +{ +	__lru_cache_add(page, LRU_INACTIVE_FILE); +} + +static inline void lru_cache_add_active_file(struct page *page) +{ +	__lru_cache_add(page, LRU_ACTIVE_FILE);  }  /* linux/mm/vmscan.c */ @@ -199,7 +209,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,  					gfp_t gfp_mask);  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,  							gfp_t gfp_mask); -extern int __isolate_lru_page(struct page *page, int mode); +extern int __isolate_lru_page(struct page *page, int mode, int file);  extern unsigned long shrink_all_memory(unsigned long nr_pages);  extern int vm_swappiness;  extern int remove_mapping(struct address_space *mapping, struct page *page); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 58334d43951..ff5179f2b15 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -159,6 +159,16 @@ static inline unsigned long zone_page_state(struct zone *zone,  	return x;  } +extern unsigned long global_lru_pages(void); + +static inline unsigned long zone_lru_pages(struct zone *zone) +{ +	return (zone_page_state(zone, NR_ACTIVE_ANON) +		+ zone_page_state(zone, NR_ACTIVE_FILE) +		+ zone_page_state(zone, NR_INACTIVE_ANON) +		+ zone_page_state(zone, NR_INACTIVE_FILE)); +} +  #ifdef CONFIG_NUMA  /*   * Determine the per node value of a stat item. This function  |