Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	94
1 file changed, 88 insertions(+), 6 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2c3a2e0394d..e7667a3584b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include	<linux/slab.h>
-#include	"slab.h"
 #include	<linux/mm.h>
 #include	<linux/poison.h>
 #include	<linux/swap.h>
@@ -128,6 +127,8 @@
 
 #include	"internal.h"
 
+#include	"slab.h"
+
 /*
  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
@@ -641,6 +642,26 @@ static void init_node_lock_keys(int q)
 	}
 }
 
+static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
+{
+	struct kmem_list3 *l3;
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	slab_set_lock_classes(cachep, &on_slab_l3_key,
+			&on_slab_alc_key, q);
+}
+
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	VM_BUG_ON(OFF_SLAB(cachep));
+	for_each_node(node)
+		on_slab_lock_classes_node(cachep, node);
+}
+
 static inline void init_lock_keys(void)
 {
 	int node;
@@ -657,6 +678,14 @@ static inline void init_lock_keys(void)
 {
 }
 
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+}
+
+static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 {
 }
@@ -1385,6 +1414,9 @@ static int __cpuinit cpuup_prepare(long cpu)
 		free_alien_cache(alien);
 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
 			slab_set_debugobj_lock_classes_node(cachep, node);
+		else if (!OFF_SLAB(cachep) &&
+			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
+			on_slab_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
@@ -1863,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		if (page->pfmemalloc)
 			SetPageSlabPfmemalloc(page + i);
 	}
+	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1899,9 +1932,11 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 		__ClearPageSlab(page);
 		page++;
 	}
+
+	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_pages((unsigned long)addr, cachep->gfporder);
+	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -2489,7 +2524,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
 
 		slab_set_debugobj_lock_classes(cachep);
-	}
+	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
+		on_slab_lock_classes(cachep);
 
 	return 0;
 }
@@ -3453,6 +3489,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
@@ -3538,6 +3576,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
@@ -3851,6 +3891,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+	cachep = cache_from_obj(cachep, objp);
+	if (!cachep)
+		return;
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
@@ -3998,7 +4041,7 @@ static void do_ccupdate_local(void *info)
 }
 
 /* Always called with the slab_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
@@ -4041,12 +4084,49 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	return alloc_kmemlist(cachep, gfp);
 }
 
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+				int batchcount, int shared, gfp_t gfp)
+{
+	int ret;
+	struct kmem_cache *c = NULL;
+	int i = 0;
+
+	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
+
+	if (slab_state < FULL)
+		return ret;
+
+	if ((ret < 0) || !is_root_cache(cachep))
+		return ret;
+
+	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg(cachep, i);
+		if (c)
+			/* return value determined by the parent cache only */
+			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
+	}
+
+	return ret;
+}
+
 /* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
-	int limit, shared;
+	int limit = 0;
+	int shared = 0;
+	int batchcount = 0;
+
+	if (!is_root_cache(cachep)) {
+		struct kmem_cache *root = memcg_root_cache(cachep);
+		limit = root->limit;
+		shared = root->shared;
+		batchcount = root->batchcount;
+	}
 
+	if (limit && shared && batchcount)
+		goto skip_setup;
 	/*
 	 * The head array serves three purposes:
 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
@@ -4088,7 +4168,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
+	batchcount = (limit + 1) / 2;
+skip_setup:
+	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 		       cachep->name, -err);
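
For readers skimming the diff, the do_tune_cpucache()/__do_tune_cpucache() split near the end is the most self-contained change: tunables applied to a root cache are replayed onto every per-memcg child cache, while the return value is taken from the root cache alone. The following is a minimal user-space sketch of that pattern, not kernel code; all identifiers in it (struct cache, tune_one, tune, MAX_MEMCGS) are illustrative inventions for this note, not kernel API.

/*
 * Sketch of the tunable-propagation pattern: tune the root cache,
 * then replay the same tunables onto each per-memcg child cache.
 */
#include <stdio.h>

#define MAX_MEMCGS 4

struct cache {
	const char *name;
	int limit, batchcount, shared;
	/* per-memcg copies of this cache; unused slots stay NULL */
	struct cache *children[MAX_MEMCGS];
};

/* Apply tunables to a single cache (the __do_tune_cpucache() role). */
static int tune_one(struct cache *c, int limit, int batchcount, int shared)
{
	c->limit = limit;
	c->batchcount = batchcount;
	c->shared = shared;
	return 0;
}

/* Tune the root, then propagate to children (the do_tune_cpucache() role). */
static int tune(struct cache *root, int limit, int batchcount, int shared)
{
	int ret = tune_one(root, limit, batchcount, shared);
	int i;

	if (ret < 0)
		return ret;

	for (i = 0; i < MAX_MEMCGS; i++)
		if (root->children[i])
			/* return value determined by the root cache only */
			tune_one(root->children[i], limit, batchcount, shared);

	return ret;
}

int main(void)
{
	struct cache child = { .name = "dentry(memcg#1)" };
	struct cache root  = { .name = "dentry", .children = { &child } };

	tune(&root, 32, 16, 8);
	printf("%s: limit=%d batchcount=%d shared=%d\n",
	       child.name, child.limit, child.batchcount, child.shared);
	return 0;
}

The enable_cpucache() hunk mirrors the same idea from the other direction: a newly created child cache seeds its limit, shared, and batchcount from its root via memcg_root_cache() and jumps to skip_setup, so tuning the root (e.g. through /proc/slabinfo) and creating memcg clones both leave every cache in the group with identical tunables.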