| author | Pekka Enberg <penberg@kernel.org> | 2013-05-07 09:19:47 +0300 |
|---|---|---|
| committer | Pekka Enberg <penberg@kernel.org> | 2013-05-07 09:19:47 +0300 |
| commit | 69df2ac1288b456a95aceadafbf88cd891a577c8 (patch) | |
| tree | 0f2e83a8c4bc826f12d3f3871ecc1d7be0c9e4e3 /mm/slab.h | |
| parent | c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff) | |
| parent | 8a965b3baa89ffedc73c0fbc750006c631012ced (diff) | |
Merge branch 'slab/next' into slab/for-linus
Diffstat (limited to 'mm/slab.h')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/slab.h | 43 |

1 file changed, 42 insertions(+), 1 deletion(-)
```diff
diff --git a/mm/slab.h b/mm/slab.h
index 34a98d64219..f96b49e4704 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -16,7 +16,7 @@ enum slab_state {
 	DOWN,			/* No slab functionality yet */
 	PARTIAL,		/* SLUB: kmem_cache_node available */
 	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
-	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
+	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 	UP,			/* Slab caches usable but not all extras yet */
 	FULL			/* Everything is working */
 };
@@ -35,6 +35,15 @@ extern struct kmem_cache *kmem_cache;
 unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size);
 
+#ifndef CONFIG_SLOB
+/* Kmalloc array related functions */
+void create_kmalloc_caches(unsigned long);
+
+/* Find the kmalloc slab corresponding for a certain size */
+struct kmem_cache *kmalloc_slab(size_t, gfp_t);
+#endif
+
+
 /* Functions provided by the slab allocators */
 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
@@ -230,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return s;
 }
 #endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+	spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+	struct list_head slabs_partial;	/* partial list first, better asm code */
+	struct list_head slabs_full;
+	struct list_head slabs_free;
+	unsigned long free_objects;
+	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
+	struct array_cache *shared;	/* shared per node */
+	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;		/* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+	unsigned long nr_partial;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
+	struct list_head full;
+#endif
+#endif
+
+};
```
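Beyond the PARTIAL_L3 → PARTIAL_NODE rename, the notable additions here are the common kmalloc-array entry points (`create_kmalloc_caches()` and `kmalloc_slab()`) and the unified `struct kmem_cache_node` now shared by SLAB and SLUB. To make the size-to-cache lookup concrete, here is a minimal userspace sketch of the idea behind `kmalloc_slab()`: round a request size up to the next power-of-two cache and index a fixed array. The shift bounds, table contents, and helper names below are illustrative assumptions, not the kernel's actual implementation.

```c
/*
 * Userspace sketch of a kmalloc_slab()-style size-to-cache lookup.
 * Assumed bounds and sizes are for illustration only; the real kernel
 * code also special-cases small sizes and DMA caches.
 */
#include <stdio.h>
#include <stddef.h>

#define KMALLOC_SHIFT_LOW  3	/* assumed smallest cache: 8 bytes */
#define KMALLOC_SHIFT_HIGH 13	/* assumed largest cache: 8192 bytes */

/* stand-in for an array of struct kmem_cache *; here just the sizes */
static const size_t kmalloc_sizes[KMALLOC_SHIFT_HIGH + 1] = {
	[3] = 8, [4] = 16, [5] = 32, [6] = 64, [7] = 128,
	[8] = 256, [9] = 512, [10] = 1024, [11] = 2048,
	[12] = 4096, [13] = 8192,
};

/* index of the smallest power-of-two cache that fits `size` */
static int kmalloc_index(size_t size)
{
	int idx = KMALLOC_SHIFT_LOW;

	while (idx <= KMALLOC_SHIFT_HIGH && ((size_t)1 << idx) < size)
		idx++;
	return idx <= KMALLOC_SHIFT_HIGH ? idx : -1;
}

int main(void)
{
	size_t requests[] = { 1, 8, 9, 100, 4097 };

	for (unsigned i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		int idx = kmalloc_index(requests[i]);

		if (idx >= 0)
			printf("request %zu -> cache of size %zu\n",
			       requests[i], kmalloc_sizes[idx]);
		else
			printf("request %zu -> too large for kmalloc caches\n",
			       requests[i]);
	}
	return 0;
}
```

The real lookup is more involved than this sketch: among other things it must handle the odd 96- and 192-byte caches and select a separate cache array for DMA allocations, which is presumably why `kmalloc_slab()` takes `gfp_t` flags in addition to a size.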