Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c | 174
1 file changed, 173 insertions(+), 1 deletion(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3f3cd97d3fd..d2517b05d5b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -299,7 +299,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
 	err = __kmem_cache_create(s, flags);
 
 	if (err)
-		panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
+		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
 					name, size, err);
 
 	s->refcount = -1;	/* Exempt from merging for now */
@@ -319,6 +319,178 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
 	return s;
 }
 
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_caches);
+
+#ifdef CONFIG_ZONE_DMA
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_dma_caches);
+#endif
+
+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ */
+struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+{
+	int index;
+
+	if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+		return NULL;
+
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
+
+		index = size_index[size_index_elem(size)];
+	} else
+		index = fls(size - 1);
+
+#ifdef CONFIG_ZONE_DMA
+	if (unlikely((flags & GFP_DMA)))
+		return kmalloc_dma_caches[index];
+
+#endif
+	return kmalloc_caches[index];
+}
+
+/*
+ * Create the kmalloc array. Some of the regular kmalloc arrays
+ * may already have been created because they were needed to
+ * enable allocations for slab creation.
+ */
+void __init create_kmalloc_caches(unsigned long flags)
+{
+	int i;
+
+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This is only the case for
+	 * MIPS it seems. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
+
+	if (KMALLOC_MIN_SIZE >= 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+
+	}
+
+	if (KMALLOC_MIN_SIZE >= 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[size_index_elem(i)] = 8;
+	}
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+		if (!kmalloc_caches[i]) {
+			kmalloc_caches[i] = create_kmalloc_cache(NULL,
+							1 << i, flags);
+
+			/*
+			 * Caches that are not of the two-to-the-power-of size.
+			 * These have to be created immediately after the
+			 * earlier power of two caches
+			 */
+			if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+				kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+
+			if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+				kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
+		}
+	}
+
+	/* Kmalloc array is now usable */
+	slab_state = UP;
+
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+		char *n;
+
+		if (s) {
+			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
+
+			BUG_ON(!n);
+			s->name = n;
+		}
+	}
+
+#ifdef CONFIG_ZONE_DMA
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+
+		if (s) {
+			int size = kmalloc_size(i);
+			char *n = kasprintf(GFP_NOWAIT,
+				 "dma-kmalloc-%d", size);
+
+			BUG_ON(!n);
+			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
+				size, SLAB_CACHE_DMA | flags);
+		}
+	}
+#endif
+}
 #endif /* !CONFIG_SLOB */
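Note (not part of the patch): the size-to-cache mapping that kmalloc_slab() implements above can be exercised as a user-space sketch. The listing below copies the size_index[] values and the fls() fallback from the hunk and assumes KMALLOC_MIN_SIZE == 8, so none of the boot-time table patching in create_kmalloc_caches() applies; kmalloc_index_of(), cache_size_of() and the local fls() are made-up stand-ins for kmalloc_slab(), kmalloc_size() and the kernel's fls().

/*
 * User-space sketch of the kmalloc size-to-cache lookup added above.
 * Illustrative only; assumes KMALLOC_MIN_SIZE == 8, so size_index[]
 * keeps its compile-time values.
 */
#include <stdio.h>
#include <stddef.h>

/* sizes/8 -> kmalloc array index for sizes up to 192; the non power of
 * two caches (96 and 192 bytes) sit at indices 1 and 2. */
static const signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6,		/*   8 ..  64 */
	1, 1, 1, 1, 7, 7, 7, 7,		/*  72 .. 128 */
	2, 2, 2, 2, 2, 2, 2, 2		/* 136 .. 192 */
};

static int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/* Local stand-in for the kernel's fls(): position of the highest set
 * bit, counting from 1; fls(0) == 0. */
static int fls(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Same decision as kmalloc_slab(), but returning the array index
 * instead of a struct kmem_cache pointer (hypothetical helper). */
static int kmalloc_index_of(size_t size)
{
	if (size <= 192)
		return size ? size_index[size_index_elem(size)] : -1;
	return fls(size - 1);
}

/* Object size served by a given index, mirroring kmalloc_size(). */
static size_t cache_size_of(int index)
{
	if (index == 1)
		return 96;
	if (index == 2)
		return 192;
	return (size_t)1 << index;
}

int main(void)
{
	static const size_t sizes[] = { 8, 24, 70, 96, 100, 192, 193, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int idx = kmalloc_index_of(sizes[i]);

		printf("kmalloc(%4zu) -> index %2d -> kmalloc-%zu\n",
		       sizes[i], idx, cache_size_of(idx));
	}
	return 0;
}

For example, a request of 100 bytes lands in size_index[12] == 7 and is served by the 128-byte cache, while 193 bytes falls through to fls(192) == 8, i.e. the 256-byte cache, matching the table comments in the patch.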