 include/linux/poison.h |  4 ++--
 mm/slab.c              | 42 +++++++++++++++++++++++-------------------
 2 files changed, 25 insertions(+), 21 deletions(-)
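This patch widens the slab red-zone guard words from unsigned long to unsigned long long, so debug builds carry a full 64-bit sentinel on each side of an object even on 32-bit architectures, and it keys ARCH_KMALLOC_MINALIGN and the debug-disable check off the alignment of that 64-bit type. As a quick illustration of the type widths involved (a standalone userspace sketch, not part of the patch; __alignof__ is the GNU C extension the kernel itself uses):

#include <stdio.h>

int main(void)
{
	/*
	 * On a typical 32-bit target unsigned long is 4 bytes, too small
	 * for the new 64-bit redzone patterns; unsigned long long is
	 * 8 bytes on all Linux targets. The patch therefore sizes and
	 * aligns the redzone words with the wider type.
	 */
	printf("sizeof(unsigned long)           = %zu\n",
	       sizeof(unsigned long));
	printf("sizeof(unsigned long long)      = %zu\n",
	       sizeof(unsigned long long));
	printf("__alignof__(unsigned long long) = %zu\n",
	       __alignof__(unsigned long long));
	return 0;
}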
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 95f518b1768..d93c300a344 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -15,8 +15,8 @@
  * Magic nums for obj red zoning.
  * Placed in the first word before and the first word after an obj.
  */
-#define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
-#define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */
+#define	RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
+#define	RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */
 
 #define SLUB_RED_INACTIVE	0xbb
 #define SLUB_RED_ACTIVE		0xcc
diff --git a/mm/slab.c b/mm/slab.c
index 5920a412b37..1115e2065bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -148,10 +148,11 @@
  * Usually, the kmalloc caches are cache_line_size() aligned, except when
  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
- * Note that this flag disables some debug features.
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
  */
-#define ARCH_KMALLOC_MINALIGN 0
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
 
 #ifndef ARCH_SLAB_MINALIGN
@@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep)
 	return cachep->obj_size;
 }
 
-static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
+	return (unsigned long long*) (objp + obj_offset(cachep) -
+				      sizeof(unsigned long long));
 }
 
-static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long *)(objp + cachep->buffer_size -
-					 2 * BYTES_PER_WORD);
-	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
+		return (unsigned long long *)(objp + cachep->buffer_size -
+					      sizeof(unsigned long long) -
+					      BYTES_PER_WORD);
+	return (unsigned long long *) (objp + cachep->buffer_size -
+				       sizeof(unsigned long long));
 }
 
 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
@@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #define obj_offset(x)			0
 #define obj_size(cachep)		(cachep->buffer_size)
-#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
-#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
+#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 
 #endif
@@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 	char *realobj;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
-		printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
+		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
 			*dbg_redzone1(cachep, objp),
 			*dbg_redzone2(cachep, objp));
 	}
@@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * is greater than BYTES_PER_WORD.
 	 */
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
-		ralign = BYTES_PER_WORD;
+		ralign = __alignof__(unsigned long long);
 
 	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = align;
 	}
 	/* disable debug if necessary */
-	if (ralign > BYTES_PER_WORD)
+	if (ralign > __alignof__(unsigned long long))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
@@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (flags & SLAB_RED_ZONE) {
 		/* add space for red zone words */
-		cachep->obj_offset += BYTES_PER_WORD;
-		size += 2 * BYTES_PER_WORD;
+		cachep->obj_offset += sizeof(unsigned long long);
+		size += 2 * sizeof(unsigned long long);
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
@@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp)
 
 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 {
-	unsigned long redzone1, redzone2;
+	unsigned long long redzone1, redzone2;
 
 	redzone1 = *dbg_redzone1(cache, obj);
 	redzone2 = *dbg_redzone2(cache, obj);
@@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 	else
 		slab_error(cache, "memory outside object was overwritten");
 
-	printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
 			obj, redzone1, redzone2);
 }
 
@@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 			slab_error(cachep, "double free, or memory outside"
 						" object was overwritten");
 			printk(KERN_ERR
-				"%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
 				objp, *dbg_redzone1(cachep, objp),
 				*dbg_redzone2(cachep, objp));
 		}
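For review context, here is a minimal standalone sketch of the object layout the patched dbg_redzone1()/dbg_redzone2() helpers assume: one 64-bit guard word immediately before the payload and one immediately after it. The sketch omits the SLAB_STORE_USER user word and all other slab bookkeeping; buf, payload_size, redzone1() and redzone2() are illustrative names, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RED_INACTIVE	0x09F911029D74E35BULL	/* object is free */
#define RED_ACTIVE	0xD84156C5635688C0ULL	/* object is allocated */

/* Layout: [redzone1][payload][redzone2] */
static unsigned long long *redzone1(void *buf)
{
	return (unsigned long long *)buf;
}

static unsigned long long *redzone2(void *buf, size_t payload_size)
{
	return (unsigned long long *)((char *)buf +
		sizeof(unsigned long long) + payload_size);
}

static void *payload(void *buf)
{
	return (char *)buf + sizeof(unsigned long long);
}

int main(void)
{
	size_t payload_size = 32;
	void *buf = malloc(payload_size + 2 * sizeof(unsigned long long));

	/* "Allocate": stamp both guard words with the active pattern. */
	*redzone1(buf) = RED_ACTIVE;
	*redzone2(buf, payload_size) = RED_ACTIVE;

	/* Simulate a one-byte overrun past the end of the object. */
	memset(payload(buf), 0xab, payload_size + 1);

	/* "Free": any byte flipped in either guard word is detected. */
	if (*redzone1(buf) != RED_ACTIVE ||
	    *redzone2(buf, payload_size) != RED_ACTIVE)
		printf("redzone 1:0x%llx, redzone 2:0x%llx -> "
		       "memory outside object was overwritten\n",
		       *redzone1(buf), *redzone2(buf, payload_size));

	free(buf);
	return 0;
}

The wider pattern also makes it far less likely that stray data written through a dangling pointer happens to match a valid redzone value and masks the corruption.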