diff options
Diffstat (limited to 'mm/slub.c')
| -rw-r--r-- | mm/slub.c | 23 | 
1 file changed, 13 insertions, 10 deletions
diff --git a/mm/slub.c b/mm/slub.c index 80848cd3901..8c691fa1cf3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)  	inc_slabs_node(s, page_to_nid(page), page->objects);  	page->slab = s; -	page->flags |= 1 << PG_slab; +	__SetPageSlab(page);  	start = page_address(page); @@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,  		freelist = page->freelist;  		counters = page->counters;  		new.counters = counters; -		if (mode) +		if (mode) {  			new.inuse = page->objects; +			new.freelist = NULL; +		} else { +			new.freelist = freelist; +		}  		VM_BUG_ON(new.frozen);  		new.frozen = 1;  	} while (!__cmpxchg_double_slab(s, page,  			freelist, counters, -			NULL, new.counters, +			new.freelist, new.counters,  			"lock and freeze"));  	remove_partial(n, page); @@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,  			object = t;  			available =  page->objects - page->inuse;  		} else { -			page->freelist = t;  			available = put_cpu_partial(s, page, 0);  			stat(s, CPU_PARTIAL_NODE);  		} @@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,  /*   * Get a page from somewhere. Search in increasing NUMA distances.   
*/ -static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags, +static void *get_any_partial(struct kmem_cache *s, gfp_t flags,  		struct kmem_cache_cpu *c)  {  #ifdef CONFIG_NUMA @@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,  }  static void -init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) +init_kmem_cache_node(struct kmem_cache_node *n)  {  	n->nr_partial = 0;  	spin_lock_init(&n->list_lock); @@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)  	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);  	init_tracking(kmem_cache_node, n);  #endif -	init_kmem_cache_node(n, kmem_cache_node); +	init_kmem_cache_node(n);  	inc_slabs_node(kmem_cache_node, node, page->objects);  	add_partial(n, page, DEACTIVATE_TO_HEAD); @@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)  		}  		s->node[node] = n; -		init_kmem_cache_node(n, s); +		init_kmem_cache_node(n);  	}  	return 1;  } @@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)  			ret = -ENOMEM;  			goto out;  		} -		init_kmem_cache_node(n, s); +		init_kmem_cache_node(n);  		s->node[nid] = n;  	}  out: @@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,  			}  			return s;  		} -		kfree(n);  		kfree(s);  	} +	kfree(n);  err:  	up_write(&slub_lock);  |