-rw-r--r--   include/linux/mempolicy.h |  2
-rw-r--r--   mm/mempolicy.c            | 68
2 files changed, 49 insertions, 21 deletions
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 92bc9988a18..0d7df39a588 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -123,7 +123,7 @@ struct sp_node {
 
 struct shared_policy {
 	struct rb_root root;
-	struct mutex mutex;
+	spinlock_t lock;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1cb200af382..e2df1c1fb41 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2132,7 +2132,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->mutex */
+/* Caller holds sp->lock */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2196,13 +2196,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	mutex_lock(&sp->mutex);
+	spin_lock(&sp->lock);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	mutex_unlock(&sp->mutex);
+	spin_unlock(&sp->lock);
 	return pol;
 }
 
@@ -2328,6 +2328,14 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 	sp_free(n);
 }
 
+static void sp_node_init(struct sp_node *node, unsigned long start,
+			unsigned long end, struct mempolicy *pol)
+{
+	node->start = start;
+	node->end = end;
+	node->policy = pol;
+}
+
 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 				struct mempolicy *pol)
 {
@@ -2344,10 +2352,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 		return NULL;
 	}
 	newpol->flags |= MPOL_F_SHARED;
-
-	n->start = start;
-	n->end = end;
-	n->policy = newpol;
+	sp_node_init(n, start, end, newpol);
 
 	return n;
 }
@@ -2357,9 +2362,12 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 				 unsigned long end, struct sp_node *new)
 {
 	struct sp_node *n;
+	struct sp_node *n_new = NULL;
+	struct mempolicy *mpol_new = NULL;
 	int ret = 0;
 
-	mutex_lock(&sp->mutex);
+restart:
+	spin_lock(&sp->lock);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2372,14 +2380,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 		} else {
 			/* Old policy spanning whole new range. */
 			if (n->end > end) {
-				struct sp_node *new2;
-				new2 = sp_alloc(end, n->end, n->policy);
-				if (!new2) {
-					ret = -ENOMEM;
-					goto out;
-				}
+				if (!n_new)
+					goto alloc_new;
+
+				*mpol_new = *n->policy;
+				atomic_set(&mpol_new->refcnt, 1);
+				sp_node_init(n_new, end, n->end, mpol_new);
+				sp_insert(sp, n_new);
 				n->end = start;
-				sp_insert(sp, new2);
+				n_new = NULL;
+				mpol_new = NULL;
 				break;
 			} else
 				n->end = start;
@@ -2390,9 +2400,27 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	}
 	if (new)
 		sp_insert(sp, new);
-out:
-	mutex_unlock(&sp->mutex);
+	spin_unlock(&sp->lock);
+	ret = 0;
+
+err_out:
+	if (mpol_new)
+		mpol_put(mpol_new);
+	if (n_new)
+		kmem_cache_free(sn_cache, n_new);
+
 	return ret;
+
+alloc_new:
+	spin_unlock(&sp->lock);
+	ret = -ENOMEM;
+	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+	if (!n_new)
+		goto err_out;
+	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+	if (!mpol_new)
+		goto err_out;
+	goto restart;
 }
 
 /**
@@ -2410,7 +2438,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
-	mutex_init(&sp->mutex);
+	spin_lock_init(&sp->lock);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2476,14 +2504,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	mutex_lock(&p->mutex);
+	spin_lock(&p->lock);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		sp_delete(p, n);
 	}
-	mutex_unlock(&p->mutex);
+	spin_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
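
The functional core of the patch is that shared_policy_replace() may no longer allocate while holding the lock: GFP_KERNEL allocations can sleep, which is not permitted under a spinlock, so the node and policy are allocated after dropping the lock and the whole lookup is restarted, since the tree may have changed in the meantime. The sketch below is only an illustration of that drop-lock/allocate/retry shape in plain userspace C, using a pthread spinlock and a linked list of ranges standing in for the rb-tree; every name in it (insert_range, struct range, list_lock, sketch.c) is illustrative and not a kernel API.

/*
 * Userspace sketch only: illustrates the drop-lock/allocate/retry pattern
 * used by shared_policy_replace() above. Nothing here is a kernel API.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	long start, end;		/* covers [start, end) */
	struct range *next;
};

static pthread_spinlock_t list_lock;
static struct range *ranges;		/* non-overlapping ranges */

/* Insert [start, end), splitting an existing range that fully spans it. */
static int insert_range(long start, long end)
{
	struct range *spare = NULL;	/* preallocated node for the split */
	struct range *n, *node;
	int ret;

	node = malloc(sizeof(*node));	/* the node we intend to insert */
	if (!node)
		return -1;
	node->start = start;
	node->end = end;

restart:
	pthread_spin_lock(&list_lock);
	for (n = ranges; n; n = n->next) {
		if (n->start < start && n->end > end) {
			if (!spare) {
				/*
				 * Cannot call a sleeping allocator while the
				 * spinlock is held: drop the lock, allocate,
				 * and retry, since the list may have changed.
				 */
				pthread_spin_unlock(&list_lock);
				spare = malloc(sizeof(*spare));
				if (!spare) {
					ret = -1;
					goto err_out;
				}
				goto restart;
			}
			/* Split [n->start, n->end) around [start, end). */
			spare->start = end;
			spare->end = n->end;
			spare->next = n->next;
			n->end = start;
			n->next = spare;
			spare = NULL;	/* consumed */
			break;
		}
	}
	node->next = ranges;
	ranges = node;
	node = NULL;			/* consumed */
	pthread_spin_unlock(&list_lock);
	ret = 0;

err_out:
	free(spare);			/* may be unused after a retry */
	free(node);
	return ret;
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	insert_range(10, 20);
	insert_range(12, 15);		/* forces a split of [10, 20) */
	for (struct range *r = ranges; r; r = r->next)
		printf("[%ld, %ld)\n", r->start, r->end);
	return 0;
}

As in shared_policy_replace(), the preallocated spare may turn out to be unnecessary once the operation is retried, so it is freed unconditionally on the way out instead of being tracked across iterations.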