 mm/mempolicy.c | 36 ++++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e2df1c1fb41..6f7979c566d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -161,19 +161,7 @@ static const struct mempolicy_operations {
 /* Check that the nodemask contains at least one populated zone */
 static int is_valid_nodemask(const nodemask_t *nodemask)
 {
-	int nd, k;
-
-	for_each_node_mask(nd, *nodemask) {
-		struct zone *z;
-
-		for (k = 0; k <= policy_zone; k++) {
-			z = &NODE_DATA(nd)->node_zones[k];
-			if (z->present_pages > 0)
-				return 1;
-		}
-	}
-
-	return 0;
+	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
 }
 
 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
@@ -1644,6 +1632,26 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 	return pol;
 }
 
+static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
+{
+	enum zone_type dynamic_policy_zone = policy_zone;
+
+	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
+
+	/*
+	 * If policy->v.nodes has movable memory only,
+	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
+	 *
+	 * policy->v.nodes intersects with node_states[N_MEMORY],
+	 * so if the following test fails, it implies
+	 * policy->v.nodes has movable memory only.
+	 */
+	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
+		dynamic_policy_zone = ZONE_MOVABLE;
+
+	return zone >= dynamic_policy_zone;
+}
+
 /*
  * Return a nodemask representing a mempolicy for filtering nodes for
  * page allocation
@@ -1652,7 +1660,7 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 {
 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
 	if (unlikely(policy->mode == MPOL_BIND) &&
-			gfp_zone(gfp) >= policy_zone &&
+			apply_policy_zone(policy, gfp_zone(gfp)) &&
 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
 		return &policy->v.nodes;
 
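To make the new check concrete, here is a minimal userspace sketch, not kernel code: enum zone_type is collapsed to three zones, nodemask_t is a plain bitmask, and nodes_with_normal_memory stands in for node_states[N_HIGH_MEMORY]; these names and values are illustrative assumptions, not part of the patch. It shows why an MPOL_BIND nodemask covering only movable memory should constrain ZONE_MOVABLE allocations while leaving lower-zone allocations unconstrained.

/*
 * Standalone illustration of the apply_policy_zone() decision on
 * simplified stand-in types (assumptions, not the kernel's definitions).
 */
#include <assert.h>
#include <stdio.h>

/* Simplified zone ordering; ZONE_MOVABLE is the highest zone, as in the kernel. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE };

/* Stand-in for nodemask_t: one bit per node. */
typedef unsigned long nodemask_t;

/* Stand-ins for node_states[N_HIGH_MEMORY] and policy_zone. */
static nodemask_t nodes_with_normal_memory;
static enum zone_type policy_zone = ZONE_NORMAL;

static int nodes_intersects(nodemask_t a, nodemask_t b)
{
	return (a & b) != 0;
}

/* Mirrors the patch's logic: returns nonzero if the policy nodemask applies to this zone. */
static int apply_policy_zone(nodemask_t policy_nodes, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	assert(dynamic_policy_zone != ZONE_MOVABLE);

	/*
	 * If the policy's nodes have movable memory only, apply the
	 * nodemask only when the allocation targets ZONE_MOVABLE.
	 */
	if (!nodes_intersects(policy_nodes, nodes_with_normal_memory))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

int main(void)
{
	nodemask_t movable_only_policy = 0x2;	/* MPOL_BIND to node 1: movable memory only */
	nodemask_t normal_policy = 0x1;		/* MPOL_BIND to node 0: has normal memory */

	nodes_with_normal_memory = 0x1;		/* only node 0 has non-movable memory */

	/* Movable-only nodemask is not applied to a ZONE_NORMAL allocation (prints 0)... */
	printf("movable-only policy, ZONE_NORMAL alloc:  %d\n",
	       apply_policy_zone(movable_only_policy, ZONE_NORMAL));
	/* ...but is applied to a ZONE_MOVABLE allocation (prints 1). */
	printf("movable-only policy, ZONE_MOVABLE alloc: %d\n",
	       apply_policy_zone(movable_only_policy, ZONE_MOVABLE));
	/* A nodemask with normal memory keeps the old policy_zone cutoff (prints 1). */
	printf("normal-memory policy, ZONE_NORMAL alloc: %d\n",
	       apply_policy_zone(normal_policy, ZONE_NORMAL));
	return 0;
}

The dynamic cutoff matters because comparing gfp_zone(gfp) against the static policy_zone alone would apply a movable-only nodemask to lower-zone allocations that the bound nodes cannot satisfy; raising the cutoff to ZONE_MOVABLE in that case applies the nodemask only to allocations it can actually serve.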