Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 82cc839c921..50a21f96467 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -399,7 +399,7 @@ struct cfs_rq {
 	 */
 	struct sched_entity *curr, *next, *last;
 
-	unsigned long nr_spread_over;
+	unsigned int nr_spread_over;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
@@ -969,6 +969,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void task_rq_unlock_wait(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+
+	smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+	spin_unlock_wait(&rq->lock);
+}
+
 static void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
@@ -6877,15 +6885,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
@@ -7674,6 +7684,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
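
The cpu_attach_domain() hunk above changes how degenerate sched domains are spliced out of the parent chain: the old loop advanced tmp unconditionally in the for header, so after removing one degenerate parent it never re-tested the newly attached parent, and consecutive degenerate levels were left in place. The fixed loop only advances when nothing was removed. Below is a minimal userspace sketch of that control flow, not kernel code; struct dom, its degenerate flag and the prune() helper are hypothetical stand-ins for struct sched_domain, sd_parent_degenerate() and the pruning loop in cpu_attach_domain().

/*
 * Userspace sketch of the degenerate-domain pruning loop fixed above.
 * The names (struct dom, degenerate, prune) are hypothetical; only the
 * control flow mirrors cpu_attach_domain().
 */
#include <stdio.h>
#include <stdbool.h>

struct dom {
	const char *name;
	bool degenerate;	/* stand-in for sd_parent_degenerate() */
	struct dom *parent, *child;
};

static void prune(struct dom *sd)
{
	struct dom *tmp;

	for (tmp = sd; tmp; ) {
		struct dom *parent = tmp->parent;

		if (!parent)
			break;

		if (parent->degenerate) {
			/* splice the degenerate parent out of the chain */
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/* do NOT advance: re-check the new parent of tmp */
		} else
			tmp = tmp->parent;
	}
}

int main(void)
{
	/* chain: base -> mid1 (degenerate) -> mid2 (degenerate) -> top */
	struct dom top  = { "top",  false, NULL,  NULL };
	struct dom mid2 = { "mid2", true,  &top,  NULL };
	struct dom mid1 = { "mid1", true,  &mid2, NULL };
	struct dom base = { "base", false, &mid1, NULL };
	struct dom *d;

	top.child = &mid2;
	mid2.child = &mid1;
	mid1.child = &base;

	prune(&base);

	for (d = &base; d; d = d->parent)
		printf("%s\n", d->name);	/* prints: base, top */

	return 0;
}

With the fixed loop, prune() collapses both degenerate middle levels and the walk prints base then top; with the old unconditional tmp = tmp->parent advance it would stop re-checking after removing mid1 and print base, mid2, top, leaving the second degenerate level attached.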