Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 29 ++++++++++-------------------
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234..7a33e5986fc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	}
 
 	/* ensure we never gain time by being placed backwards. */
-	vruntime = max_vruntime(se->vruntime, vruntime);
-
-	se->vruntime = vruntime;
+	se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -2663,7 +2661,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_idle_sibling(struct task_struct *p, int target)
 {
-	int cpu = smp_processor_id();
-	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int i;
+	int i = task_cpu(p);
 
-	/*
-	 * If the task is going to be woken-up on this cpu and if it is
-	 * already idle, then it is the right target.
-	 */
-	if (target == cpu && idle_cpu(cpu))
-		return cpu;
+	if (idle_cpu(target))
+		return target;
 
 	/*
-	 * If the task is going to be woken-up on the cpu where it previously
-	 * ran and if it is currently idle, then it the right target.
+	 * If the prevous cpu is cache affine and idle, don't be stupid.
 	 */
-	if (target == prev_cpu && idle_cpu(prev_cpu))
-		return prev_cpu;
+	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+		return i;
 
 	/*
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 				goto next;
 
 			for_each_cpu(i, sched_group_cpus(sg)) {
-				if (!idle_cpu(i))
+				if (i == target || !idle_cpu(i))
 					goto next;
 			}
 
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 	 * idle runqueue:
 	 */
 	if (rq->cfs.load.weight)
-		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
 	return rr_interval;
 }
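
The functional heart of this diff is the reordered wake-up path in select_idle_sibling(): accept the chosen target CPU if it is already idle, fall back to the task's previous CPU only when it shares a cache with the target, and only then walk the sched domains for some other idle CPU (the group scan now skips the target itself). The user-space sketch below models just that decision order; the fake 4-CPU cache domains, the *_stub helpers, and scan_domains_for_idle() are illustrative stand-ins for the kernel's idle_cpu(), cpus_share_cache(), and for_each_domain() walk, not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers the patch relies on.
 * Fake topology: CPUs 0-3 share one cache, CPUs 4-7 another; a bitmask
 * records which CPUs are currently idle. */
static unsigned int idle_mask;

static bool idle_cpu_stub(int cpu)
{
	return idle_mask & (1u << cpu);
}

static bool cpus_share_cache_stub(int a, int b)
{
	return (a / 4) == (b / 4);	/* same 4-CPU cache domain? */
}

/* Stand-in for the sched-domain scan: find an idle CPU in target's
 * cache domain, skipping target itself (as the patched loop now does). */
static int scan_domains_for_idle(int target)
{
	int base = (target / 4) * 4;

	for (int i = base; i < base + 4; i++)
		if (i != target && idle_cpu_stub(i))
			return i;
	return target;	/* nothing better found */
}

/* The decision order select_idle_sibling() implements after this diff. */
static int select_idle_sibling_sketch(int prev_cpu, int target)
{
	/* 1. The chosen target is already idle: done. */
	if (idle_cpu_stub(target))
		return target;

	/* 2. The previous CPU is cache-affine to target and idle. */
	if (prev_cpu != target &&
	    cpus_share_cache_stub(prev_cpu, target) &&
	    idle_cpu_stub(prev_cpu))
		return prev_cpu;

	/* 3. Otherwise scan for any other idle CPU near target. */
	return scan_domains_for_idle(target);
}

int main(void)
{
	idle_mask = 1u << 2;	/* only CPU 2 idle */
	printf("prev=2 target=1 -> %d\n", select_idle_sibling_sketch(2, 1));
	idle_mask = 1u << 1;	/* only CPU 1 idle */
	printf("prev=2 target=1 -> %d\n", select_idle_sibling_sketch(2, 1));
	return 0;
}

The first call prints 2 (previous CPU, cache-affine and idle), the second prints 1 (target itself is idle). The final hunk of the diff is an independent fix: sched_slice() sizes a task's slice relative to the load of the runqueue the entity is actually queued on, so get_rr_interval_fair() should pass cfs_rq_of(se) rather than the root rq->cfs; the two only coincide when CFS group scheduling is not in use.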