diff options
| author | James Morris <james.l.morris@oracle.com> | 2012-05-04 12:46:40 +1000 | 
|---|---|---|
| committer | James Morris <james.l.morris@oracle.com> | 2012-05-04 12:46:40 +1000 | 
| commit | 898bfc1d46bd76f8ea2a0fbd239dd2073efe2aa3 (patch) | |
| tree | e6e666085abe674dbf6292555961fe0a0f2e2d2f /kernel/sched/fair.c | |
| parent | 08162e6a23d476544adfe1164afe9ea8b34ab859 (diff) | |
| parent | 69964ea4c7b68c9399f7977aa5b9aa6539a6a98a (diff) | |
| download | olio-linux-3.10-898bfc1d46bd76f8ea2a0fbd239dd2073efe2aa3.tar.xz olio-linux-3.10-898bfc1d46bd76f8ea2a0fbd239dd2073efe2aa3.zip  | |
Merge tag 'v3.4-rc5' into next
Linux 3.4-rc5
Merge to pull in prerequisite change for Smack:
86812bb0de1a3758dc6c7aa01a763158a7c0638a
Requested by Casey.
Diffstat (limited to 'kernel/sched/fair.c')
| -rw-r--r-- | kernel/sched/fair.c | 18 | 
1 file changed, 10 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f..e9553640c1c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.load_move = imbalance;
-		env.src_cpu = busiest->cpu;
-		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.load_move	= imbalance;
+		env.src_cpu	= busiest->cpu;
+		env.src_rq	= busiest;
+		env.loop_max	= min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);