| author | Jiri Kosina <jkosina@suse.cz> | 2011-06-10 14:46:48 +0200 |
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2011-06-10 14:46:57 +0200 |
| commit | 5be5758c114b18260c6fd4c8373bf89e39b0fe82 (patch) | |
| tree | 54390f904df6ff11e570f764c444356cf2709fda /kernel/sched.c | |
| parent | 71f66a6580c4e42df377bebbcca5c72661a40700 (diff) | |
| parent | 7f45e5cd1718ed769295033ca214032848a0097d (diff) | |
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply patches against new
code I have in queue.
Diffstat (limited to 'kernel/sched.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 127 |

1 file changed, 67 insertions(+), 60 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 2d12893b8b0..3f2e502d609 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock));
+			lockdep_is_held(&p->pi_lock) ||
+			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
 
 	return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock, when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * sched_move_task() holds both and thus holding either pins the cgroup,
+	 * see set_task_rq().
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		}
 		rcu_read_unlock();
 	}
+
+	if (wake_flags & WF_MIGRATED)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
 	schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-	if (cpu != task_cpu(p))
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
@@ -2573,7 +2585,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	if (!next)
 		smp_send_reschedule(cpu);
 }
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p);
+	if (p->on_cpu) {
+		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq);
+
+	return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
@@ -2581,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
 	}
@@ -2631,17 +2663,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 		/*
-		 * If called from interrupt context we could have landed in the
-		 * middle of schedule(), in this case we should take care not
-		 * to spin on ->on_cpu if p is current, since that would
-		 * deadlock.
+		 * In case the architecture enables interrupts in
+		 * context_switch(), we cannot busy wait, since that
+		 * would lead to deadlocks when an interrupt hits and
+		 * tries to wake up @prev. So bail and do a complete
+		 * remote wakeup.
 		 */
-		if (p == current) {
-			ttwu_queue(p, cpu);
+		if (ttwu_activate_remote(p, wake_flags))
 			goto stat;
-		}
-#endif
+#else
 		cpu_relax();
+#endif
 	}
 	/*
 	 * Pairs with the smp_wmb() in finish_lock_switch().
@@ -2655,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu)
+	if (task_cpu(p) != cpu) {
+		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+	}
 #endif /* CONFIG_SMP */
 
 	ttwu_queue(p, cpu);
@@ -5841,7 +5875,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5963,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
@@ -5974,12 +6018,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
@@ -8764,42 +8803,10 @@ cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	return 0;
 }
 
-static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk, bool threadgroup)
-{
-	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
-	if (retval)
-		return retval;
-	if (threadgroup) {
-		struct task_struct *c;
-		rcu_read_lock();
-		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			retval = cpu_cgroup_can_attach_task(cgrp, c);
-			if (retval) {
-				rcu_read_unlock();
-				return retval;
-			}
-		}
-		rcu_read_unlock();
-	}
-	return 0;
-}
-
 static void
-cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk,
-		  bool threadgroup)
+cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 	sched_move_task(tsk);
-	if (threadgroup) {
-		struct task_struct *c;
-		rcu_read_lock();
-		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			sched_move_task(c);
-		}
-		rcu_read_unlock();
-	}
 }
 
 static void
@@ -8887,8 +8894,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.name		= "cpu",
 	.create		= cpu_cgroup_create,
 	.destroy	= cpu_cgroup_destroy,
-	.can_attach	= cpu_cgroup_can_attach,
-	.attach		= cpu_cgroup_attach,
+	.can_attach_task = cpu_cgroup_can_attach_task,
+	.attach_task	= cpu_cgroup_attach_task,
 	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
```
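The locking comments this diff adds to task_group() and set_task_cpu() all rest on one invariant: the writer (sched_move_task(), via cpu_cgroup_attach()) takes *both* p->pi_lock and rq->lock before changing the task's group, so a reader holding *either* lock sees a stable value. Below is a minimal userspace sketch of that writer-both/reader-either pattern using pthreads; the struct and function names are illustrative stand-ins, not kernel code.

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a task with the two locks the comments name. */
struct task {
	pthread_mutex_t pi_lock;
	pthread_mutex_t rq_lock;
	int group;              /* stand-in for the task's cgroup */
};

/* Writer: like sched_move_task(), takes BOTH locks to change the field. */
static void move_task(struct task *p, int new_group)
{
	pthread_mutex_lock(&p->pi_lock);
	pthread_mutex_lock(&p->rq_lock);
	p->group = new_group;
	pthread_mutex_unlock(&p->rq_lock);
	pthread_mutex_unlock(&p->pi_lock);
}

/*
 * Reader: EITHER lock alone pins the value, because the writer cannot
 * proceed without acquiring this same lock first.
 */
static int group_under_pi_lock(struct task *p)
{
	pthread_mutex_lock(&p->pi_lock);
	int g = p->group;
	pthread_mutex_unlock(&p->pi_lock);
	return g;
}

int main(void)
{
	struct task t = {
		.pi_lock = PTHREAD_MUTEX_INITIALIZER,
		.rq_lock = PTHREAD_MUTEX_INITIALIZER,
		.group = 0,
	};

	move_task(&t, 1);
	printf("group = %d\n", group_under_pi_lock(&t));
	return 0;
}
```

This is the same reasoning behind the WARN_ON_ONCE in set_task_cpu(): lockdep only needs to see one of the two locks held, since the writer side must hold both.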
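The diff also factors the duplicated affinity-update logic out of set_cpus_allowed_ptr() into do_set_cpus_allowed(), so that init_idle() can share it instead of writing cpus_allowed directly with cpumask_copy(). Here is a compilable sketch of that hook-with-fallback shape, using simplified stand-in types (the kernel's real structures are far richer):

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel types involved. */
struct cpumask { unsigned long bits; };

struct task;

struct sched_class {
	/* Optional per-class hook, as in the kernel's sched_class. */
	void (*set_cpus_allowed)(struct task *p, const struct cpumask *mask);
};

struct task {
	const struct sched_class *sched_class;
	struct cpumask cpus_allowed;
	int nr_cpus_allowed;
};

static int cpumask_weight(const struct cpumask *m)
{
	return __builtin_popcountl(m->bits);
}

/*
 * Mirrors the shape of do_set_cpus_allowed(): defer to the scheduling
 * class when it provides a hook, otherwise apply the generic bookkeeping.
 */
static void do_set_cpus_allowed(struct task *p, const struct cpumask *new_mask)
{
	if (p->sched_class && p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, new_mask);
	else {
		p->cpus_allowed = *new_mask;
		p->nr_cpus_allowed = cpumask_weight(new_mask);
	}
}

int main(void)
{
	struct sched_class fair = { .set_cpus_allowed = NULL };
	struct task p = { .sched_class = &fair };
	struct cpumask mask = { .bits = 0x3 }; /* CPUs 0 and 1 */

	do_set_cpus_allowed(&p, &mask);
	printf("nr_cpus_allowed = %d\n", p.nr_cpus_allowed); /* prints 2 */
	return 0;
}
```

The payoff of the factoring is consistency: every caller, including init_idle(), now respects a scheduling class's set_cpus_allowed() hook rather than bypassing it.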