Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	9
1 files changed, 8 insertions, 1 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 536b213f0ce..5d011ef4c0d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
 	 *
 	 * sched_move_task() holds both and thus holding either pins the cgroup,
-	 * see set_task_rq().
+	 * see task_group().
 	 *
 	 * Furthermore, all task_rq users should acquire both locks, see
 	 * task_rq_lock().
@@ -7658,6 +7658,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+	struct task_group *tg;
 	int on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
@@ -7672,6 +7673,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
+	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+				lockdep_is_held(&tsk->sighand->siglock)),
+			  struct task_group, css);
+	tg = autogroup_task_group(tsk, tg);
+	tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
 		tsk->sched_class->task_move_group(tsk, on_rq);
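
The added hunk caches the destination group in tsk->sched_task_group while sched_move_task() holds both of the locks that task_rq_lock() takes (->pi_lock and rq->lock), which is why the comment in the first hunk now points readers at task_group() rather than set_task_rq(). The reader side of that cache lives outside kernel/sched/core.c and is not shown in the diff above; as a minimal sketch, assuming only the tsk->sched_task_group field introduced here, the accessor could reduce to a plain load, since a caller holding either ->pi_lock or rq->lock keeps sched_move_task() from rewriting the field underneath it:

/*
 * Sketch only (not part of this diff): with sched_move_task() updating the
 * cached group under both ->pi_lock and rq->lock, a reader that holds either
 * lock can return the cached pointer directly instead of walking the cgroup
 * subsystem state on every call.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

This concentrates the cgroup lookup (and its lockdep check) in the single writer path, so a concurrent cgroup move cannot hand readers a group pointer that is in the middle of being updated.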