Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/auto_group.c	 3
-rw-r--r--	kernel/sched/core.c	49
-rw-r--r--	kernel/sched/debug.c	 7
3 files changed, 42 insertions, 17 deletions
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21076a..64de5f8b0c9 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -35,6 +35,7 @@ static inline void autogroup_destroy(struct kref *kref)
 	ag->tg->rt_se = NULL;
 	ag->tg->rt_rq = NULL;
 #endif
+	sched_offline_group(ag->tg);
 	sched_destroy_group(ag->tg);
 }
 
@@ -76,6 +77,8 @@ static inline struct autogroup *autogroup_create(void)
 	if (IS_ERR(tg))
 		goto out_free;
 
+	sched_online_group(tg, &root_task_group);
+
 	kref_init(&ag->kref);
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 03d7784b7bd..3a673a3b0c6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7161,7 +7161,6 @@ static void free_sched_group(struct task_group *tg)
 struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
-	unsigned long flags;
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 	if (!tg)
@@ -7173,6 +7172,17 @@ struct task_group *sched_create_group(struct task_group *parent)
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
+	return tg;
+
+err:
+	free_sched_group(tg);
+	return ERR_PTR(-ENOMEM);
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+	unsigned long flags;
+
 	spin_lock_irqsave(&task_group_lock, flags);
 	list_add_rcu(&tg->list, &task_groups);
 
@@ -7182,12 +7192,6 @@ struct task_group *sched_create_group(struct task_group *parent)
 	INIT_LIST_HEAD(&tg->children);
 	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	return tg;
-
-err:
-	free_sched_group(tg);
-	return ERR_PTR(-ENOMEM);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7200,6 +7204,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
+	/* wait for possible concurrent references to cfs_rqs complete */
+	call_rcu(&tg->rcu, free_sched_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
+{
 	unsigned long flags;
 	int i;
 
@@ -7211,9 +7221,6 @@ void sched_destroy_group(struct task_group *tg)
 	list_del_rcu(&tg->list);
 	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 /* change task's runqueue when it moves between groups.
@@ -7584,6 +7591,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
+static int cpu_cgroup_css_online(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *parent;
+
+	if (!cgrp->parent)
+		return 0;
+
+	parent = cgroup_tg(cgrp->parent);
+	sched_online_group(tg, parent);
+	return 0;
+}
+
 static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
@@ -7591,6 +7611,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
 	sched_destroy_group(tg);
 }
 
+static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+
+	sched_offline_group(tg);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
@@ -7946,6 +7973,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.name		= "cpu",
 	.css_alloc	= cpu_cgroup_css_alloc,
 	.css_free	= cpu_cgroup_css_free,
+	.css_online	= cpu_cgroup_css_online,
+	.css_offline	= cpu_cgroup_css_offline,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 7ae4c4c5420..557e7b53b32 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -110,13 +110,6 @@ static char *task_group_path(struct task_group *tg)
 	if (autogroup_path(tg, group_path, PATH_MAX))
 		return group_path;
 
-	/*
-	 * May be NULL if the underlying cgroup isn't fully-created yet
-	 */
-	if (!tg->css.cgroup) {
-		group_path[0] = '\0';
-		return group_path;
-	}
 	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 	return group_path;
 }
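Note: the hunks above split the old one-step task-group APIs into a create -> online pairing on the way up and an offline -> destroy pairing on the way down, moving the task_groups list manipulation into sched_online_group()/sched_offline_group() while leaving allocation in sched_create_group() and the RCU-deferred free in sched_destroy_group(). A minimal caller-side sketch of the resulting pairing, assuming kernel/sched context (the example_* wrappers are hypothetical and only mirror the auto_group.c hunk; "sched.h" is the local kernel/sched header that declares struct task_group and these functions):

/* Hypothetical caller-side sketch of the new create/online and
 * offline/destroy pairing introduced by this split.
 */
#include <linux/err.h>	/* IS_ERR() */
#include "sched.h"	/* struct task_group, sched_*_group() */

static struct task_group *example_make_group(struct task_group *parent)
{
	struct task_group *tg;

	/* Step 1: allocate the group's cfs/rt structures;
	 * the group is not yet on any global list. */
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return tg;

	/* Step 2: link the group into task_groups and the parent's
	 * children list, making it visible to list walkers. */
	sched_online_group(tg, parent);
	return tg;
}

static void example_remove_group(struct task_group *tg)
{
	/* Unlink the group from the lists first ... */
	sched_offline_group(tg);
	/* ... then free it behind RCU once concurrent readers are done. */
	sched_destroy_group(tg);
}

This is the same ordering the auto_group.c hunk follows, and it is what lets the cpu cgroup defer the online step to css_online and the offline step to css_offline; with a group never visible on the lists before its cgroup is fully set up, the NULL-cgroup check removed from debug.c is presumably no longer needed.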