Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 43 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a455dca884a..3c11ae0a948 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1992,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2004,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
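
For context, a minimal usage sketch of the relocated kthread_bind(): the creator makes the thread with kthread_create(), binds it while it is still stopped (it has not been woken yet), then wakes it. This is not part of the commit; my_worker_fn, start_pinned_worker, and the thread name are hypothetical names for illustration.

/*
 * Hedged usage sketch, not from this commit: my_worker_fn and
 * start_pinned_worker are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_worker_fn(void *data)
{
	/* Standard sleepy loop; runs pinned to the CPU chosen below. */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create(my_worker_fn, NULL, "my_worker/%u", cpu);
	if (IS_ERR(p))
		return p;

	/*
	 * The thread is still stopped inside kthread(), which is exactly
	 * the state kthread_bind() requires; @cpu only needs to be
	 * possible, not online.
	 */
	kthread_bind(p, cpu);
	wake_up_process(p);
	return p;
}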
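
The alloc_cpumask_var() to zalloc_cpumask_var() conversions in sched_init() only change behavior when CONFIG_CPUMASK_OFFSTACK=y: there the mask is kmalloc'ed rather than living in zeroed static storage, so code that only ever sets or clears individual bits would otherwise start from uninitialized memory. A sketch of the pattern, illustrative only ("mask" and "example_init" are hypothetical, not names from the commit):

/*
 * Illustrative only.  zalloc_cpumask_var() behaves like
 * alloc_cpumask_var() followed by cpumask_clear().
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/smp.h>

static cpumask_var_t mask;

static int __init example_init(void)
{
	/* Allocation can only fail with CONFIG_CPUMASK_OFFSTACK=y. */
	if (!zalloc_cpumask_var(&mask, GFP_NOWAIT))
		return -ENOMEM;

	/* Safe: every bit is known to be clear before the first set. */
	cpumask_set_cpu(smp_processor_id(), mask);
	return 0;
}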