diff options
Diffstat (limited to 'kernel/sched/core.c')
| -rw-r--r-- | kernel/sched/core.c | 62 | 
1 file changed, 9 insertions, 53 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index cb49b2ab0e1..ebdb1954121 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)  {  	struct rq *rq = task_rq(p); -	BUG_ON(rq != this_rq()); -	BUG_ON(p == current); +	if (WARN_ON_ONCE(rq != this_rq()) || +	    WARN_ON_ONCE(p == current)) +		return; +  	lockdep_assert_held(&rq->lock);  	if (!raw_spin_trylock(&p->pi_lock)) { @@ -2997,51 +2999,6 @@ void __sched schedule_preempt_disabled(void)  	preempt_disable();  } -#ifdef CONFIG_MUTEX_SPIN_ON_OWNER - -static inline bool owner_running(struct mutex *lock, struct task_struct *owner) -{ -	if (lock->owner != owner) -		return false; - -	/* -	 * Ensure we emit the owner->on_cpu, dereference _after_ checking -	 * lock->owner still matches owner, if that fails, owner might -	 * point to free()d memory, if it still matches, the rcu_read_lock() -	 * ensures the memory stays valid. -	 */ -	barrier(); - -	return owner->on_cpu; -} - -/* - * Look out! "owner" is an entirely speculative pointer - * access and not reliable. - */ -int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) -{ -	if (!sched_feat(OWNER_SPIN)) -		return 0; - -	rcu_read_lock(); -	while (owner_running(lock, owner)) { -		if (need_resched()) -			break; - -		arch_mutex_cpu_relax(); -	} -	rcu_read_unlock(); - -	/* -	 * We break out the loop above on need_resched() and when the -	 * owner changed, which is a sign for heavy contention. Return -	 * success only when lock->owner is NULL. 
-	 */ -	return lock->owner == NULL; -} -#endif -  #ifdef CONFIG_PREEMPT  /*   * this is the entry point to schedule() from in-kernel preemption @@ -4130,6 +4087,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)  	get_task_struct(p);  	rcu_read_unlock(); +	if (p->flags & PF_NO_SETAFFINITY) { +		retval = -EINVAL; +		goto out_put_task; +	}  	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {  		retval = -ENOMEM;  		goto out_put_task; @@ -4777,11 +4738,6 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)  		goto out;  	} -	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) { -		ret = -EINVAL; -		goto out; -	} -  	do_set_cpus_allowed(p, new_mask);  	/* Can the task run on the task's current CPU? If so, we're done */ @@ -5003,7 +4959,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)  }  static int min_load_idx = 0; -static int max_load_idx = CPU_LOAD_IDX_MAX; +static int max_load_idx = CPU_LOAD_IDX_MAX-1;  static void  set_table_entry(struct ctl_table *entry,  |