Diffstat (limited to 'kernel')
 kernel/context_tracking.c | 40 ++++++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c       | 21 +++++++++++++++++----
 2 files changed, 57 insertions(+), 4 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 85bdde1137e..383f8231e43 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -70,6 +70,46 @@ void user_enter(void)
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+	struct thread_info *ti = current_thread_info();
+	enum ctx_state prev_ctx;
+
+	if (likely(ti->preempt_count || irqs_disabled()))
+		return;
+
+	/*
+	 * Need to disable preemption in case user_exit() is traced
+	 * and the tracer calls preempt_enable_notrace() causing
+	 * an infinite recursion.
+	 */
+	preempt_disable_notrace();
+	prev_ctx = exception_enter();
+	preempt_enable_no_resched_notrace();
+
+	preempt_schedule();
+
+	preempt_disable_notrace();
+	exception_exit(prev_ctx);
+	preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
 
 /**
  * user_exit - Inform the context tracking that the CPU is
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1a27f91872..e8b335016c5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
-	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		return false;
+
+	if (idle_cpu(cpu) && !need_resched())
+		return true;
+
+	/*
+	 * We can't run Idle Load Balance on this CPU for this time so we
+	 * cancel it and clear NOHZ_BALANCE_KICK
+	 */
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-	    && !tick_nohz_full_cpu(smp_processor_id()))
+	if (llist_empty(&this_rq()->wake_list)
+			&& !tick_nohz_full_cpu(smp_processor_id())
+			&& !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+	if (unlikely(got_nohz_idle_kick())) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
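
Note: the preempt_schedule_context() helper added above only takes effect once the notrace preempt-enable path dispatches to it instead of calling preempt_schedule() directly. A rough sketch of that wiring is shown below, modelled on include/linux/preempt.h of this era; the exact macro bodies are an assumption and are not part of this diff.

/* Sketch only: assumed companion wiring in include/linux/preempt.h. */
#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

/* Reschedule through the context-tracking aware entry point. */
#define preempt_check_resched_context() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule_context(); \
} while (0)

#else

/* Without context tracking the plain resched check is enough. */
#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

/*
 * preempt_enable_notrace() ends its critical section with the
 * context-aware check, so a preemption point hit before user_exit()
 * has run leaves the "user" context-tracking state via
 * preempt_schedule_context() before the scheduler is invoked.
 */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched_context(); \
} while (0)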