Diffstat (limited to 'kernel')
 kernel/rcu.h               |   7
 kernel/rcupdate.c          |  60
 kernel/rcutiny.c           |   6
 kernel/rcutiny_plugin.h    |  56
 kernel/rcutorture.c        |  55
 kernel/rcutree.c           | 247
 kernel/rcutree.h           |   7
 kernel/trace/trace_clock.c |   1
8 files changed, 347 insertions, 92 deletions
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 20dfba576c2..7f8e7590e3e 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -111,4 +111,11 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
 
 extern int rcu_expedited;
 
+#ifdef CONFIG_RCU_STALL_COMMON
+
+extern int rcu_cpu_stall_suppress;
+int rcu_jiffies_till_stall_check(void);
+
+#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+
 #endif /* __LINUX_RCU_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a2cf76177b4..48ab70384a4 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -404,11 +404,65 @@ EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
-void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
+			       unsigned long secs,
+			       unsigned long c_old, unsigned long c)
 {
-	trace_rcu_torture_read(rcutorturename, rhp);
+	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
 }
 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #else
-#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+	do { } while (0)
 #endif
+
+#ifdef CONFIG_RCU_STALL_COMMON
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA	       0
+#endif
+
+int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
+int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+
+module_param(rcu_cpu_stall_suppress, int, 0644);
+module_param(rcu_cpu_stall_timeout, int, 0644);
+
+int rcu_jiffies_till_stall_check(void)
+{
+	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+
+	/*
+	 * Limit check must be consistent with the Kconfig limits
+	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+	 */
+	if (till_stall_check < 3) {
+		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+		till_stall_check = 3;
+	} else if (till_stall_check > 300) {
+		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+		till_stall_check = 300;
+	}
+	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+}
+
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+	rcu_cpu_stall_suppress = 1;
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+	.notifier_call = rcu_panic,
+};
+
+static int __init check_cpu_stall_init(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+	return 0;
+}
+early_initcall(check_cpu_stall_init);
+
+#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
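
The new rcu_jiffies_till_stall_check() in the kernel/rcupdate.c hunk above clamps the rcu_cpu_stall_timeout module parameter to the same 3..300 second range enforced by Kconfig and then converts it to jiffies. The following stand-alone sketch (not part of the patch) shows only that clamping arithmetic; HZ=1000 is an assumption here, and the lockdep-only RCU_STALL_DELAY_DELTA slack is omitted:

/* Stand-alone sketch of the clamp in rcu_jiffies_till_stall_check().
 * HZ and the [3, 300] bounds come from the patch; everything else is
 * illustrative only. */
#include <stdio.h>

#define HZ 1000				/* assumption: 1000 ticks per second */

static int jiffies_till_stall(int timeout_sec)
{
	if (timeout_sec < 3)		/* too small: pull up to 3 seconds */
		timeout_sec = 3;
	else if (timeout_sec > 300)	/* too large: cap at 300 seconds */
		timeout_sec = 300;
	return timeout_sec * HZ;	/* convert seconds to jiffies */
}

int main(void)
{
	printf("%d s -> %d jiffies\n", 0, jiffies_till_stall(0));	/* 3000 */
	printf("%d s -> %d jiffies\n", 21, jiffies_till_stall(21));	/* 21000 */
	printf("%d s -> %d jiffies\n", 999, jiffies_till_stall(999));	/* 300000 */
	return 0;
}

A timeout written through the module parameter outside that range is therefore pulled back in, and in the real function the stored parameter itself is rewritten via ACCESS_ONCE().
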
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9f72a0f9f85..a0714a51b6d 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -51,10 +51,10 @@ static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
 
-#include "rcutiny_plugin.h"
-
 static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 
+#include "rcutiny_plugin.h"
+
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
 static void rcu_idle_enter_common(long long newval)
 {
@@ -205,6 +205,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
+	reset_cpu_stall_ticks(rcp);
 	if (rcp->rcucblist != NULL &&
 	    rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
@@ -251,6 +252,7 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	check_cpu_stalls();
 	if (user || rcu_is_cpu_rrupt_from_idle())
 		rcu_sched_qs(cpu);
 	else if (!in_softirq())
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index f85016a2309..8a233002fae 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -33,6 +33,9 @@ struct rcu_ctrlblk {
 	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
 	struct rcu_head **curtail;	/* ->next pointer of last CB. */
 	RCU_TRACE(long qlen);		/* Number of pending CBs. */
+	RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
+	RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
+	RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
 	RCU_TRACE(char *name);		/* Name of RCU type. */
 };
 
@@ -54,6 +57,51 @@ int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#ifdef CONFIG_RCU_TRACE
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+	unsigned long j;
+	unsigned long js;
+
+	if (rcu_cpu_stall_suppress)
+		return;
+	rcp->ticks_this_gp++;
+	j = jiffies;
+	js = rcp->jiffies_stall;
+	if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
+		       rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+		       jiffies - rcp->gp_start, rcp->qlen);
+		dump_stack();
+	}
+	if (*rcp->curtail && ULONG_CMP_GE(j, js))
+		rcp->jiffies_stall = jiffies +
+			3 * rcu_jiffies_till_stall_check() + 3;
+	else if (ULONG_CMP_GE(j, js))
+		rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+}
+
+static void check_cpu_stall_preempt(void);
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
+{
+#ifdef CONFIG_RCU_TRACE
+	rcp->ticks_this_gp = 0;
+	rcp->gp_start = jiffies;
+	rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+#endif /* #ifdef CONFIG_RCU_TRACE */
+}
+
+static void check_cpu_stalls(void)
+{
+	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
+	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
+	RCU_TRACE(check_cpu_stall_preempt());
+}
+
 #ifdef CONFIG_TINY_PREEMPT_RCU
 
 #include <linux/delay.h>
 
@@ -448,6 +496,7 @@ static void rcu_preempt_start_gp(void)
 		/* Official start of GP. */
 		rcu_preempt_ctrlblk.gpnum++;
 		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
+		reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);
 
 		/* Any blocked RCU readers block new GP. */
 		if (rcu_preempt_blocked_readers_any())
@@ -1054,4 +1103,11 @@ MODULE_AUTHOR("Paul E. McKenney");
 MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
 MODULE_LICENSE("GPL");
+static void check_cpu_stall_preempt(void)
+{
+#ifdef CONFIG_TINY_PREEMPT_RCU
+	check_cpu_stall(&rcu_preempt_ctrlblk.rcb);
+#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+}
+
 #endif /* #ifdef CONFIG_RCU_TRACE */
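
check_cpu_stall() in the kernel/rcutiny_plugin.h hunk above decides whether a warning is due by comparing jiffies against ->jiffies_stall with ULONG_CMP_GE() rather than a plain >=, so the test keeps working after the jiffies counter wraps. A small user-space sketch of why the subtraction-based comparison is wrap-safe (the macros mirror the kernel's ULONG_CMP_GE()/ULONG_CMP_LT(); the sample values are invented):

/* Sketch of wraparound-safe time comparison as used by check_cpu_stall(). */
#include <stdio.h>
#include <limits.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long stall_at = ULONG_MAX - 5;	/* deadline just before wrap */
	unsigned long now = 10;			/* jiffies has since wrapped */

	/* A naive ">=" claims the deadline is still far in the future... */
	printf("naive now >= stall_at      : %d\n", now >= stall_at);		/* 0 */
	/* ...but the modular comparison correctly reports it as passed. */
	printf("ULONG_CMP_GE(now, stall_at): %d\n",
	       ULONG_CMP_GE(now, stall_at));					/* 1 */
	return 0;
}
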
McKenney");  MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");  MODULE_LICENSE("GPL"); +static void check_cpu_stall_preempt(void) +{ +#ifdef CONFIG_TINY_PREEMPT_RCU +	check_cpu_stall(&rcu_preempt_ctrlblk.rcb); +#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */ +} +  #endif /* #ifdef CONFIG_RCU_TRACE */ diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 0249800c611..cd4c35d097a 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -46,6 +46,7 @@  #include <linux/stat.h>  #include <linux/srcu.h>  #include <linux/slab.h> +#include <linux/trace_clock.h>  #include <asm/byteorder.h>  MODULE_LICENSE("GPL"); @@ -845,7 +846,7 @@ static int rcu_torture_boost(void *arg)  		/* Wait for the next test interval. */  		oldstarttime = boost_starttime;  		while (ULONG_CMP_LT(jiffies, oldstarttime)) { -			schedule_timeout_uninterruptible(1); +			schedule_timeout_interruptible(oldstarttime - jiffies);  			rcu_stutter_wait("rcu_torture_boost");  			if (kthread_should_stop() ||  			    fullstop != FULLSTOP_DONTSTOP) @@ -1028,7 +1029,6 @@ void rcutorture_trace_dump(void)  		return;  	if (atomic_xchg(&beenhere, 1) != 0)  		return; -	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);  	ftrace_dump(DUMP_ALL);  } @@ -1042,13 +1042,16 @@ static void rcu_torture_timer(unsigned long unused)  {  	int idx;  	int completed; +	int completed_end;  	static DEFINE_RCU_RANDOM(rand);  	static DEFINE_SPINLOCK(rand_lock);  	struct rcu_torture *p;  	int pipe_count; +	unsigned long long ts;  	idx = cur_ops->readlock();  	completed = cur_ops->completed(); +	ts = trace_clock_local();  	p = rcu_dereference_check(rcu_torture_current,  				  rcu_read_lock_bh_held() ||  				  rcu_read_lock_sched_held() || @@ -1058,7 +1061,6 @@ static void rcu_torture_timer(unsigned long unused)  		cur_ops->readunlock(idx);  		return;  	} -	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);  	if (p->rtort_mbtest == 0)  		atomic_inc(&n_rcu_torture_mberror);  	spin_lock(&rand_lock); @@ -1071,10 +1073,16 @@ static void rcu_torture_timer(unsigned long unused)  		/* Should not happen, but... */  		pipe_count = RCU_TORTURE_PIPE_LEN;  	} -	if (pipe_count > 1) +	completed_end = cur_ops->completed(); +	if (pipe_count > 1) { +		unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC); + +		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, +					  completed, completed_end);  		rcutorture_trace_dump(); +	}  	__this_cpu_inc(rcu_torture_count[pipe_count]); -	completed = cur_ops->completed() - completed; +	completed = completed_end - completed;  	if (completed > RCU_TORTURE_PIPE_LEN) {  		/* Should not happen, but... 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 8a13c8ef964..5b8ad827fd8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -217,12 +217,6 @@ module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 
-int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-
-module_param(rcu_cpu_stall_suppress, int, 0644);
-module_param(rcu_cpu_stall_timeout, int, 0644);
-
 static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 
@@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 }
 
 /*
- * Does the current CPU require a yet-as-unscheduled grace period?
+ * Does the current CPU require a not-yet-started grace period?
+ * The caller must have disabled interrupts to prevent races with
+ * normal callback registry.
  */
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	struct rcu_head **ntp;
+	int i;
 
-	ntp = rdp->nxttail[RCU_DONE_TAIL +
-			   (ACCESS_ONCE(rsp->completed) != rdp->completed)];
-	return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp &&
-	       !rcu_gp_in_progress(rsp);
+	if (rcu_gp_in_progress(rsp))
+		return 0;  /* No, a grace period is already in progress. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL])
+		return 0;  /* No, this is a no-CBs (or offline) CPU. */
+	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
+		return 1;  /* Yes, this CPU has newly registered callbacks. */
+	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
+		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
+		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+				 rdp->nxtcompleted[i]))
+			return 1;  /* Yes, CBs for future grace period. */
+	return 0; /* No grace period needed. */
 }
 
 /*
@@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }
 
-static int jiffies_till_stall_check(void)
-{
-	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
-
-	/*
-	 * Limit check must be consistent with the Kconfig limits
-	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
-	 */
-	if (till_stall_check < 3) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
-		till_stall_check = 3;
-	} else if (till_stall_check > 300) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
-		till_stall_check = 300;
-	}
-	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
-}
-
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
 	rsp->gp_start = jiffies;
-	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
+	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
 }
 
 /*
@@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
-	rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
+	rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
 		rsp->jiffies_stall = jiffies +
-				     3 * jiffies_till_stall_check() + 3;
+				     3 * rcu_jiffies_till_stall_check() + 3;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	set_need_resched();  /* kick ourselves to get things going. */
@@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 }
 
-static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
-{
-	rcu_cpu_stall_suppress = 1;
-	return NOTIFY_DONE;
-}
-
 /**
  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  *
@@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void)
 		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
-static struct notifier_block rcu_panic_block = {
-	.notifier_call = rcu_panic,
-};
-
-static void __init check_cpu_stall_init(void)
-{
-	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
-}
-
 /*
  * Update CPU-local rcu_data state to record the newly noticed grace period.
  * This is used both when we started the grace period and when we notice
@@ -1071,6 +1042,145 @@ static void init_callback_list(struct rcu_data *rdp)
 }
 
 /*
+ * Determine the value that ->completed will have at the end of the
+ * next subsequent grace period.  This is used to tag callbacks so that
+ * a CPU can invoke callbacks in a timely fashion even if that CPU has
+ * been dyntick-idle for an extended period with callbacks under the
+ * influence of RCU_FAST_NO_HZ.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
+				       struct rcu_node *rnp)
+{
+	/*
+	 * If RCU is idle, we just wait for the next grace period.
+	 * But we can only be sure that RCU is idle if we are looking
+	 * at the root rcu_node structure -- otherwise, a new grace
+	 * period might have started, but just not yet gotten around
+	 * to initializing the current non-root rcu_node structure.
+	 */
+	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
+		return rnp->completed + 1;
+
+	/*
+	 * Otherwise, wait for a possible partial grace period and
+	 * then the subsequent full grace period.
+	 */
+	return rnp->completed + 2;
+}
+
+/*
+ * If there is room, assign a ->completed number to any callbacks on
+ * this CPU that have not already been assigned.  Also accelerate any
+ * callbacks that were previously assigned a ->completed number that has
+ * since proven to be too conservative, which can happen if callbacks get
+ * assigned a ->completed number while RCU is idle, but with reference to
+ * a non-root rcu_node structure.  This function is idempotent, so it does
+ * not hurt to call it repeatedly.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+			       struct rcu_data *rdp)
+{
+	unsigned long c;
+	int i;
+
+	/* If the CPU has no callbacks, nothing to do. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+		return;
+
+	/*
+	 * Starting from the sublist containing the callbacks most
+	 * recently assigned a ->completed number and working down, find the
+	 * first sublist that is not assignable to an upcoming grace period.
+	 * Such a sublist has something in it (first two tests) and has
+	 * a ->completed number assigned that will complete sooner than
+	 * the ->completed number for newly arrived callbacks (last test).
+	 *
+	 * The key point is that any later sublist can be assigned the
+	 * same ->completed number as the newly arrived callbacks, which
+	 * means that the callbacks in any of these later sublist can be
+	 * grouped into a single sublist, whether or not they have already
+	 * been assigned a ->completed number.
+	 */
+	c = rcu_cbs_completed(rsp, rnp);
+	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
+		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
+		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
+			break;
+
+	/*
+	 * If there are no sublist for unassigned callbacks, leave.
+	 * At the same time, advance "i" one sublist, so that "i" will
+	 * index into the sublist where all the remaining callbacks should
+	 * be grouped into.
+	 */
+	if (++i >= RCU_NEXT_TAIL)
+		return;
+
+	/*
+	 * Assign all subsequent callbacks' ->completed number to the next
+	 * full grace period and group them all in the sublist initially
+	 * indexed by "i".
+	 */
+	for (; i <= RCU_NEXT_TAIL; i++) {
+		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
+		rdp->nxtcompleted[i] = c;
+	}
+
+	/* Trace depending on how much we were able to accelerate. */
+	if (!*rdp->nxttail[RCU_WAIT_TAIL])
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+	else
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+}
+
+/*
+ * Move any callbacks whose grace period has completed to the
+ * RCU_DONE_TAIL sublist, then compact the remaining sublists and
+ * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * sublist.  This function is idempotent, so it does not hurt to
+ * invoke it repeatedly.  As long as it is not invoked -too- often...
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+			    struct rcu_data *rdp)
+{
+	int i, j;
+
+	/* If the CPU has no callbacks, nothing to do. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+		return;
+
+	/*
+	 * Find all callbacks whose ->completed numbers indicate that they
+	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
+	 */
+	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
+		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
+			break;
+		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
+	}
+	/* Clean up any sublist tail pointers that were misordered above. */
+	for (j = RCU_WAIT_TAIL; j < i; j++)
+		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
+
+	/* Copy down callbacks to fill in empty sublists. */
+	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
+		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
+			break;
+		rdp->nxttail[j] = rdp->nxttail[i];
+		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
+	}
+
+	/* Classify any remaining callbacks. */
+	rcu_accelerate_cbs(rsp, rnp, rdp);
+}
+
+/*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended.  This may be called only from the CPU to whom the rdp
  * belongs.  In addition, the corresponding leaf rcu_node structure's
@@ -1080,12 +1190,15 @@ static void
 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	/* Did another grace period end? */
-	if (rdp->completed != rnp->completed) {
+	if (rdp->completed == rnp->completed) {
+
+		/* No, so just accelerate recent callbacks. */
+		rcu_accelerate_cbs(rsp, rnp, rdp);
 
-		/* Advance callbacks.  No harm if list empty. */
-		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
-		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
-		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+	} else {
+
+		/* Advance callbacks. */
+		rcu_advance_cbs(rsp, rnp, rdp);
 
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
 
@@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	/*
 	 * Because there is no grace period in progress right now,
 	 * any callbacks we have up to this point will be satisfied
-	 * by the next grace period.  So promote all callbacks to be
-	 * handled after the end of the next grace period.  If the
-	 * CPU is not yet aware of the end of the previous grace period,
-	 * we need to allow for the callback advancement that will
-	 * occur when it does become aware.  Deadlock prevents us from
-	 * making it aware at this point: We cannot acquire a leaf
-	 * rcu_node ->lock while holding the root rcu_node ->lock.
+	 * by the next grace period.  So this is a good place to
+	 * assign a grace period number to recently posted callbacks.
 	 */
-	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-	if (rdp->completed == rsp->completed)
-		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+	rcu_accelerate_cbs(rsp, rnp, rdp);
 
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
 	raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
@@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
 	}
@@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	long bl, count, count_lazy;
 	int i;
 
-	/* If no callbacks are ready, just return.*/
+	/* If no callbacks are ready, just return. */
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
@@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	WARN_ON_ONCE(rdp->beenonline == 0);
 
-	/*
-	 * Advance callbacks in response to end of earlier grace
-	 * period that some other CPU ended.
-	 */
+	/* Handle the end of a grace period that some other CPU ended.  */
 	rcu_process_gp_end(rsp, rdp);
 
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rsp, rdp);
 
 	/* Does this CPU require a not-yet-started grace period? */
+	local_irq_save(flags);
 	if (cpu_needs_another_gp(rsp, rdp)) {
-		raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
 		rcu_start_gp(rsp, flags);  /* releases above lock */
+	} else {
+		local_irq_restore(flags);
 	}
 
 	/* If there are callbacks ready, invoke them. */
@@ -3075,7 +3181,6 @@ void __init rcu_init(void)
 	cpu_notifier(rcu_cpu_notify, 0);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-	check_cpu_stall_init();
 }
 
 #include "rcutree_plugin.h"
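
The heart of the kernel/rcutree.c changes above is the rcu_accelerate_cbs()/rcu_advance_cbs() pair, which tags each pending callback sublist with the ->completed value of the grace period that will make its callbacks invokable; rcu_cbs_completed() supplies that number. A deliberately simplified, stand-alone model of just that numbering rule follows (the toy structure below is not the kernel's rcu_node, and the field values are invented):

/* Sketch of the numbering rule in rcu_cbs_completed(). */
#include <stdio.h>
#include <stdbool.h>

struct toy_node {
	unsigned long gpnum;		/* most recently started grace period */
	unsigned long completed;	/* most recently completed grace period */
	bool is_root;			/* are we looking at the root node? */
};

static unsigned long toy_cbs_completed(const struct toy_node *rnp)
{
	/* Provably idle (only the root can say so): next GP suffices. */
	if (rnp->is_root && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;
	/* Otherwise allow for a partial GP plus one full GP. */
	return rnp->completed + 2;
}

int main(void)
{
	struct toy_node idle_root = { .gpnum = 100, .completed = 100, .is_root = true };
	struct toy_node busy_leaf = { .gpnum = 101, .completed = 100, .is_root = false };

	printf("idle root: tag callbacks with %lu\n", toy_cbs_completed(&idle_root)); /* 101 */
	printf("busy leaf: tag callbacks with %lu\n", toy_cbs_completed(&busy_leaf)); /* 102 */
	return 0;
}

The +2 in the non-root case allows for a grace period that may already be partially initialized, exactly as the comment in rcu_cbs_completed() explains.
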
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 6f21f2eccb1..c896b5045d9 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -278,6 +278,8 @@ struct rcu_data {
 	 */
 	struct rcu_head *nxtlist;
 	struct rcu_head **nxttail[RCU_NEXT_SIZE];
+	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
+					/* grace periods for sublists. */
 	long		qlen_lazy;	/* # of lazy queued callbacks */
 	long		qlen;		/* # of queued callbacks, incl lazy */
 	long		qlen_last_fqs_check;
@@ -339,11 +341,6 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
 
-#ifdef CONFIG_PROVE_RCU
-#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
-#else
-#define RCU_STALL_DELAY_DELTA	       0
-#endif
 #define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
 						/*  to take at least one */
 						/*  scheduling clock irq */
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 394783531cb..1bbb1b200ce 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -44,6 +44,7 @@ u64 notrace trace_clock_local(void)
 
 	return clock;
 }
+EXPORT_SYMBOL_GPL(trace_clock_local);
 
 /*
  * trace_clock(): 'between' trace clock. Not completely serialized,
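
The final one-line hunk exports trace_clock_local() so that modular code such as rcutorture can timestamp its events. A minimal sketch of a GPL module using the newly exported symbol (module name and messages are invented; this is illustration only, not part of the patch):

/* Hypothetical example module: everything except trace_clock_local()
 * itself is invented for illustration. */
#include <linux/module.h>
#include <linux/trace_clock.h>

static u64 t_start;

static int __init tclock_demo_init(void)
{
	t_start = trace_clock_local();	/* lockless, CPU-local timestamp in ns */
	pr_info("tclock_demo: loaded at %llu ns\n",
		(unsigned long long)t_start);
	return 0;
}

static void __exit tclock_demo_exit(void)
{
	u64 delta = trace_clock_local() - t_start;

	pr_info("tclock_demo: loaded for roughly %llu ns\n",
		(unsigned long long)delta);
}

module_init(tclock_demo_init);
module_exit(tclock_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbol is EXPORT_SYMBOL_GPL */
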