commit 4145fa7fbee3ec1e61c52825b146192885d9759f
tree 58e0767a59bf110acb99da3fb5c1f0f0b5170be8
parent 9ceae0e248fb553c702d51d5275167d462f4efd2
author Paul E. McKenney <paul.mckenney@linaro.org> 2011-10-31 15:01:54 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2011-12-11 10:31:42 -0800
rcu: Deconfuse dynticks entry-exit tracing
The trace_rcu_dyntick() trace event did not print both the old and
the new value of the nesting level, and furthermore printed only
the low-order 32 bits of it.  This could cause confusion when
interpreting trace-event dumps, so this commit prints both the old
and the new values, prints the full 64 bits of each, and also
chooses a process-entry/exit increment that prints nicely in
hexadecimal.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
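
A minimal stand-alone sketch (plain user-space C, not kernel code) of the confusion being fixed: the process-level increment introduced below, DYNTICK_TASK_NESTING, is LLONG_MAX / 2 - 1, and truncating such a value to its low-order 32 bits, as the old "%d" format did, discards everything of interest.

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Process-level nesting increment, per DYNTICK_TASK_NESTING below. */
	long long nesting = LLONG_MAX / 2 - 1;	/* 0x3ffffffffffffffe */

	/* Old trace format: low-order 32 bits only ("%d").  On common
	 * two's-complement ABIs this reads as -2, which says nothing
	 * about the actual nesting level. */
	printf("old view: %d\n", (int)nesting);

	/* New trace format: the full 64 bits, in hex ("%llx"), so the
	 * process-level increment is immediately recognizable. */
	printf("new view: %llx\n", nesting);
	return 0;
}
```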
-rw-r--r-- include/trace/events/rcu.h | 15
-rw-r--r-- kernel/rcu.h               |  7
-rw-r--r-- kernel/rcutiny.c           | 28
-rw-r--r-- kernel/rcutree.c           | 35

4 files changed, 53 insertions(+), 32 deletions(-)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 172620a92b1..c29fb2f5590 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -246,21 +246,24 @@ TRACE_EVENT(rcu_fqs,
  */
 TRACE_EVENT(rcu_dyntick,
 
-	TP_PROTO(char *polarity, int nesting),
+	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
 
-	TP_ARGS(polarity, nesting),
+	TP_ARGS(polarity, oldnesting, newnesting),
 
 	TP_STRUCT__entry(
 		__field(char *, polarity)
-		__field(int, nesting)
+		__field(long long, oldnesting)
+		__field(long long, newnesting)
 	),
 
 	TP_fast_assign(
 		__entry->polarity = polarity;
-		__entry->nesting = nesting;
+		__entry->oldnesting = oldnesting;
+		__entry->newnesting = newnesting;
 	),
 
-	TP_printk("%s %d", __entry->polarity, __entry->nesting)
+	TP_printk("%s %llx %llx", __entry->polarity,
+		  __entry->oldnesting, __entry->newnesting)
 );
 
 /*
@@ -470,7 +473,7 @@ TRACE_EVENT(rcu_torture_read,
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, nesting) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
diff --git a/kernel/rcu.h b/kernel/rcu.h
index f600868d550..aa88baab5f7 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -30,6 +30,13 @@
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 /*
+ * Process-level increment to ->dynticks_nesting field.  This allows for
+ * architectures that use half-interrupts and half-exceptions from
+ * process context.
+ */
+#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
+
+/*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
  * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4e0b498176..9b9bdf666fb 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,20 +53,21 @@ static void __call_rcu(struct rcu_head *head,
 
 #include "rcutiny_plugin.h"
 
-static long long rcu_dynticks_nesting = LLONG_MAX / 2;
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
 
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
-static void rcu_idle_enter_common(void)
+static void rcu_idle_enter_common(long long oldval)
 {
 	if (rcu_dynticks_nesting) {
-		RCU_TRACE(trace_rcu_dyntick("--=", rcu_dynticks_nesting));
+		RCU_TRACE(trace_rcu_dyntick("--=",
+					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting));
+	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
-					    rcu_dynticks_nesting));
+					    oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 	}
 	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
@@ -79,10 +80,12 @@ static void rcu_idle_enter_common(void)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
+	long long oldval;
 
 	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
 	rcu_dynticks_nesting = 0;
-	rcu_idle_enter_common();
+	rcu_idle_enter_common(oldval);
 	local_irq_restore(flags);
 }
 
@@ -92,11 +95,13 @@ void rcu_idle_enter(void)
 void rcu_irq_exit(void)
 {
 	unsigned long flags;
+	long long oldval;
 
 	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
 	rcu_dynticks_nesting--;
 	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	rcu_idle_enter_common();
+	rcu_idle_enter_common(oldval);
 	local_irq_restore(flags);
 }
 
@@ -104,14 +109,15 @@ void rcu_irq_exit(void)
 static void rcu_idle_exit_common(long long oldval)
 {
 	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick("++=", rcu_dynticks_nesting));
+		RCU_TRACE(trace_rcu_dyntick("++=",
+					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("End", oldval));
+	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
-			  oldval));
+			  oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 	}
 }
@@ -127,7 +133,7 @@ void rcu_idle_exit(void)
 	local_irq_save(flags);
 	oldval = rcu_dynticks_nesting;
 	WARN_ON_ONCE(oldval != 0);
-	rcu_dynticks_nesting = LLONG_MAX / 2;
+	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
 	rcu_idle_exit_common(oldval);
 	local_irq_restore(flags);
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 489b62a67d3..06e40dd53b2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -196,7 +196,7 @@ void rcu_note_context_switch(int cpu)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = LLONG_MAX / 2,
+	.dynticks_nesting = DYNTICK_TASK_NESTING,
 	.dynticks = ATOMIC_INIT(1),
 };
 
@@ -348,17 +348,17 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 {
 	if (rdtp->dynticks_nesting) {
-		trace_rcu_dyntick("--=", rdtp->dynticks_nesting);
+		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 		return;
 	}
-	trace_rcu_dyntick("Start", rdtp->dynticks_nesting);
+	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		trace_rcu_dyntick("Error on entry: not idle task",
-				   rdtp->dynticks_nesting);
+				   oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
 	}
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
@@ -383,12 +383,14 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
+	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting = 0;
-	rcu_idle_enter_common(rdtp);
+	rcu_idle_enter_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
 
@@ -411,13 +413,15 @@ void rcu_idle_enter(void)
 void rcu_irq_exit(void)
 {
 	unsigned long flags;
+	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
-	rcu_idle_enter_common(rdtp);
+	rcu_idle_enter_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
 
@@ -431,7 +435,7 @@ void rcu_irq_exit(void)
 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 {
 	if (oldval) {
-		trace_rcu_dyntick("++=", rdtp->dynticks_nesting);
+		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 		return;
 	}
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
@@ -439,10 +443,11 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-	trace_rcu_dyntick("End", oldval);
+	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
-		trace_rcu_dyntick("Error on exit: not idle task", oldval);
+		trace_rcu_dyntick("Error on exit: not idle task",
+				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
 	}
 }
@@ -453,8 +458,8 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to LLONG_MAX/2 to allow for
- * the possibility of usermode upcalls messing up our count
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
  */
@@ -468,7 +473,7 @@ void rcu_idle_exit(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval != 0);
-	rdtp->dynticks_nesting = LLONG_MAX / 2;
+	rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
 	rcu_idle_exit_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
@@ -2012,7 +2017,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 		rdp->nxttail[i] = &rdp->nxtlist;
 	rdp->qlen = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -2040,7 +2045,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
 	WARN_ON_ONCE((atomic_read(&rdp->dynticks->dynticks) & 0x1) != 1);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
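
With the new format, each rcu_dyntick event reads as an old-to-new transition. A hypothetical sketch of how the resulting lines might look, reusing the new "%s %llx %llx" format from the TP_printk above (the exact ftrace framing around each event will differ):

```c
#include <stdio.h>

/* Mimics the new TP_printk("%s %llx %llx", ...) output. */
static void show(const char *polarity, long long oldn, long long newn)
{
	printf("rcu_dyntick: %s %llx %llx\n", polarity, oldn, newn);
}

int main(void)
{
	const long long TASK = 0x3ffffffffffffffeLL; /* DYNTICK_TASK_NESTING */

	show("Start", TASK, 0); /* rcu_idle_enter(): task level -> idle   */
	show("--=", 2, 1);      /* rcu_irq_exit(): still nested, not idle */
	show("++=", 1, 2);      /* irq-entry path (not in this diff)      */
	show("End", 0, TASK);   /* rcu_idle_exit(): idle -> task level    */
	return 0;
}
```

Seeing the old and new values side by side makes it obvious whether a transition actually reached idle (new value of 0) or merely changed the nesting depth.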