| author | Frederic Weisbecker <fweisbec@gmail.com> | 2013-05-02 17:37:49 +0200 | 
|---|---|---|
| committer | Frederic Weisbecker <fweisbec@gmail.com> | 2013-05-02 17:54:19 +0200 | 
| commit | c032862fba51a3ca504752d3a25186b324c5ce83 (patch) | |
| tree | 955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /kernel/trace/trace_functions_graph.c | |
| parent | fda76e074c7737fc57855dd17c762e50ed526052 (diff) | |
| parent | 8700c95adb033843fc163d112b9d21d4fda78018 (diff) | |
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched
upstream updates in order to fix some dependencies.
Merge a common upstream merge point that has these
updates.
Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 12 |
|---|---|---|

1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 39ada66389c..8388bc99f2e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
 			 * We need to consume the current entry to see
 			 * the next one.
 			 */
-			ring_buffer_consume(iter->tr->buffer, iter->cpu,
+			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 					    NULL, NULL);
-			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 						 NULL, NULL);
 		}
```
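The hunks above all follow one pattern: the global ring buffer and the per-CPU trace data move from fields directly on struct trace_array (tr->buffer, tr->data[cpu]) into a nested trace_buffer (tr->trace_buffer.buffer, per_cpu_ptr(tr->trace_buffer.data, cpu)). The following is a minimal, compilable userspace sketch of that access pattern, not the kernel definitions: the struct layouts and the per_cpu_ptr macro here are simplified stand-ins chosen only to mirror the names in the diff.

```c
/*
 * Simplified stand-ins for kernel types; the real definitions live in
 * kernel/trace/trace.h and include/linux/percpu.h.
 */
#include <stdio.h>

#define NR_CPUS 4

struct ring_buffer { int dummy; };            /* stand-in for the ring buffer      */
struct trace_array_cpu { int disabled; };     /* stand-in for per-CPU trace state  */

struct trace_buffer {
	struct ring_buffer *buffer;           /* global ring buffer                 */
	struct trace_array_cpu *data;         /* base of the per-CPU data area      */
};

struct trace_array {
	struct trace_buffer trace_buffer;     /* buffers now hang off this member   */
};

/* Illustrative stand-in for the kernel's per_cpu_ptr(): index off the base. */
#define per_cpu_ptr(base, cpu) (&(base)[cpu])

int main(void)
{
	static struct ring_buffer rb;
	static struct trace_array_cpu cpu_data[NR_CPUS];
	struct trace_array tr = {
		.trace_buffer = { .buffer = &rb, .data = cpu_data },
	};
	int cpu = 1;

	/* New-style accesses, mirroring the hunks above. */
	struct ring_buffer *buffer = tr.trace_buffer.buffer;
	struct trace_array_cpu *data = per_cpu_ptr(tr.trace_buffer.data, cpu);

	printf("buffer=%p cpu%d data=%p\n", (void *)buffer, cpu, (void *)data);
	return 0;
}
```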