Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c | 166
1 file changed, 127 insertions, 39 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 420ec348757..b3749a2c313 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
 
 /* Add a function return address to the trace stack on thread info.*/
 
@@ -166,10 +166,123 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
+static int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_entry;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
+	struct ftrace_graph_ent_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return 0;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return 0;
+	entry	= ring_buffer_event_data(event);
+	entry->graph_ent			= *trace;
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
+
+	return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	if (unlikely(!tr))
+		return 0;
+
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
+	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_exit;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer = tr->buffer;
+	struct ftrace_graph_ret_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry	= ring_buffer_event_data(event);
+	entry->ret				= *trace;
+	if (!filter_current_check_discard(buffer, call, entry, event))
+		ring_buffer_unlock_commit(buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
-	int ret = register_ftrace_graph(&trace_graph_return,
-					&trace_graph_entry);
+	int ret;
+
+	graph_array = tr;
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
@@ -177,49 +290,30 @@ static int graph_trace_init(struct trace_array *tr)
 	return 0;
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();
 	unregister_ftrace_graph();
 }
 
-static inline int log10_cpu(int nb)
-{
-	if (nb / 100)
-		return 3;
-	if (nb / 10)
-		return 2;
-	return 1;
-}
+static int max_bytes_for_cpu;
 
 static enum print_line_t
 print_graph_cpu(struct trace_seq *s, int cpu)
 {
-	int i;
 	int ret;
-	int log10_this = log10_cpu(cpu);
-	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
-
 	/*
 	 * Start with a space character - to make it stand out
 	 * to the right a bit when trace output is pasted into
 	 * email:
 	 */
-	ret = trace_seq_printf(s, " ");
-
-	/*
-	 * Tricky - we space the CPU field according to the max
-	 * number of online CPUs. On a 2-cpu system it would take
-	 * a maximum of 1 digit - on a 128 cpu system it would
-	 * take up to 3 digits:
-	 */
-	for (i = 0; i < log10_all - log10_this; i++) {
-		ret = trace_seq_printf(s, " ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-	ret = trace_seq_printf(s, "%d) ", cpu);
+	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -565,11 +659,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = seq_print_ip_sym(s, call->func, 0);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_printf(s, "();\n");
+	ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -612,11 +702,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
-	ret = seq_print_ip_sym(s, call->func, 0);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	ret = trace_seq_printf(s, "() {\n");
+	ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -934,6 +1020,8 @@ static struct tracer graph_trace __read_mostly = {
 
 static __init int init_graph_trace(void)
 {
+	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+
 	return register_tracer(&graph_trace);
 }
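
One idiom in the new trace_graph_entry()/trace_graph_return() callbacks is worth calling out: the per-CPU data->disabled counter is bumped with atomic_inc_return(), and an event is recorded only when the counter was previously zero, so a re-entrant call on the same CPU (for example, the tracer ending up tracing itself) is silently dropped instead of recursing. The sketch below is a stand-alone userspace illustration of that guard pattern, not kernel code; the thread-local counter is a stand-in for the per-CPU atomic.

#include <stdio.h>

/* Thread-local stand-in for the per-CPU data->disabled counter. */
static _Thread_local long disabled;

static void record_event(const char *what)
{
	long d = ++disabled;		/* analogous to atomic_inc_return() */

	if (d == 1)			/* only the outermost, non-nested caller records */
		printf("record: %s\n", what);
	/* nested callers fall through without recording */

	--disabled;			/* analogous to atomic_dec() */
}

int main(void)
{
	record_event("outer");		/* recorded */
	++disabled;			/* pretend we are already inside the tracer */
	record_event("nested");		/* dropped by the guard */
	--disabled;
	return 0;
}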
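
The other non-obvious change replaces the log10_cpu() padding logic with a single line in init_graph_trace(): snprintf(NULL, 0, "%d", nr_cpu_ids - 1) returns the number of characters the largest possible CPU number needs, and print_graph_cpu() then uses the "%*d" field-width specifier to right-align every CPU number to that width. A minimal userspace sketch of the same trick follows; nr_cpu_ids is stubbed with a constant here.

#include <stdio.h>

int main(void)
{
	int nr_cpu_ids = 128;	/* stand-in for the kernel's nr_cpu_ids */

	/* snprintf with a NULL buffer and size 0 only reports the length needed */
	int max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);	/* -> 3 */

	for (int cpu = 0; cpu < nr_cpu_ids; cpu += 63)
		printf(" %*d) sample line\n", max_bytes_for_cpu, cpu);
	return 0;
}

This prints "   0)", "  63)" and " 126)" in a consistent three-character column, which is exactly what the removed log10_cpu() loop was computing by hand.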