diff options
| author | Anton Vorontsov <anton.vorontsov@linaro.org> | 2012-07-09 17:10:42 -0700 | 
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-07-17 10:07:00 -0700 | 
| commit | 21f679404a0c28bd5b1b3aff2a7218bbff4cb43d (patch) | |
| tree | 9a5f1d0be079dfe225e2993331f9ddfac36cb7fc /kernel/trace/trace_functions.c | |
| parent | 060287b8c467bf49a594d8d669e1986c6d8d76b0 (diff) | |
| download | olio-linux-3.10-21f679404a0c28bd5b1b3aff2a7218bbff4cb43d.tar.xz olio-linux-3.10-21f679404a0c28bd5b1b3aff2a7218bbff4cb43d.zip  | |
tracing/function: Introduce persistent trace option
This patch introduces the 'func_pstore' option, now available in
/sys/kernel/debug/tracing/options when the function tracer
is selected.
The patch also adds some tiny code that calls back to pstore
to record the trace. The callback is a no-op when PSTORE=n.
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel/trace/trace_functions.c')
| -rw-r--r-- | kernel/trace/trace_functions.c | 25 | 
1 file changed, 20 insertions, 5 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index c7b0c6a7db0..13770abd7a1 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -13,6 +13,7 @@  #include <linux/debugfs.h>  #include <linux/uaccess.h>  #include <linux/ftrace.h> +#include <linux/pstore.h>  #include <linux/fs.h>  #include "trace.h" @@ -74,6 +75,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)  	preempt_enable_notrace();  } +/* Our two options */ +enum { +	TRACE_FUNC_OPT_STACK	= 0x1, +	TRACE_FUNC_OPT_PSTORE	= 0x2, +}; + +static struct tracer_flags func_flags; +  static void  function_trace_call(unsigned long ip, unsigned long parent_ip)  { @@ -97,6 +106,12 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)  	disabled = atomic_inc_return(&data->disabled);  	if (likely(disabled == 1)) { +		/* +		 * So far tracing doesn't support multiple buffers, so +		 * we make an explicit call for now. +		 */ +		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE)) +			pstore_ftrace_call(ip, parent_ip);  		pc = preempt_count();  		trace_function(tr, ip, parent_ip, flags, pc);  	} @@ -158,15 +173,13 @@ static struct ftrace_ops trace_stack_ops __read_mostly =  	.flags = FTRACE_OPS_FL_GLOBAL,  }; -/* Our two options */ -enum { -	TRACE_FUNC_OPT_STACK = 0x1, -}; -  static struct tracer_opt func_opts[] = {  #ifdef CONFIG_STACKTRACE  	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },  #endif +#ifdef CONFIG_PSTORE_FTRACE +	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) }, +#endif  	{ } /* Always set a last empty entry */  }; @@ -218,6 +231,8 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)  		}  		return 0; +	} else if (bit == TRACE_FUNC_OPT_PSTORE) { +		return 0;  	}  	return -EINVAL;  |