| field | value |
|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org>, 2009-04-05 11:04:19 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2009-04-05 11:04:19 -0700 |
| commit | 714f83d5d9f7c785f622259dad1f4fad12d64664 |
| tree | 20563541ae438e11d686b4d629074eb002a481b7 (/kernel/trace/trace_functions.c) |
| parent | 8901e7ffc2fa78ede7ce9826dbad68a3a25dc2dc |
| parent | 645dae969c3b8651c5bc7c54a1835ec03820f85f |
Merge branch 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (413 commits)
  tracing, net: fix net tree and tracing tree merge interaction
  tracing, powerpc: fix powerpc tree and tracing tree interaction
  ring-buffer: do not remove reader page from list on ring buffer free
  function-graph: allow unregistering twice
  trace: make argument 'mem' of trace_seq_putmem() const
  tracing: add missing 'extern' keywords to trace_output.h
  tracing: provide trace_seq_reserve()
  blktrace: print out BLK_TN_MESSAGE properly
  blktrace: extract duplicate code
  blktrace: fix memory leak when freeing struct blk_io_trace
  blktrace: fix blk_probes_ref chaos
  blktrace: make classic output more classic
  blktrace: fix off-by-one bug
  blktrace: fix the original blktrace
  blktrace: fix a race when creating blk_tree_root in debugfs
  blktrace: fix timestamp in binary output
  tracing, Text Edit Lock: cleanup
  tracing: filter fix for TRACE_EVENT_FORMAT events
  ftrace: Using FTRACE_WARN_ON() to check "freed record" in ftrace_release()
  x86: kretprobe-booster interrupt emulation code fix
  ...
Fix up trivial conflicts in
 arch/parisc/include/asm/ftrace.h
 include/linux/memory.h
 kernel/extable.c
 kernel/module.c
Diffstat (limited to 'kernel/trace/trace_functions.c')
 -rw-r--r--  kernel/trace/trace_functions.c | 369 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 353 insertions(+), 16 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a1..c9a0b7df44f 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -9,6 +9,7 @@
  *  Copyright (C) 2004-2006 Ingo Molnar
  *  Copyright (C) 2004 William Lee Irwin III
  */
+#include <linux/ring_buffer.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
@@ -16,52 +17,388 @@
 #include "trace.h"
 
-static void start_function_trace(struct trace_array *tr)
+/* function tracing enabled */
+static int			ftrace_function_enabled;
+
+static struct trace_array	*func_trace;
+
+static void tracing_start_function_trace(void);
+static void tracing_stop_function_trace(void);
+
+static int function_trace_init(struct trace_array *tr)
 {
+	func_trace = tr;
 	tr->cpu = get_cpu();
-	tracing_reset_online_cpus(tr);
 	put_cpu();
 	tracing_start_cmdline_record();
 	tracing_start_function_trace();
+	return 0;
 }
 
-static void stop_function_trace(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_function_trace();
 	tracing_stop_cmdline_record();
 }
 
-static int function_trace_init(struct trace_array *tr)
+static void function_trace_start(struct trace_array *tr)
 {
-	start_function_trace(tr);
-	return 0;
+	tracing_reset_online_cpus(tr);
 }
 
-static void function_trace_reset(struct trace_array *tr)
+static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, resched;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	local_save_flags(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		trace_function(tr, ip, parent_ip, flags, pc);
+
+	atomic_dec(&data->disabled);
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	stop_function_trace(tr);
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
 }
 
-static void function_trace_start(struct trace_array *tr)
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	tracing_reset_online_cpus(tr);
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, ip, parent_ip, flags, pc);
+		/*
+		 * skip over 5 funcs:
+		 *    __ftrace_trace_stack,
+		 *    __trace_stack,
+		 *    function_stack_trace_call
+		 *    ftrace_list_func
+		 *    ftrace_call
+		 */
+		__trace_stack(tr, flags, 5, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = function_trace_call,
+};
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+	.func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+	TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+	{ } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+	.val = 0, /* By default: all flags disabled */
+	.opts = func_opts
+};
+
+static void tracing_start_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
+	if (func_flags.val & TRACE_FUNC_OPT_STACK)
+		register_ftrace_function(&trace_stack_ops);
+	else
+		register_ftrace_function(&trace_ops);
+
+	ftrace_function_enabled = 1;
+}
+
+static void tracing_stop_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+	/* OK if they are not registered */
+	unregister_ftrace_function(&trace_stack_ops);
+	unregister_ftrace_function(&trace_ops);
+}
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+	if (bit == TRACE_FUNC_OPT_STACK) {
+		/* do nothing if already set */
+		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+			return 0;
+
+		if (set) {
+			unregister_ftrace_function(&trace_ops);
+			register_ftrace_function(&trace_stack_ops);
+		} else {
+			unregister_ftrace_function(&trace_stack_ops);
+			register_ftrace_function(&trace_ops);
+		}
+
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static struct tracer function_trace __read_mostly =
 {
-	.name	     = "function",
-	.init	     = function_trace_init,
-	.reset	     = function_trace_reset,
-	.start	     = function_trace_start,
+	.name		= "function",
+	.init		= function_trace_init,
+	.reset		= function_trace_reset,
+	.start		= function_trace_start,
+	.wait_pipe	= poll_wait_pipe,
+	.flags		= &func_flags,
+	.set_flag	= func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
-	.selftest    = trace_selftest_startup_function,
+	.selftest	= trace_selftest_startup_function,
 #endif
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	long *count = (long *)data;
+
+	if (tracing_is_on())
+		return;
+
+	if (!*count)
+		return;
+
+	if (*count != -1)
+		(*count)--;
+
+	tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	long *count = (long *)data;
+
+	if (!tracing_is_on())
+		return;
+
+	if (!*count)
+		return;
+
+	if (*count != -1)
+		(*count)--;
+
+	tracing_off();
+}
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_probe_ops *ops, void *data);
+
+static struct ftrace_probe_ops traceon_probe_ops = {
+	.func			= ftrace_traceon,
+	.print			= ftrace_trace_onoff_print,
+};
+
+static struct ftrace_probe_ops traceoff_probe_ops = {
+	.func			= ftrace_traceoff,
+	.print			= ftrace_trace_onoff_print,
+};
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_probe_ops *ops, void *data)
+{
+	char str[KSYM_SYMBOL_LEN];
+	long count = (long)data;
+
+	kallsyms_lookup(ip, NULL, NULL, NULL, str);
+	seq_printf(m, "%s:", str);
+
+	if (ops == &traceon_probe_ops)
+		seq_printf(m, "traceon");
+	else
+		seq_printf(m, "traceoff");
+
+	if (count == -1)
+		seq_printf(m, ":unlimited\n");
+	else
+		seq_printf(m, ":count=%ld", count);
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static int
+ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+{
+	struct ftrace_probe_ops *ops;
+
+	/* we register both traceon and traceoff to this callback */
+	if (strcmp(cmd, "traceon") == 0)
+		ops = &traceon_probe_ops;
+	else
+		ops = &traceoff_probe_ops;
+
+	unregister_ftrace_function_probe_func(glob, ops);
+
+	return 0;
+}
+
+static int
+ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+	void *count = (void *)-1;
+	char *number;
+	int ret;
+
+	/* hash funcs only work with set_ftrace_filter */
+	if (!enable)
+		return -EINVAL;
+
+	if (glob[0] == '!')
+		return ftrace_trace_onoff_unreg(glob+1, cmd, param);
+
+	/* we register both traceon and traceoff to this callback */
+	if (strcmp(cmd, "traceon") == 0)
+		ops = &traceon_probe_ops;
+	else
+		ops = &traceoff_probe_ops;
+
+	if (!param)
+		goto out_reg;
+
+	number = strsep(&param, ":");
+
+	if (!strlen(number))
+		goto out_reg;
+
+	/*
+	 * We use the callback data field (which is a pointer)
+	 * as our counter.
+	 */
+	ret = strict_strtoul(number, 0, (unsigned long *)&count);
+	if (ret)
+		return ret;
+
+ out_reg:
+	ret = register_ftrace_function_probe(glob, ops, count);
+
+	return ret;
+}
+
+static struct ftrace_func_command ftrace_traceon_cmd = {
+	.name			= "traceon",
+	.func			= ftrace_trace_onoff_callback,
+};
+
+static struct ftrace_func_command ftrace_traceoff_cmd = {
+	.name			= "traceoff",
+	.func			= ftrace_trace_onoff_callback,
+};
+
+static int __init init_func_cmd_traceon(void)
+{
+	int ret;
+
+	ret = register_ftrace_command(&ftrace_traceoff_cmd);
+	if (ret)
+		return ret;
+
+	ret = register_ftrace_command(&ftrace_traceon_cmd);
+	if (ret)
+		unregister_ftrace_command(&ftrace_traceoff_cmd);
+	return ret;
+}
+#else
+static inline int init_func_cmd_traceon(void)
+{
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 static __init int init_function_trace(void)
 {
+	init_func_cmd_traceon();
 	return register_tracer(&function_trace);
 }
-
 device_initcall(init_function_trace);
+
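
For context (not part of the commit): the traceon/traceoff probe commands registered above under CONFIG_DYNAMIC_FTRACE are driven from userspace through the set_ftrace_filter file, using the <function>:<command>[:<count>] syntax that ftrace_trace_onoff_callback() parses. Below is a minimal usage sketch, assuming debugfs is mounted at /sys/kernel/debug and a kernel built with this series; the function name and count are illustrative.

/*
 * Hedged usage sketch: arm the "traceoff" probe so that a call to
 * schedule() flips the global tracing switch off, at most 5 times
 * (ftrace_traceoff() decrements the count each time it fires;
 * omitting the count registers the probe as unlimited, i.e. -1).
 *
 * Shell equivalent (assumed mount point):
 *   echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *filter = "/sys/kernel/debug/tracing/set_ftrace_filter";
	FILE *f = fopen(filter, "w");

	if (!f) {
		perror(filter);
		return EXIT_FAILURE;
	}

	/* Parsed as glob "schedule", cmd "traceoff", param "5";
	 * a leading '!' ("!schedule:traceoff") would unregister it. */
	if (fprintf(f, "schedule:traceoff:5\n") < 0) {
		perror("write");
		fclose(f);
		return EXIT_FAILURE;
	}

	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The stack-trace variant is toggled separately: writing func_stack_trace to the tracer's options (the trace_options file, with the function tracer active) goes through func_set_flag(), which swaps trace_ops for trace_stack_ops in place rather than restarting the tracer.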