diff options
-rw-r--r-- | include/linux/ftrace.h            |  1
-rw-r--r-- | kernel/Makefile                   |  2
-rw-r--r-- | kernel/trace/ftrace.c             | 17
-rw-r--r-- | kernel/trace/trace.c              | 24
-rw-r--r-- | kernel/trace/trace.h              | 10
-rw-r--r-- | kernel/trace/trace_functions.c    |  3
-rw-r--r-- | kernel/trace/trace_sched_switch.c |  4
-rw-r--r-- | kernel/trace/trace_sched_wakeup.c |  3
8 files changed, 54 insertions, 10 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 3121b95443d..f368d041e02 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -89,6 +89,7 @@ void ftrace_enable_daemon(void);  /* totally disable ftrace - can not re-enable after this */  void ftrace_kill(void); +void ftrace_kill_atomic(void);  static inline void tracer_disable(void)  { diff --git a/kernel/Makefile b/kernel/Makefile index ca2433e8487..480976275d9 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -11,7 +11,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \  	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \  	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o -CFLAGS_REMOVE_sched.o = -pg -mno-spe +CFLAGS_REMOVE_sched.o = -mno-spe  ifdef CONFIG_FTRACE  # Do not trace debug files and internal ftrace files diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0f271c45cd0..4231a3dc224 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1602,6 +1602,23 @@ core_initcall(ftrace_dynamic_init);  #endif /* CONFIG_DYNAMIC_FTRACE */  /** + * ftrace_kill_atomic - kill ftrace from critical sections + * + * This function should be used by panic code. It stops ftrace + * but in a not so nice way. If you need to simply kill ftrace + * from a non-atomic section, use ftrace_kill. + */ +void ftrace_kill_atomic(void) +{ +	ftrace_disabled = 1; +	ftrace_enabled = 0; +#ifdef CONFIG_DYNAMIC_FTRACE +	ftraced_suspend = -1; +#endif +	clear_ftrace_function(); +} + +/**   * ftrace_kill - totally shutdown ftrace   *   * This is a safety measure. 
If something was detected that seems diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e46de641ea4..868e121c8e3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -96,6 +96,9 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);  /* tracer_enabled is used to toggle activation of a tracer */  static int			tracer_enabled = 1; +/* function tracing enabled */ +int				ftrace_function_enabled; +  /*   * trace_nr_entries is the number of entries that is allocated   * for a buffer. Note, the number of entries is always rounded @@ -134,6 +137,7 @@ static notrace void no_trace_init(struct trace_array *tr)  {  	int cpu; +	ftrace_function_enabled = 0;  	if(tr->ctrl)  		for_each_online_cpu(cpu)  			tracing_reset(tr->data[cpu]); @@ -1027,7 +1031,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)  	long disabled;  	int cpu; -	if (unlikely(!tracer_enabled)) +	if (unlikely(!ftrace_function_enabled))  		return;  	if (skip_trace(ip)) @@ -1052,11 +1056,15 @@ static struct ftrace_ops trace_ops __read_mostly =  void tracing_start_function_trace(void)  { +	ftrace_function_enabled = 0;  	register_ftrace_function(&trace_ops); +	if (tracer_enabled) +		ftrace_function_enabled = 1;  }  void tracing_stop_function_trace(void)  { +	ftrace_function_enabled = 0;  	unregister_ftrace_function(&trace_ops);  }  #endif @@ -1383,7 +1391,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)  		   "server",  #elif defined(CONFIG_PREEMPT_VOLUNTARY)  		   "desktop", -#elif defined(CONFIG_PREEMPT_DESKTOP) +#elif defined(CONFIG_PREEMPT)  		   "preempt",  #else  		   "unknown", @@ -1892,8 +1900,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)  		m->private = iter;  		/* stop the trace while dumping */ -		if (iter->tr->ctrl) +		if (iter->tr->ctrl) {  			tracer_enabled = 0; +			ftrace_function_enabled = 0; +		}  		if (iter->trace && iter->trace->open)  			iter->trace->open(iter); @@ -1926,8 +1936,14 @@ int 
tracing_release(struct inode *inode, struct file *file)  		iter->trace->close(iter);  	/* reenable tracing if it was previously enabled */ -	if (iter->tr->ctrl) +	if (iter->tr->ctrl) {  		tracer_enabled = 1; +		/* +		 * It is safe to enable function tracing even if it +		 * isn't used +		 */ +		ftrace_function_enabled = 1; +	}  	mutex_unlock(&trace_types_lock);  	seq_release(inode, file); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8cb215b239d..f69f86788c2 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -223,8 +223,6 @@ void trace_function(struct trace_array *tr,  		    unsigned long parent_ip,  		    unsigned long flags); -void tracing_start_function_trace(void); -void tracing_stop_function_trace(void);  void tracing_start_cmdline_record(void);  void tracing_stop_cmdline_record(void);  int register_tracer(struct tracer *type); @@ -241,6 +239,14 @@ void update_max_tr_single(struct trace_array *tr,  extern cycle_t ftrace_now(int cpu); +#ifdef CONFIG_FTRACE +void tracing_start_function_trace(void); +void tracing_stop_function_trace(void); +#else +# define tracing_start_function_trace()		do { } while (0) +# define tracing_stop_function_trace()		do { } while (0) +#endif +  #ifdef CONFIG_CONTEXT_SWITCH_TRACER  typedef void  (*tracer_switch_func_t)(void *private, diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 7ee7dcd76b7..31214489797 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -28,7 +28,10 @@ static void function_reset(struct trace_array *tr)  static void start_function_trace(struct trace_array *tr)  { +	tr->cpu = get_cpu();  	function_reset(tr); +	put_cpu(); +  	tracing_start_cmdline_record();  	tracing_start_function_trace();  } diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 93a66200915..cb817a209aa 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -227,14 +227,14 @@ void 
tracing_stop_cmdline_record(void)  static void start_sched_trace(struct trace_array *tr)  {  	sched_switch_reset(tr); -	tracer_enabled = 1;  	tracing_start_cmdline_record(); +	tracer_enabled = 1;  }  static void stop_sched_trace(struct trace_array *tr)  { -	tracing_stop_cmdline_record();  	tracer_enabled = 0; +	tracing_stop_cmdline_record();  }  static void sched_switch_trace_init(struct trace_array *tr) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index bf7e91caef5..3c8d61df447 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -352,9 +352,10 @@ static void start_wakeup_tracer(struct trace_array *tr)  	 */  	smp_wmb(); -	tracer_enabled = 1;  	register_ftrace_function(&trace_ops); +	tracer_enabled = 1; +  	return;  fail_deprobe_wake_new:  	marker_probe_unregister("kernel_sched_wakeup_new",  |