| author | Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> | 2013-05-09 14:44:29 +0900 |
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2013-05-09 20:14:25 -0400 |
| commit | 1cf4c0732db3cd3c49cadbc60ff6bda08604e6fa (patch) | |
| tree | 74133737a3ecea18847788c10fae7bb474f1aaa8 | |
| parent | 30052170dcc256c18a43fb3e76577a67394543f8 (diff) | |
tracing: Modify soft-mode only if there's no other referrer
Modify the soft-mode flag only if there is no other soft-mode referrer
(currently only the ftrace triggers), by using a reference counter in
each ftrace_event_file.
Without this fix, adding and then removing several different
enable/disable_event triggers on the same event clears the soft-mode
bit from the ftrace_event_file. The same thing happens when a trigger
is set with a mistyped function glob.
e.g.
 # echo vfs_symlink:enable_event:net:netif_rx > set_ftrace_filter
 # cat events/net/netif_rx/enable
 0*
 # echo typo_func:enable_event:net:netif_rx > set_ftrace_filter
 # cat events/net/netif_rx/enable
 0
 # cat set_ftrace_filter
 #### all functions enabled ####
 vfs_symlink:enable_event:net:netif_rx:unlimited
As above, we still have a trigger, but soft-mode is gone.
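The fix follows the usual first-referrer-sets / last-referrer-clears pattern. Below is a minimal userspace C sketch of that counting scheme, for illustration only; the struct, helper names, and SOFT_MODE flag are hypothetical stand-ins for ftrace_event_file::sm_ref and FTRACE_EVENT_FL_SOFT_MODE_BIT in the actual patch further down:

 #include <stdatomic.h>
 #include <stdbool.h>

 /* Hypothetical stand-in for struct ftrace_event_file. */
 struct event_file {
 	atomic_int sm_ref;       /* soft-mode reference counter */
 	unsigned long flags;     /* holds the SOFT_MODE bit */
 };

 #define SOFT_MODE (1UL << 0)  /* stand-in for the real SOFT_MODE flag bit */

 /* Count a new soft-mode referrer; only the 0 -> 1 transition sets the bit. */
 static bool soft_mode_get(struct event_file *file)
 {
 	if (atomic_fetch_add(&file->sm_ref, 1) + 1 > 1)
 		return false;            /* someone else already set soft mode */
 	file->flags |= SOFT_MODE;
 	return true;
 }

 /* Drop a soft-mode referrer; only the 1 -> 0 transition clears the bit. */
 static bool soft_mode_put(struct event_file *file)
 {
 	if (atomic_fetch_sub(&file->sm_ref, 1) - 1 > 0)
 		return false;            /* other referrers remain */
 	file->flags &= ~SOFT_MODE;
 	return true;
 }

Because every trigger is counted on registration and un-counted on teardown, removing a mistyped trigger now leaves the bit alone as long as another referrer (the valid trigger) still holds a reference.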
Link: http://lkml.kernel.org/r/20130509054429.30398.7464.stgit@mhiramat-M0-7522
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: David Sharp <dhsharp@google.com>
Cc: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Tom Zanussi <tom.zanussi@intel.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
| -rw-r--r-- | include/linux/ftrace_event.h | 1 |
| -rw-r--r-- | kernel/trace/trace_events.c | 12 |

2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 34e00fb49be..4372658c73a 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@ struct ftrace_event_file {
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long		flags;
+	atomic_t		sm_ref;	/* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)				\
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 915c136d7bd..8be1224046f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
 		 */
 		if (soft_disable) {
+			if (atomic_dec_return(&file->sm_ref) > 0)
+				break;
 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 		} else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}
 
 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
@@ -1540,6 +1546,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
 	file->event_call = call;
 	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
 	list_add(&file->list, &tr->events);
 
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
@@ -1562,6 +1569,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 	file->event_call = call;
 	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
 	list_add(&file->list, &tr->events);
 
 	return 0;
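With the reference counter in place, replaying the session from the changelog should leave the surviving trigger in soft mode. The expected (not captured) result:

 # echo vfs_symlink:enable_event:net:netif_rx > set_ftrace_filter
 # echo typo_func:enable_event:net:netif_rx > set_ftrace_filter
 # cat events/net/netif_rx/enable
 0*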