| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-07 17:30:50 +0900 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-07 17:30:50 +0900 | 
| commit | 7f60ba388f5b9dd8b0da463b394412dace3ab814 (patch) | |
| tree | b97b4fb5c8ad07a435e5b1b559988364764d5e8d /kernel/trace/trace_functions.c | |
| parent | e665faa424a4a782aa986274920c1fc5b76f5560 (diff) | |
| parent | 80c9d03c22f13a17df67b4b99a83ed5e9acf6093 (diff) | |
Merge tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore
Pull pstore changes from Anton Vorontsov:
 1) pstore no longer hooks into the function tracer's "high level"
    infrastructure in an ad-hoc way and no longer uses its debugfs
    knobs; it carries its own enable/disable knob instead (see the
    sketch after the commit list below).  The change slightly touches
    the kernel/trace directory, but it got the needed ack from Steven
    Rostedt:
      http://lkml.org/lkml/2012/8/21/688
 2) Added a MAINTAINERS entry;
 3) A bunch of fixes, nothing special.
* tag 'for-v3.7' of git://git.infradead.org/users/cbou/linux-pstore:
  pstore: Avoid recursive spinlocks in the oops_in_progress case
  pstore/ftrace: Convert to its own enable/disable debugfs knob
  pstore/ram: Add missing platform_device_unregister
  MAINTAINERS: Add pstore maintainers
  pstore/ram: Mark ramoops_pstore_write_buf() as notrace
  pstore/ram: Fix printk format warning
  pstore/ram: Fix possible NULL dereference
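The "pstore/ftrace: Convert to its own enable/disable debugfs knob" commit in the list above is the one the diff below pairs with: instead of piggybacking on a function-tracer option, pstore ftrace recording is toggled through a debugfs file of its own. The following is only a minimal sketch of that general shape, not the merged pstore code; the "record_ftrace" file name, the my_ftrace_record()/my_ftrace_ops identifiers, and the module wrapper are assumptions made for illustration, and the exact ftrace callback prototype differs between kernel versions.

```c
/*
 * Sketch of a standalone enable/disable debugfs knob for an ftrace callback.
 * All names here are placeholders, not the actual pstore implementation.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ptrace.h>

static u64 record_enabled;
static DEFINE_MUTEX(record_lock);
static struct dentry *record_dentry;

/* The per-function hook; marked notrace so it is not traced itself. */
static void notrace my_ftrace_record(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Record ip/parent_ip to a backend, e.g. a persistent RAM buffer. */
}

static struct ftrace_ops my_ftrace_ops __read_mostly = {
	.func = my_ftrace_record,
};

static int record_enabled_get(void *data, u64 *val)
{
	*val = record_enabled;
	return 0;
}

static int record_enabled_set(void *data, u64 val)
{
	int ret = 0;

	val = !!val;

	mutex_lock(&record_lock);
	if (val == record_enabled)
		goto out;

	/* Attach or detach the callback from the function tracing core. */
	if (val)
		ret = register_ftrace_function(&my_ftrace_ops);
	else
		ret = unregister_ftrace_function(&my_ftrace_ops);

	if (!ret)
		record_enabled = val;
out:
	mutex_unlock(&record_lock);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(record_enabled_fops, record_enabled_get,
			record_enabled_set, "%llu\n");

static int __init record_knob_init(void)
{
	/* Creates <debugfs>/record_ftrace; writing 1/0 toggles recording. */
	record_dentry = debugfs_create_file("record_ftrace", 0600, NULL, NULL,
					    &record_enabled_fops);
	return IS_ERR_OR_NULL(record_dentry) ? -ENOMEM : 0;
}
module_init(record_knob_init);

static void __exit record_knob_exit(void)
{
	if (record_enabled)
		unregister_ftrace_function(&my_ftrace_ops);
	debugfs_remove(record_dentry);
}
module_exit(record_knob_exit);

MODULE_LICENSE("GPL");
```

Writing 1 attaches the callback through register_ftrace_function() and writing 0 detaches it; with that register/unregister pairing behind its own knob, nothing in kernel/trace has to know about the caller anymore, which is why the diff below can drop the TRACE_FUNC_OPT_PSTORE option entirely.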
Diffstat (limited to 'kernel/trace/trace_functions.c')
| -rw-r--r-- | kernel/trace/trace_functions.c | 15 |
1 file changed, 1 insertion, 14 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 483162a9f90..507a7a9630b 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -76,10 +75,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
 	preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK	= 0x1,
-	TRACE_FUNC_OPT_PSTORE	= 0x2,
 };
 
 static struct tracer_flags func_flags;
@@ -109,12 +107,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-		/*
-		 * So far tracing doesn't support multiple buffers, so
-		 * we make an explicit call for now.
-		 */
-		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
@@ -181,9 +173,6 @@ static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
 	{ } /* Always set a last empty entry */
 };
 
@@ -236,8 +225,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
 		}
 		break;
-	case TRACE_FUNC_OPT_PSTORE:
-		break;
 
 	default:
 		return -EINVAL;
 	}
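For the first entry in the pulled series, "pstore: Avoid recursive spinlocks in the oops_in_progress case", the underlying pattern is worth spelling out: on an oops path, the lock guarding the dump buffer may already be held by the very context that crashed, so the dump code must try the lock rather than spin on it. Below is a minimal sketch of that pattern only, with placeholder names (my_buf_lock, my_write_record()); it is not the actual pstore change.

```c
/*
 * Sketch of lock handling on an oops/panic path. If oops_in_progress is
 * set, the crashing context may already hold the lock, so use a trylock
 * and drop the record rather than deadlock. Names are placeholders.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_buf_lock);

static void my_write_record(const char *buf, size_t len)
{
	/* Backend write would go here (e.g. copy into a persistent buffer). */
}

static void my_dump_record(const char *buf, size_t len)
{
	unsigned long flags;
	bool locked = true;

	if (oops_in_progress) {
		/* Best effort: give up on the lock instead of spinning forever. */
		locked = spin_trylock_irqsave(&my_buf_lock, flags);
	} else {
		spin_lock_irqsave(&my_buf_lock, flags);
	}

	if (!locked)
		return;

	my_write_record(buf, len);
	spin_unlock_irqrestore(&my_buf_lock, flags);
}
```

Dropping the record when the trylock fails is the accepted trade-off in this pattern: losing one message is better than hanging a machine that is in the middle of reporting a crash.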