| author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2013-03-13 20:43:57 -0400 | 
|---|---|---|
| committer | Steven Rostedt <rostedt@goodmis.org> | 2013-03-15 00:36:06 -0400 | 
| commit | 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 (patch) | |
| tree | c103cc8bf1687f5ffe54ff7f17c7672bda20cad1 /kernel/trace/trace_stack.c | |
| parent | dd42cd3ea96d687f15525c4f14fa582702db223f (diff) | |
tracing: Use stack of calling function for stack tracer
Use the stack of stack_trace_call() instead of check_stack()'s own stack as
the test pointer for the max stack size. This makes the check a bit cleaner
and a little more accurate, since the measurement now starts at the caller's
frame rather than inside check_stack() itself.
Adding stable, as a later fix depends on this patch.
Cc: stable@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
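To make the measurement concrete, below is a minimal user-space sketch (not kernel code) of the depth calculation that check_stack() performs. THREAD_SIZE, stack_depth() and caller() are illustrative stand-ins, and a user-space stack is not THREAD_SIZE-aligned, so the printed value only approximates what the kernel computes; the point is to show why passing the caller's local variable measures from the caller's frame instead of the checker's.

```c
/*
 * Illustrative sketch only, assuming a THREAD_SIZE-aligned stack area:
 * mask a stack address with THREAD_SIZE-1 to get its offset within the
 * stack area, then subtract from THREAD_SIZE to get how much stack lies
 * between that address and the top of the area.
 */
#include <stdio.h>

#define THREAD_SIZE 8192UL	/* stand-in; the real value is arch-specific */

/* Mirrors check_stack(stack): depth is measured from the pointer passed in. */
static unsigned long stack_depth(unsigned long *stack)
{
	unsigned long this_size;

	this_size = ((unsigned long)stack) & (THREAD_SIZE - 1);
	return THREAD_SIZE - this_size;
}

static void caller(void)
{
	unsigned long stack;	/* same trick as stack_trace_call() */

	/* Passing &stack measures from this frame, not stack_depth()'s. */
	printf("approx. stack used: %lu bytes\n", stack_depth(&stack));
}

int main(void)
{
	caller();
	return 0;
}
```

With the old code, the reference address was a local inside check_stack(), so the tracer's own frame was included in the reported depth; passing a local from stack_trace_call() anchors the measurement one frame earlier.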
Diffstat (limited to 'kernel/trace/trace_stack.c')
| -rw-r--r-- | kernel/trace/trace_stack.c | 12 | 
1 file changed, 7 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 42ca822fc70..dc02e29d825 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -39,20 +39,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long *stack)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
 	int i;
 
-	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
 
 	if (this_size <= max_stack_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(&this_size))
+	if (!object_is_on_stack(stack))
 		return;
 
 	local_irq_save(flags);
@@ -73,7 +74,7 @@ static inline void check_stack(void)
 	 * Now find where in the stack these are.
 	 */
 	i = 0;
-	start = &this_size;
+	start = stack;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
@@ -113,6 +114,7 @@ static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+	unsigned long stack;
 	int cpu;
 
 	preempt_disable_notrace();
@@ -122,7 +124,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	check_stack();
+	check_stack(&stack);
 
  out:
 	per_cpu(trace_active, cpu)--;
```