Diffstat (limited to 'arch/x86/kernel/ptrace.c')
-rw-r--r--	arch/x86/kernel/ptrace.c	37
1 file changed, 37 insertions, 0 deletions
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b00b33a1839..974b67e46dd 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK		FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+	unsigned long sp = (unsigned long)&regs->sp;
+	struct thread_info *tinfo;
+
+	if (context == (sp & ~(THREAD_SIZE - 1)))
+		return sp;
+
+	tinfo = (struct thread_info *)context;
+	if (tinfo->previous_esp)
+		return tinfo->previous_esp;
+
+	return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
@@ -1511,6 +1541,13 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
 	bool step;
 
+	/*
+	 * We may come here right after calling schedule_user()
+	 * or do_notify_resume(), in which case we can be in RCU
+	 * user mode.
+	 */
+	rcu_user_exit();
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
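
The first two hunks export kernel_stack_pointer() with EXPORT_SYMBOL_GPL, so GPL-compatible modules can call it directly. As a hypothetical illustration, not part of the diff above, a kprobes-based module could use it to report the kernel stack pointer at a probed instruction; on x86_32 this works even for kernel-mode traps, where regs->sp itself was never saved. The module name and the probed symbol ("do_fork") below are arbitrary choices for the example:

/*
 * Hypothetical demo module -- not part of the commit above. It registers
 * a kprobe and uses the newly exported kernel_stack_pointer() to print
 * the kernel stack pointer each time the probed function runs.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int ksp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * Safe even for x86_32 kernel-mode traps: kernel_stack_pointer()
	 * falls back to &regs->sp or previous_esp as described above.
	 */
	pr_info("ksp_demo: sp at %s = %lx\n",
		p->symbol_name, kernel_stack_pointer(regs));
	return 0;
}

static struct kprobe ksp_probe = {
	.symbol_name	= "do_fork",	/* arbitrary probe point */
	.pre_handler	= ksp_pre_handler,
};

static int __init ksp_demo_init(void)
{
	return register_kprobe(&ksp_probe);
}

static void __exit ksp_demo_exit(void)
{
	unregister_kprobe(&ksp_probe);
}

module_init(ksp_demo_init);
module_exit(ksp_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbol is EXPORT_SYMBOL_GPL */

Because the export uses EXPORT_SYMBOL_GPL() rather than EXPORT_SYMBOL(), a module declaring a non-GPL-compatible license would fail to resolve the symbol at load time.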