Diffstat (limited to 'arch/x86/kernel/entry_64.S')
 -rw-r--r--  arch/x86/kernel/entry_64.S | 172
 1 file changed, 142 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 69babd8c834..44531acd9a8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -56,6 +56,8 @@
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
+#include <asm/rcu.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -68,25 +70,51 @@
 	.section .entry.text, "ax"
 
 #ifdef CONFIG_FUNCTION_TRACER
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+#else
+# define function_hook	mcount
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(mcount)
+
+ENTRY(function_hook)
 	retq
-END(mcount)
+END(function_hook)
+
+/* skip is set if stack has been adjusted */
+.macro ftrace_caller_setup skip=0
+	MCOUNT_SAVE_FRAME \skip
+
+	/* Load the ftrace_ops into the 3rd parameter */
+	leaq function_trace_op, %rdx
+
+	/* Load ip into the first parameter */
+	movq RIP(%rsp), %rdi
+	subq $MCOUNT_INSN_SIZE, %rdi
+	/* Load the parent_ip into the second parameter */
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
+	movq 8(%rbp), %rsi
+#endif
+.endm
 
 ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
 	cmpl $0, function_trace_stop
 	jne  ftrace_stub
 
-	MCOUNT_SAVE_FRAME
-
-	movq 0x38(%rsp), %rdi
-	movq 8(%rbp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rdi
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx
 
 GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
+ftrace_return:
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 GLOBAL(ftrace_graph_call)
@@ -97,8 +125,78 @@ GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+	/* Save the current flags before compare (in SS location)*/
+	pushfq
+
+	/* Check if tracing was disabled (quick check) */
+	cmpl $0, function_trace_stop
+	jne  ftrace_restore_flags
+
+	/* skip=8 to skip flags saved in SS */
+	ftrace_caller_setup 8
+
+	/* Save the rest of pt_regs */
+	movq %r15, R15(%rsp)
+	movq %r14, R14(%rsp)
+	movq %r13, R13(%rsp)
+	movq %r12, R12(%rsp)
+	movq %r11, R11(%rsp)
+	movq %r10, R10(%rsp)
+	movq %rbp, RBP(%rsp)
+	movq %rbx, RBX(%rsp)
+	/* Copy saved flags */
+	movq SS(%rsp), %rcx
+	movq %rcx, EFLAGS(%rsp)
+	/* Kernel segments */
+	movq $__KERNEL_DS, %rcx
+	movq %rcx, SS(%rsp)
+	movq $__KERNEL_CS, %rcx
+	movq %rcx, CS(%rsp)
+	/* Stack - skipping return address */
+	leaq SS+16(%rsp), %rcx
+	movq %rcx, RSP(%rsp)
+
+	/* regs go into 4th parameter */
+	leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	/* Copy flags back to SS, to restore them */
+	movq EFLAGS(%rsp), %rax
+	movq %rax, SS(%rsp)
+
+	/* Handlers can change the RIP */
+	movq RIP(%rsp), %rax
+	movq %rax, SS+8(%rsp)
+
+	/* restore the rest of pt_regs */
+	movq R15(%rsp), %r15
+	movq R14(%rsp), %r14
+	movq R13(%rsp), %r13
+	movq R12(%rsp), %r12
+	movq R10(%rsp), %r10
+	movq RBP(%rsp), %rbp
+	movq RBX(%rsp), %rbx
+
+	/* skip=8 to skip flags saved in SS */
+	MCOUNT_RESTORE_FRAME 8
+
+	/* Restore flags */
+	popfq
+
+	jmp ftrace_return
+ftrace_restore_flags:
+	popfq
+	jmp  ftrace_stub
+
+END(ftrace_regs_caller)
+
+
 #else /* ! CONFIG_DYNAMIC_FTRACE */
-ENTRY(mcount)
+
+ENTRY(function_hook)
 	cmpl $0, function_trace_stop
 	jne  ftrace_stub
@@ -119,8 +217,12 @@ GLOBAL(ftrace_stub)
 
 trace:
 	MCOUNT_SAVE_FRAME
 
-	movq 0x38(%rsp), %rdi
+	movq RIP(%rsp), %rdi
+#ifdef CC_USING_FENTRY
+	movq SS+16(%rsp), %rsi
+#else
 	movq 8(%rbp), %rsi
+#endif
 	subq $MCOUNT_INSN_SIZE, %rdi
 
 	call   *ftrace_trace_function
@@ -128,20 +230,22 @@ trace:
 
 	MCOUNT_RESTORE_FRAME
 
 	jmp ftrace_stub
-END(mcount)
+END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
 	MCOUNT_SAVE_FRAME
 
+#ifdef CC_USING_FENTRY
+	leaq SS+16(%rsp), %rdi
+	movq $0, %rdx	/* No framepointers needed */
+#else
 	leaq 8(%rbp), %rdi
-	movq 0x38(%rsp), %rsi
 	movq (%rbp), %rdx
+#endif
+	movq RIP(%rsp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rsi
 
 	call	prepare_ftrace_return
@@ -342,15 +446,15 @@ ENDPROC(native_usergs_sysret64)
 	.macro SAVE_ARGS_IRQ
 	cld
 	/* start from rbp in pt_regs and jump over */
-	movq_cfi rdi, RDI-RBP
-	movq_cfi rsi, RSI-RBP
-	movq_cfi rdx, RDX-RBP
-	movq_cfi rcx, RCX-RBP
-	movq_cfi rax, RAX-RBP
-	movq_cfi  r8,  R8-RBP
-	movq_cfi  r9,  R9-RBP
-	movq_cfi r10, R10-RBP
-	movq_cfi r11, R11-RBP
+	movq_cfi rdi, (RDI-RBP)
+	movq_cfi rsi, (RSI-RBP)
+	movq_cfi rdx, (RDX-RBP)
+	movq_cfi rcx, (RCX-RBP)
+	movq_cfi rax, (RAX-RBP)
+	movq_cfi  r8,  (R8-RBP)
+	movq_cfi  r9,  (R9-RBP)
+	movq_cfi r10, (R10-RBP)
+	movq_cfi r11, (R11-RBP)
 
 	/* Save rbp so that we can unwind from get_irq_regs() */
 	movq_cfi rbp, 0
@@ -384,7 +488,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 ENTRY(save_rest)
-	PARTIAL_FRAME 1 REST_SKIP+8
+	PARTIAL_FRAME 1 (REST_SKIP+8)
 	movq 5*8+16(%rsp), %r11	/* save return address */
 	movq_cfi rbx, RBX+16
 	movq_cfi rbp, RBP+16
@@ -440,7 +544,7 @@ ENTRY(ret_from_fork)
 
 	LOCK ; btr $TIF_FORK,TI_flags(%r8)
 
-	pushq_cfi kernel_eflags(%rip)
+	pushq_cfi $0x0002
 	popfq_cfi				# reset kernel eflags
 
 	call schedule_tail			# rdi: 'prev' task parameter
@@ -465,7 +569,8 @@ END(ret_from_fork)
  * System call entry. Up to 6 arguments in registers are supported.
  *
  * SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer.  However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
  */
 
 /*
@@ -565,7 +670,7 @@ sysret_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
-	call schedule
+	SCHEDULE_USER
 	popq_cfi %rdi
 	jmp sysret_check
@@ -678,7 +783,7 @@ int_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
-	call schedule
+	SCHEDULE_USER
 	popq_cfi %rdi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
@@ -884,6 +989,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -974,7 +1080,7 @@ retint_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
-	call  schedule
+	SCHEDULE_USER
 	popq_cfi %rdi
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1023,6 +1129,7 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
@@ -1077,6 +1184,7 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1094,6 +1202,7 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1112,6 +1221,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1131,6 +1241,7 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1149,6 +1260,7 @@ END(\sym)
 	/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1449,7 +1561,7 @@ paranoid_userspace:
 paranoid_schedule:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
-	call schedule
+	SCHEDULE_USER
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	jmp paranoid_userspace
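
A note on the ftrace_regs_caller trampoline above: the pt_regs frame it builds is what a C-side ftrace callback receives when its ftrace_ops is registered with the FTRACE_OPS_FL_SAVE_REGS flag, and the register loads in ftrace_caller_setup mirror the callback's calling convention (ip in %rdi, parent_ip in %rsi, the ftrace_ops in %rdx, pt_regs in %rcx; plain ftrace_caller passes NULL regs). A minimal sketch of such a consumer, assuming the ftrace API contemporary with this diff; the traced function and all names here are illustrative, not part of the patch:

	#include <linux/ftrace.h>
	#include <linux/module.h>
	#include <linux/ptrace.h>
	#include <linux/string.h>

	static void notrace my_regs_handler(unsigned long ip, unsigned long parent_ip,
					    struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs is the frame built by ftrace_regs_caller; handlers may
		 * even rewrite regs->ip, which the trampoline copies back
		 * before returning to the traced function. */
		pr_info("hit %pS called from %pS, sp=%lx\n",
			(void *)ip, (void *)parent_ip, regs->sp);
	}

	static struct ftrace_ops my_ops = {
		.func	= my_regs_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,  /* route via ftrace_regs_caller */
	};

	static char target[] = "schedule_tail";	/* hypothetical traced function */

	static int __init regs_trace_init(void)
	{
		/* Restrict tracing to one function before registering. */
		ftrace_set_filter(&my_ops, target, strlen(target), 1);
		return register_ftrace_function(&my_ops);
	}

	static void __exit regs_trace_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(regs_trace_init);
	module_exit(regs_trace_exit);
	MODULE_LICENSE("GPL");

This full-regs path is what makes kprobes-style handlers possible on ftrace-instrumented functions without a breakpoint trap.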
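The SCHEDULE_USER substitutions in the sysret/retint slow paths go together with the new <asm/rcu.h> include: when CONFIG_RCU_USER_QS is enabled, a CPU running userspace sits in an RCU extended quiescent state, so a return-to-user path must exit that state before calling the scheduler. A sketch of the helper the macro is expected to dispatch to, assuming the contemporaneous code in kernel/sched/core.c:

	/*
	 * SCHEDULE_USER expands to "call schedule_user" under
	 * CONFIG_RCU_USER_QS and to a plain "call schedule" otherwise
	 * (see <asm/rcu.h>).
	 */
	asmlinkage void __sched schedule_user(void)
	{
		/*
		 * These slow paths interrupted userspace, where RCU treated
		 * the CPU as idle; tell RCU the kernel is active again
		 * before scheduling, and re-enter the quiescent state on
		 * the way back out to user mode.
		 */
		rcu_user_exit();
		schedule();
		rcu_user_enter();
	}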
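The ASM_CLAC additions are the SMAP half of this diff: every interrupt and exception entry now clears EFLAGS.AC, so a kernel with Supervisor Mode Access Prevention never runs with user-page access accidentally left enabled (the SYSCALL path needs no CLAC because, as the updated comment notes, the hardware flags mask already clears AC). Deliberate user accesses then bracket themselves with the stac()/clac() inlines from <asm/smap.h>; a sketch of that pattern with a hypothetical helper (the real uaccess primitives embed STAC/CLAC themselves):

	#include <asm/smap.h>

	/*
	 * With SMAP active, a supervisor-mode load from a user page faults
	 * unless EFLAGS.AC is set. Both helpers below patch down to NOPs on
	 * CPUs without X86_FEATURE_SMAP via the alternatives mechanism.
	 */
	static unsigned long peek_user_word(const unsigned long __user *uaddr)
	{
		unsigned long val;

		stac();			/* set EFLAGS.AC: permit user access */
		val = *(__force const unsigned long *)uaddr;
		clac();			/* clear EFLAGS.AC: re-arm SMAP */
		return val;
	}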