Diffstat (limited to 'arch/x86/kernel/alternative.c')
 arch/x86/kernel/alternative.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e..4c80f155743 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than a unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during to code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
 				(unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
@@ -498,12 +503,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-	unsigned long flags;
 	char *vaddr;
 	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
+	might_sleep();
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +522,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		nr_pages = 1;
 	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 	BUG_ON(!vaddr);
-	local_irq_save(flags);
+	local_irq_disable();
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_restore(flags);
+	local_irq_enable();
 	vunmap(vaddr);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
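Taken together, the text_poke() hunks tighten its calling convention: the new might_sleep() documents that the vmap()-based remapping can sleep, and replacing local_irq_save()/local_irq_restore() with local_irq_disable()/local_irq_enable() means the function now returns with interrupts unconditionally enabled. A minimal caller sketch under those constraints follows; the patch_one_nop() helper, its call site and the patched byte are hypothetical and only shown for illustration, text_poke() itself is the function changed in this diff.

/*
 * Illustrative only: patch a single NOP byte into kernel text via
 * text_poke().  The helper name is made up; text_poke() is real.
 */
#include <linux/kernel.h>
#include <linux/hardirq.h>	/* in_interrupt() */
#include <asm/alternative.h>	/* text_poke() declaration at this point in time */

static void patch_one_nop(void *addr)
{
	static const unsigned char nop = 0x90;	/* 1-byte x86 NOP */

	/*
	 * text_poke() now calls might_sleep() and ends with
	 * local_irq_enable(), so it may only be used from process
	 * context with interrupts enabled; never from NMI, IRQ or
	 * other atomic context.
	 */
	WARN_ON(in_interrupt());
	text_poke(addr, &nop, sizeof(nop));
}

Because the flags are no longer saved and restored, a caller that entered with interrupts disabled would have them silently re-enabled on return; the might_sleep() annotation catches such misuse at runtime when sleep-inside-atomic debugging is enabled.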