Diffstat (limited to 'arch/x86/kernel/alternative.c')
-rw-r--r--   arch/x86/kernel/alternative.c   111
1 file changed, 28 insertions(+), 83 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ced4534baed..ef5ccca79a6 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -23,19 +23,6 @@
 
 #define MAX_PATCH_LEN (255-1)
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int smp_alt_once;
-
-static int __init bootonly(char *str)
-{
-	smp_alt_once = 1;
-	return 1;
-}
-__setup("smp-alt-boot", bootonly);
-#else
-#define smp_alt_once 1
-#endif
-
 static int __initdata_or_module debug_alternative;
 
 static int __init debug_alt(char *str)
@@ -317,7 +304,7 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
 		/* turn DS segment override prefix into lock prefix */
 		if (*ptr == 0x3e)
 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
-	};
+	}
 	mutex_unlock(&text_mutex);
 }
 
@@ -326,9 +313,6 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 {
 	const s32 *poff;
 
-	if (noreplace_smp)
-		return;
-
 	mutex_lock(&text_mutex);
 	for (poff = start; poff < end; poff++) {
 		u8 *ptr = (u8 *)poff + *poff;
@@ -338,7 +322,7 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
 		/* turn lock prefix into DS segment override prefix */
 		if (*ptr == 0xf0)
 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
-	};
+	}
 	mutex_unlock(&text_mutex);
 }
 
@@ -359,7 +343,7 @@ struct smp_alt_module {
 };
 static LIST_HEAD(smp_alt_modules);
 static DEFINE_MUTEX(smp_alt);
-static int smp_mode = 1;	/* protected by smp_alt */
+static bool uniproc_patched = false;	/* protected by smp_alt */
 
 void __init_or_module alternatives_smp_module_add(struct module *mod,
 						  char *name,
@@ -368,19 +352,18 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
 {
 	struct smp_alt_module *smp;
 
-	if (noreplace_smp)
-		return;
+	mutex_lock(&smp_alt);
+	if (!uniproc_patched)
+		goto unlock;
 
-	if (smp_alt_once) {
-		if (boot_cpu_has(X86_FEATURE_UP))
-			alternatives_smp_unlock(locks, locks_end,
-						text, text_end);
-		return;
-	}
+	if (num_possible_cpus() == 1)
+		/* Don't bother remembering, we'll never have to undo it. */
+		goto smp_unlock;
 
 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
 	if (NULL == smp)
-		return; /* we'll run the (safe but slow) SMP code then ... */
+		/* we'll run the (safe but slow) SMP code then ... */
+		goto unlock;
 
 	smp->mod	= mod;
 	smp->name	= name;
@@ -392,11 +375,10 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
 		__func__, smp->locks, smp->locks_end,
 		smp->text, smp->text_end, smp->name);
 
-	mutex_lock(&smp_alt);
 	list_add_tail(&smp->next, &smp_alt_modules);
-	if (boot_cpu_has(X86_FEATURE_UP))
-		alternatives_smp_unlock(smp->locks, smp->locks_end,
-					smp->text, smp->text_end);
+smp_unlock:
+	alternatives_smp_unlock(locks, locks_end, text, text_end);
+unlock:
 	mutex_unlock(&smp_alt);
 }
 
@@ -404,24 +386,18 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
 {
 	struct smp_alt_module *item;
 
-	if (smp_alt_once || noreplace_smp)
-		return;
-
 	mutex_lock(&smp_alt);
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
 		list_del(&item->next);
-		mutex_unlock(&smp_alt);
-		DPRINTK("%s: %s\n", __func__, item->name);
 		kfree(item);
-		return;
+		break;
 	}
 	mutex_unlock(&smp_alt);
 }
 
-bool skip_smp_alternatives;
-void alternatives_smp_switch(int smp)
+void alternatives_enable_smp(void)
 {
 	struct smp_alt_module *mod;
 
@@ -436,34 +412,21 @@ void alternatives_smp_switch(int smp)
 	pr_info("lockdep: fixing up alternatives\n");
 #endif
 
-	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
-		return;
-	BUG_ON(!smp && (num_online_cpus() > 1));
+	/* Why bother if there are no other CPUs? */
+	BUG_ON(num_possible_cpus() == 1);
 
 	mutex_lock(&smp_alt);
 
-	/*
-	 * Avoid unnecessary switches because it forces JIT based VMs to
-	 * throw away all cached translations, which can be quite costly.
-	 */
-	if (smp == smp_mode) {
-		/* nothing */
-	} else if (smp) {
+	if (uniproc_patched) {
 		pr_info("switching to SMP code\n");
+		BUG_ON(num_online_cpus() != 1);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
-	} else {
-		pr_info("switching to UP code\n");
-		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-		list_for_each_entry(mod, &smp_alt_modules, next)
-			alternatives_smp_unlock(mod->locks, mod->locks_end,
-						mod->text, mod->text_end);
+		uniproc_patched = false;
 	}
-	smp_mode = smp;
 	mutex_unlock(&smp_alt);
 }
 
@@ -540,40 +503,22 @@ void __init alternative_instructions(void)
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
-	/* switch to patch-once-at-boottime-only mode and free the
-	 * tables in case we know the number of CPUs will never ever
-	 * change */
-#ifdef CONFIG_HOTPLUG_CPU
-	if (num_possible_cpus() < 2)
-		smp_alt_once = 1;
-#endif
-
 #ifdef CONFIG_SMP
-	if (smp_alt_once) {
-		if (1 == num_possible_cpus()) {
-			pr_info("switching to UP code\n");
-			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
-			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-
-			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
-						_text, _etext);
-		}
-	} else {
+	/* Patch to UP if other cpus not imminent. */
+	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
+		uniproc_patched = true;
 		alternatives_smp_module_add(NULL, "core kernel",
 					    __smp_locks, __smp_locks_end,
 					    _text, _etext);
-
-		/* Only switch to UP mode if we don't immediately boot others */
-		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
-			alternatives_smp_switch(0);
 	}
-#endif
- 	apply_paravirt(__parainstructions, __parainstructions_end);
 
-	if (smp_alt_once)
+	if (!uniproc_patched || num_possible_cpus() == 1)
 		free_init_pages("SMP alternatives",
 				(unsigned long)__smp_locks,
 				(unsigned long)__smp_locks_end);
+#endif
+
+	apply_paravirt(__parainstructions, __parainstructions_end);
 
 	restart_nmi();
 }
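
For context on how the reworked API is meant to be used: alternatives_enable_smp() is now a one-way switch, expected to be called while only one CPU is online, just before additional CPUs are brought up; there is no longer a path that patches back to UP code when CPUs are unplugged. Below is a minimal sketch of a caller, assuming only the semantics visible in the diff above; the function and helper names are hypothetical and not part of this patch.

/*
 * Hypothetical CPU bring-up path. Before onlining a second CPU, the
 * lock prefixes must be patched from UP to SMP form exactly once.
 * Per the diff above, alternatives_enable_smp() does nothing further
 * once uniproc_patched is false, and BUG()s if the kernel only has a
 * single possible CPU.
 */
static int example_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	/* the pre-patch equivalent on this path was alternatives_smp_switch(1) */
	alternatives_enable_smp();

	return example_boot_secondary(cpu, idle);	/* hypothetical helper */
}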