Diffstat (limited to 'arch/x86/kernel/cpu/amd.c')
 -rw-r--r--  arch/x86/kernel/cpu/amd.c | 116
 1 file changed, 65 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15239fffd6f..5013a48d1af 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,7 +12,6 @@
 #include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
-# include <asm/numa_64.h>
 # include <asm/mmconfig.h>
 # include <asm/cacheflush.h>
 #endif
@@ -21,11 +20,11 @@
 
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 	u32 gprs[8] = { 0 };
 	int err;
 
-	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+	WARN_ONCE((boot_cpu_data.x86 != 0xf),
+		  "%s should only be used on K8!\n", __func__);
 
 	gprs[1] = msr;
 	gprs[7] = 0x9c5a203a;
@@ -39,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 
 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 {
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 	u32 gprs[8] = { 0 };
 
-	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+	WARN_ONCE((boot_cpu_data.x86 != 0xf),
+		  "%s should only be used on K8!\n", __func__);
 
 	gprs[0] = (u32)val;
 	gprs[1] = msr;
@@ -193,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	/* Athlon 660/661 is valid. */
 	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
 	    (c->x86_mask == 1)))
-		goto valid_k7;
+		return;
 
 	/* Duron 670 is valid */
 	if ((c->x86_model == 7) && (c->x86_mask == 0))
-		goto valid_k7;
+		return;
 
 	/*
 	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -210,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
 	     (c->x86_model > 7))
 		if (cpu_has_mp)
-			goto valid_k7;
+			return;
 
 	/* If we get here, not a certified SMP capable AMD system. */
 
@@ -220,11 +219,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	 */
 	WARN_ONCE(1, "WARNING: This combination of AMD"
 		" processors is not suitable for SMP.\n");
-	if (!test_taint(TAINT_UNSAFE_SMP))
-		add_taint(TAINT_UNSAFE_SMP);
-
-valid_k7:
-	;
+	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
 }
 
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -364,9 +359,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-int amd_get_nb_id(int cpu)
+u16 amd_get_nb_id(int cpu)
 {
-	int id = 0;
+	u16 id = 0;
 #ifdef CONFIG_SMP
 	id = per_cpu(cpu_llc_id, cpu);
 #endif
@@ -515,13 +510,16 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const int amd_erratum_383[];
+static const int amd_erratum_400[];
+static bool cpu_has_amd_erratum(const int *erratum);
+
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
-
-#ifdef CONFIG_SMP
 	unsigned long long value;
 
+#ifdef CONFIG_SMP
 	/*
 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
@@ -559,12 +557,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		 * (AMD Erratum #110, docId: 25759).
 		 */
 		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
-			u64 val;
-
 			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
-			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
-				val &= ~(1ULL << 32);
-				wrmsrl_amd_safe(0xc001100d, val);
+			if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+				value &= ~(1ULL << 32);
+				wrmsrl_amd_safe(0xc001100d, value);
 			}
 		}
 
@@ -617,13 +613,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if ((c->x86 == 0x15) &&
 	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-		u64 val;
 
-		if (!rdmsrl_safe(0xc0011005, &val)) {
-			val |= 1ULL << 54;
-			wrmsrl_safe(0xc0011005, val);
-			rdmsrl(0xc0011005, val);
-			if (val & (1ULL << 54)) {
+		if (!rdmsrl_safe(0xc0011005, &value)) {
+			value |= 1ULL << 54;
+			wrmsrl_safe(0xc0011005, value);
+			rdmsrl(0xc0011005, value);
+			if (value & (1ULL << 54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
 				  "disabled Topology Extensions Support\n");
@@ -637,11 +632,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 */
 	if ((c->x86 == 0x15) &&
 	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-		u64 val;
 
-		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
-			val |= 0x1E;
-			wrmsrl_safe(0xc0011021, val);
+		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+			value |= 0x1E;
+			wrmsrl_safe(0xc0011021, value);
 		}
 	}
 
@@ -685,12 +679,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		 * benefit in doing so.
 		 */
 		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+			unsigned long pfn = tseg >> PAGE_SHIFT;
+
 			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-			if ((tseg>>PMD_SHIFT) <
-				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
-				((tseg>>PMD_SHIFT) <
-				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
-				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+			if (pfn_range_is_mapped(pfn, pfn + 1))
 				set_memory_4k((unsigned long)__va(tseg), 1);
 		}
 	}
@@ -703,13 +695,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 > 0x11)
 		set_cpu_cap(c, X86_FEATURE_ARAT);
 
-	/*
-	 * Disable GART TLB Walk Errors on Fam10h. We do this here
-	 * because this is always needed when GART is enabled, even in a
-	 * kernel which has no MCE support built in.
-	 */
 	if (c->x86 == 0x10) {
 		/*
+		 * Disable GART TLB Walk Errors on Fam10h. We do this here
+		 * because this is always needed when GART is enabled, even in a
+		 * kernel which has no MCE support built in.
 		 * BIOS should disable GartTlbWlk Errors themself. If
 		 * it doesn't do it here as suggested by the BKDG.
 		 *
@@ -723,8 +713,29 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			mask |= (1 << 10);
 			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
 		}
+
+		/*
+		 * On family 10h BIOS may not have properly enabled WC+ support,
+		 * causing it to be converted to CD memtype. This may result in
+		 * performance degradation for certain nested-paging guests.
+		 * Prevent this conversion by clearing bit 24 in
+		 * MSR_AMD64_BU_CFG2.
+		 *
+		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
+		 * guests on older kvm hosts.
+		 */
+
+		rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
+		value &= ~(1ULL << 24);
+		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+
+		if (cpu_has_amd_erratum(amd_erratum_383))
+			set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 	}
 
+	if (cpu_has_amd_erratum(amd_erratum_400))
+		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
+
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
@@ -843,8 +854,7 @@ cpu_dev_register(amd_cpu_dev);
  * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
  * have an OSVW id assigned, which it takes as first argument. Both take a
  * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
- * int[] in arch/x86/include/asm/processor.h.
+ * AMD_MODEL_RANGE().
  *
  * Example:
  *
@@ -854,16 +864,22 @@ cpu_dev_register(amd_cpu_dev);
  *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
  */
 
-const int amd_erratum_400[] =
+#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
+
+static const int amd_erratum_400[] =
 	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
 			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_400);
 
-const int amd_erratum_383[] =
+static const int amd_erratum_383[] =
 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-EXPORT_SYMBOL_GPL(amd_erratum_383);
 
-bool cpu_has_amd_erratum(const int *erratum)
+static bool cpu_has_amd_erratum(const int *erratum)
 {
 	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
 	int osvw_id = *erratum++;
@@ -904,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum)
 
 	return false;
 }
-
-EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
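Note on the erratum tables moved into this file: AMD_MODEL_RANGE() packs a CPU family (bits 31-24) together with a start and end (model << 4 | stepping) pair (bits 23-12 and 11-0) into a single int, and cpu_has_amd_erratum() compares the running CPU's own (model << 4 | stepping) value against that window. As a sanity check, here is a minimal userspace sketch of the same encoding; the macros are copied verbatim from the hunk above, while in_range() and main() are illustrative helpers added here, not kernel code:

#include <stdio.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

/* Mirrors the comparison cpu_has_amd_erratum() does per range:
 * ms is (model << 4) | stepping, checked against the packed window. */
static int in_range(int range, int family, int model, int stepping)
{
	int ms = (model << 4) | stepping;

	return AMD_MODEL_RANGE_FAMILY(range) == family &&
	       ms >= AMD_MODEL_RANGE_START(range) &&
	       ms <= AMD_MODEL_RANGE_END(range);
}

int main(void)
{
	/* Second range of amd_erratum_400 above: family 0x10,
	 * model 0x2 stepping 0x1 through model 0xff stepping 0xf. */
	int range = AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf);

	printf("fam 0x10 model 0x4 step 0x2: %d\n",
	       in_range(range, 0x10, 0x4, 0x2));	/* prints 1 */
	printf("fam 0x10 model 0x2 step 0x0: %d\n",
	       in_range(range, 0x10, 0x2, 0x0));	/* prints 0 */
	return 0;
}

The { osvw_id, ranges..., 0 } layout produced by AMD_OSVW_ERRATUM() and AMD_LEGACY_ERRATUM() is what lets cpu_has_amd_erratum() take a bare const int *: the first element selects the OSVW lookup (or -1 for the legacy range walk), and the trailing 0 terminates the scan.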