Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  116
1 file changed, 34 insertions(+), 82 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ae9b503220c..7d5c3b0ea8d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,7 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
-#include <trace/power.h>
+#include <trace/events/power.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
@@ -60,7 +60,6 @@ enum {
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
-#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -71,13 +70,7 @@ struct acpi_cpufreq_data {
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
-struct acpi_msr_data {
-	u64 saved_aperf, saved_mperf;
-};
-
-static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
-
-DEFINE_TRACE(power_mark);
+static DEFINE_PER_CPU(struct aperfmperf, old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -244,23 +237,12 @@ static u32 get_cur_val(const struct cpumask *mask)
 	return cmd.val;
 }
 
-struct perf_pair {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf, mperf;
-};
-
 /* Called via smp_call_function_single(), on the target CPU */
 static void read_measured_perf_ctrs(void *_cur)
 {
-	struct perf_pair *cur = _cur;
+	struct aperfmperf *am = _cur;
 
-	rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
-	rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
+	get_aperfmperf(am);
 }
 
 /*
@@ -279,63 +261,17 @@ static void read_measured_perf_ctrs(void *_cur)
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	struct perf_pair readin, cur;
-	unsigned int perf_percent;
+	struct aperfmperf perf;
+	unsigned long ratio;
 	unsigned int retval;
 
-	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1))
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
 		return 0;
 
-	cur.aperf.whole = readin.aperf.whole -
-				per_cpu(msr_data, cpu).saved_aperf;
-	cur.mperf.whole = readin.mperf.whole -
-				per_cpu(msr_data, cpu).saved_mperf;
-	per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
-	per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
-
-#ifdef __i386__
-	/*
-	 * We don't want to do a 64 bit divide with a 32 bit kernel.
-	 * Get an approximate value. Return failure in case we cannot get
-	 * an approximate value.
-	 */
-	if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
-		int shift_count;
-		u32 h;
-
-		h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
-		shift_count = fls(h);
-
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
-		int shift_count = 7;
-		cur.aperf.split.lo >>= shift_count;
-		cur.mperf.split.lo >>= shift_count;
-	}
-
-	if (cur.aperf.split.lo && cur.mperf.split.lo)
-		perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
-	else
-		perf_percent = 0;
-
-#else
-	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
-		int shift_count = 7;
-		cur.aperf.whole >>= shift_count;
-		cur.mperf.whole >>= shift_count;
-	}
-
-	if (cur.aperf.whole && cur.mperf.whole)
-		perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
-	else
-		perf_percent = 0;
-
-#endif
+	ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
+	per_cpu(old_perf, cpu) = perf;
 
-	retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
 	return retval;
 }
@@ -394,7 +330,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	unsigned int i;
 	int result = 0;
-	struct power_trace it;
 
 	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
@@ -426,7 +361,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		}
 	}
 
-	trace_power_mark(&it, POWER_PSTATE, next_perf_state);
+	trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency);
 
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
@@ -588,6 +523,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled. */
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -602,6 +552,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -731,12 +687,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	acpi_processor_notify_smm(THIS_MODULE);
 
 	/* Check for APERF/MPERF support in hardware */
-	if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
-		unsigned int ecx;
-		ecx = cpuid_ecx(6);
-		if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
-			acpi_cpufreq_driver.getavg = get_measured_perf;
-	}
+	if (cpu_has(c, X86_FEATURE_APERFMPERF))
+		acpi_cpufreq_driver.getavg = get_measured_perf;
 
 	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)
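For reference, the patch replaces the driver-private MSR bookkeeping with the kernel's generic APERF/MPERF helpers. A minimal sketch of what those helpers look like, assuming the 2.6.32-era definitions in arch/x86/include/asm/processor.h (exact signatures and the APERFMPERF_SHIFT value may differ between kernel versions):

/*
 * Sketch of the generic helpers this driver now calls; not part of
 * this diff.  Based on the 2.6.32-era <asm/processor.h>.
 */
struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	/* rdmsrl() reads the full 64-bit MSR, so no lo/hi splitting */
	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline unsigned long
calc_aperfmperf_ratio(struct aperfmperf *old, struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;

	aperf *= 1UL << APERFMPERF_SHIFT;
	do_div(aperf, mperf);	/* do_div() divides in place */

	return aperf;
}

Because the deltas are computed with unsigned 64-bit subtraction, counter wraparound is handled for free, which is what lets the driver drop its per-CPU saved_aperf/saved_mperf bookkeeping in favor of a single struct aperfmperf snapshot per CPU.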
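The large #ifdef __i386__ block being deleted existed only to approximate a percentage without doing a 64-bit divide on 32-bit kernels. The replacement instead expresses the APERF/MPERF ratio as a fixed-point fraction scaled by 2^APERFMPERF_SHIFT, so recovering the current frequency is one multiply and one shift. A standalone illustration of the arithmetic, with hypothetical counter deltas (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define APERFMPERF_SHIFT 10

int main(void)
{
	uint64_t aperf_delta = 500000;   /* hypothetical: APERF advanced half */
	uint64_t mperf_delta = 1000000;  /* as fast as MPERF -> 50% speed    */
	unsigned int max_freq = 2400000; /* policy->cpuinfo.max_freq, in kHz */

	/* ratio = (aperf/mperf) << APERFMPERF_SHIFT, as in the kernel helper */
	uint64_t ratio = (aperf_delta << APERFMPERF_SHIFT) / mperf_delta;
	unsigned int cur = (max_freq * ratio) >> APERFMPERF_SHIFT;

	/* prints: ratio=512 cur=1200000 kHz */
	printf("ratio=%llu cur=%u kHz\n", (unsigned long long)ratio, cur);
	return 0;
}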
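Similarly, the open-coded cpuid_ecx(6) test at the end of the diff becomes cpu_has(c, X86_FEATURE_APERFMPERF); both check the same hardware capability, CPUID leaf 06H, ECX bit 0, which advertises the APERF/MPERF MSRs. An illustrative userspace version of that test, using GCC's <cpuid.h> (demo only, not kernel code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.06H:ECX[0] == 1 means APERF/MPERF are available */
	if (__get_cpuid(6, &eax, &ebx, &ecx, &edx) && (ecx & 1))
		puts("APERF/MPERF supported");
	else
		puts("APERF/MPERF not supported");
	return 0;
}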