Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_amd.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	142
1 file changed, 74 insertions, 68 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4..db6f7d4056e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -271,28 +278,6 @@ done:
 	return &emptyconstraint;
 }
 
-static __initconst struct x86_pmu amd_pmu = {
-	.name			= "AMD",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.eventsel		= MSR_K7_EVNTSEL0,
-	.perfctr		= MSR_K7_PERFCTR0,
-	.event_map		= amd_pmu_event_map,
-	.raw_event		= amd_pmu_raw_event,
-	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
-	.num_events		= 4,
-	.event_bits		= 48,
-	.event_mask		= (1ULL << 48) - 1,
-	.apic			= 1,
-	/* use highest bit to detect overflow */
-	.max_period		= (1ULL << 47) - 1,
-	.get_event_constraints	= amd_get_event_constraints,
-	.put_event_constraints	= amd_put_event_constraints
-};
-
 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 {
 	struct amd_nb *nb;
@@ -309,57 +294,61 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	 * initialize all possible NB constraints
 	 */
 	for (i = 0; i < x86_pmu.num_events; i++) {
-		set_bit(i, nb->event_constraints[i].idxmsk);
+		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
-
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
 	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
@@ -370,14 +359,44 @@ static void amd_pmu_cpu_offline(int cpu)
 
 	raw_spin_lock(&amd_nb_lock);
 
-	if (--cpuhw->amd_nb->refcnt == 0)
-		kfree(cpuhw->amd_nb);
+	if (cpuhw->amd_nb) {
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
-	cpuhw->amd_nb = NULL;
+		cpuhw->amd_nb = NULL;
+	}
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
+static __initconst struct x86_pmu amd_pmu = {
+	.name			= "AMD",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= x86_pmu_enable_all,
+	.enable			= x86_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.eventsel		= MSR_K7_EVNTSEL0,
+	.perfctr		= MSR_K7_PERFCTR0,
+	.event_map		= amd_pmu_event_map,
+	.raw_event		= amd_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
+	.num_events		= 4,
+	.event_bits		= 48,
+	.event_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
+	/* use highest bit to detect overflow */
+	.max_period		= (1ULL << 47) - 1,
+	.get_event_constraints	= amd_get_event_constraints,
+	.put_event_constraints	= amd_put_event_constraints,
+
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
+};
+
 static __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -390,11 +409,6 @@ static __init int amd_pmu_init(void)
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
 	       sizeof(hw_cache_event_ids));
 
-	/*
-	 * explicitly initialize the boot cpu, other cpus will get
-	 * the cpu hotplug callbacks from smp_init()
-	 */
-	amd_pmu_cpu_online(smp_processor_id());
-
 	return 0;
 }
 
@@ -405,12 +419,4 @@ static int amd_pmu_init(void)
 	return 0;
 }
 
-static void amd_pmu_cpu_online(int cpu)
-{
-}
-
-static void amd_pmu_cpu_offline(int cpu)
-{
-}
-
 #endif
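
The northbridge-sharing scheme this patch moves to can be modelled in plain C: each CPU preallocates an amd_nb in the prepare step (where allocation may fail safely), adopts an existing NB struct with a matching nb_id in the starting step (freeing its own preallocated copy), and drops its reference in the dead step. The sketch below is illustrative only; cpu_prepare, cpu_starting, cpu_dead, and cpu_nb are hypothetical stand-ins for the kernel's per-CPU hotplug callbacks and cpu_hw_events state, and locking is omitted.

/* Standalone user-space model of the allocate/share/release lifecycle. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

struct amd_nb {
	int nb_id;   /* northbridge id, -1 while unassigned */
	int refcnt;  /* CPUs currently sharing this struct */
};

static struct amd_nb *cpu_nb[NR_CPUS];

/* Prepare step: always preallocate, so the starting step cannot fail. */
static int cpu_prepare(int cpu)
{
	cpu_nb[cpu] = calloc(1, sizeof(*cpu_nb[cpu]));
	if (!cpu_nb[cpu])
		return -1;
	cpu_nb[cpu]->nb_id = -1;
	return 0;
}

/* Starting step: share an existing NB struct if one matches, else keep ours. */
static void cpu_starting(int cpu, int nb_id)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		struct amd_nb *nb = cpu_nb[i];

		if (i == cpu || !nb)
			continue;
		if (nb->nb_id == nb_id) {
			free(cpu_nb[cpu]);   /* drop our preallocated copy */
			cpu_nb[cpu] = nb;
			break;
		}
	}
	cpu_nb[cpu]->nb_id = nb_id;
	cpu_nb[cpu]->refcnt++;
}

/* Dead step: free the struct once no CPU references it. */
static void cpu_dead(int cpu)
{
	struct amd_nb *nb = cpu_nb[cpu];

	if (!nb)
		return;
	if (nb->nb_id == -1 || --nb->refcnt == 0)
		free(nb);
	cpu_nb[cpu] = NULL;
}

int main(void)
{
	cpu_prepare(0);
	cpu_starting(0, 0);	/* first CPU on NB 0: keeps its own allocation */
	cpu_prepare(1);
	cpu_starting(1, 0);	/* same NB: adopts CPU 0's struct */
	printf("shared: %s, refcnt=%d\n",
	       cpu_nb[0] == cpu_nb[1] ? "yes" : "no", cpu_nb[0]->refcnt);
	cpu_dead(1);
	cpu_dead(0);
	return 0;
}

Compiled and run, the model prints "shared: yes, refcnt=2". The design point mirrored here is that allocation happens only in the prepare step, which may fail; the starting step (which in the kernel runs on the booting CPU with interrupts disabled and cannot allocate or fail) only looks up and adopts, which is presumably why the patch splits the old amd_pmu_cpu_online() into the prepare/starting pair.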