| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-06-16 14:37:10 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-09-09 20:46:30 +0200 |
| commit | a4eaf7f14675cb512d69f0c928055e73d0c6d252 (patch) | |
| tree | e8a0f631fc28d4bd9becd2e9e2c71743c64ee3ec /arch/sparc/kernel | |
| parent | fa407f35e0298d841e4088f95a7f9cf6e725c6d5 (diff) | |
| download | olio-linux-3.10-a4eaf7f14675cb512d69f0c928055e73d0c6d252.tar.xz, olio-linux-3.10-a4eaf7f14675cb512d69f0c928055e73d0c6d252.zip | |
perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.
The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.
This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).
It also allows a counter to be scheduled without being started, providing
a generic frozen state (useful for rotating stopped counters).
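As a rough illustration of the split (a minimal sketch, not code from this series: the toy_* names and the toy_hw_*() hardware hooks are hypothetical; only the struct pmu callbacks, the PERF_EF_* flags and the PERF_HES_* state bits are what the series introduces), a driver now separates scheduling from starting like this:

```c
#include <linux/perf_event.h>

/* Hypothetical hardware hooks, stubbed out for the sketch. */
static void toy_hw_set_period(struct perf_event *event)	{ }
static void toy_hw_start_counter(struct perf_event *event)	{ }
static void toy_hw_stop_counter(struct perf_event *event)	{ }
static void toy_hw_fold_count(struct perf_event *event)	{ }

static void toy_pmu_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)	/* reprogram the sampling period */
		toy_hw_set_period(event);

	event->hw.state = 0;		/* counting again */
	toy_hw_start_counter(event);
}

static void toy_pmu_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		toy_hw_stop_counter(event);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		toy_hw_fold_count(event);	/* fold the hw count into event->count */
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int toy_pmu_add(struct perf_event *event, int flags)
{
	/* Schedule the counter on the PMU, but leave it stopped... */
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* ...unless the core asked for an immediate start. */
	if (flags & PERF_EF_START)
		toy_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void toy_pmu_del(struct perf_event *event, int flags)
{
	/* Absorb the final count, then release the counter slot. */
	toy_pmu_stop(event, PERF_EF_UPDATE);
}

static struct pmu toy_pmu = {
	.add	= toy_pmu_add,
	.del	= toy_pmu_del,
	.start	= toy_pmu_start,
	.stop	= toy_pmu_stop,
};
```

The point of the split is that add()/del() only bind and release a counter, while start()/stop() toggle it between running and stopped without giving the counter up.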
The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state (a short sketch of
these approaches follows the list):
 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state
 2) We store the counter state and ignore all read/overflow events
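For the two variants that do not program a NOP event, a minimal sketch (hypothetical toy_* helpers; only the PERF_HES_* bits are real, and variant 1b is what the sparc hunk below does via nop_for_index()):

```c
#include <linux/perf_event.h>

/* Hypothetical hardware hook, stubbed out for the sketch. */
static void toy_hw_clear_enable_bit(int idx)	{ }

/* 1a: the PMU has a per-counter enable bit -- clearing it freezes the
 * counter in place, so its value is preserved while it is stopped. */
static void toy_stop_via_enable_bit(struct perf_event *event)
{
	toy_hw_clear_enable_bit(event->hw.idx);
	event->hw.state |= PERF_HES_STOPPED;
}

/* 2: the counter cannot actually be stopped -- snapshot its value at
 * stop time and have the read/overflow paths discard anything coming
 * from an event that is marked stopped. */
static void toy_handle_overflow(struct perf_event *event, struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;			/* stopped: ignore the overflow */

	/* ... process the sample as usual ... */
}
```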
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sparc/kernel')
| -rw-r--r-- | arch/sparc/kernel/perf_event.c | 109 |
1 file changed, 67 insertions(+), 42 deletions(-)
```diff
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 37cae676536..516be2314b5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
 		enc = perf_event_get_enc(cpuc->events[i]);
 		pcr &= ~mask_for_index(idx);
-		pcr |= event_encoding(enc, idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			pcr |= nop_for_index(idx);
+		else
+			pcr |= event_encoding(enc, idx);
 	}
 out:
 	return pcr;
 }
 
-static void sparc_pmu_pmu_enable(struct pmu *pmu)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +694,7 @@ static void sparc_pmu_pmu_enable(struct pmu *pmu)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_pmu_disable(struct pmu *pmu)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -710,10 +713,53 @@ static void sparc_pmu_pmu_disable(struct pmu *pmu)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+		sparc_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 	int i;
 
@@ -722,7 +768,10 @@ static void sparc_pmu_disable(struct perf_event *event)
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
-			int idx = cpuc->current_idx[i];
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_stop(event, PERF_EF_UPDATE);
 
 			/* Shift remaining entries down into
 			 * the existing slot.
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event)
 					cpuc->current_idx[i];
 			}
 
-			/* Absorb the final count and turn off the
-			 * event.
-			 */
-			sparc_pmu_disable_event(cpuc, hwc, idx);
-			barrier();
-			sparc_perf_event_update(event, hwc, idx);
-
 			perf_event_update_userpage(event);
 
 			cpuc->n_events--;
@@ -752,19 +794,6 @@ static void sparc_pmu_disable(struct perf_event *event)
 	local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-			      struct perf_event *event)
-{
-	int i;
-
-	for (i = 0; i < cpuc->n_events; i++) {
-		if (cpuc->event[i] == event)
-			break;
-	}
-	BUG_ON(i == cpuc->n_events);
-	return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
 	sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	int idx = active_event_index(cpuc, event);
-	struct hw_perf_event *hwc = &event->hw;
-
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -984,7 +1004,7 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0, ret = -EAGAIN;
@@ -1001,6 +1021,10 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	event->hw.state = PERF_HES_UPTODATE;
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state |= PERF_HES_STOPPED;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
@@ -1156,13 +1180,14 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= sparc_pmu_pmu_enable,
-	.pmu_disable	= sparc_pmu_pmu_disable,
+	.pmu_enable	= sparc_pmu_enable,
+	.pmu_disable	= sparc_pmu_disable,
 	.event_init	= sparc_pmu_event_init,
-	.enable		= sparc_pmu_enable,
-	.disable	= sparc_pmu_disable,
+	.add		= sparc_pmu_add,
+	.del		= sparc_pmu_del,
+	.start		= sparc_pmu_start,
+	.stop		= sparc_pmu_stop,
 	.read		= sparc_pmu_read,
-	.unthrottle	= sparc_pmu_unthrottle,
 	.start_txn	= sparc_pmu_start_txn,
 	.cancel_txn	= sparc_pmu_cancel_txn,
 	.commit_txn	= sparc_pmu_commit_txn,
@@ -1243,7 +1268,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(cpuc, hwc, idx);
+			sparc_pmu_stop(event, 0);
 	}
 
 	return NOTIFY_STOP;
```
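For reference, a rough sketch of the call order the generic code is expected to use against these methods (the example_sequence() wrapper is hypothetical, not the actual kernel/perf_event.c code):

```c
#include <linux/perf_event.h>

/* Hypothetical wrapper showing the expected call order only. */
static void example_sequence(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;

	pmu->add(event, PERF_EF_START);		/* schedule on the PMU and start counting */

	pmu->stop(event, PERF_EF_UPDATE);	/* pause; fold the hw count into the event */
	pmu->start(event, PERF_EF_RELOAD);	/* reprogram the period and resume */

	pmu->del(event, 0);			/* absorb the final count and unschedule */
}
```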