Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--	arch/sparc/kernel/perf_event.c	109
1 file changed, 67 insertions(+), 42 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 37cae676536..516be2314b5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 
 		enc = perf_event_get_enc(cpuc->events[i]);
 		pcr &= ~mask_for_index(idx);
-		pcr |= event_encoding(enc, idx);
+		if (hwc->state & PERF_HES_STOPPED)
+			pcr |= nop_for_index(idx);
+		else
+			pcr |= event_encoding(enc, idx);
 	}
 out:
 	return pcr;
 }
 
-static void sparc_pmu_pmu_enable(struct pmu *pmu)
+static void sparc_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 pcr;
@@ -691,7 +694,7 @@ static void sparc_pmu_pmu_enable(struct pmu *pmu)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_pmu_disable(struct pmu *pmu)
+static void sparc_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	u64 val;
@@ -710,10 +713,53 @@ static void sparc_pmu_pmu_disable(struct pmu *pmu)
 	pcr_ops->write(cpuc->pcr);
 }
 
-static void sparc_pmu_disable(struct perf_event *event)
+static int active_event_index(struct cpu_hw_events *cpuc,
+			      struct perf_event *event)
+{
+	int i;
+
+	for (i = 0; i < cpuc->n_events; i++) {
+		if (cpuc->event[i] == event)
+			break;
+	}
+	BUG_ON(i == cpuc->n_events);
+	return cpuc->current_idx[i];
+}
+
+static void sparc_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		sparc_perf_event_set_period(event, &event->hw, idx);
+	}
+
+	event->hw.state = 0;
+
+	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+}
+
+static void sparc_pmu_stop(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = active_event_index(cpuc, event);
+
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sparc_pmu_disable_event(cpuc, &event->hw, idx);
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
+		sparc_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	unsigned long flags;
 	int i;
 
@@ -722,7 +768,10 @@ static void sparc_pmu_disable(struct perf_event *event)
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event[i]) {
-			int idx = cpuc->current_idx[i];
+			/* Absorb the final count and turn off the
+			 * event.
+			 */
+			sparc_pmu_stop(event, PERF_EF_UPDATE);
 
 			/* Shift remaining entries down into
 			 * the existing slot.
@@ -734,13 +783,6 @@
 					cpuc->current_idx[i];
 			}
 
-			/* Absorb the final count and turn off the
-			 * event.
-			 */
-			sparc_pmu_disable_event(cpuc, hwc, idx);
-			barrier();
-			sparc_perf_event_update(event, hwc, idx);
-
 			perf_event_update_userpage(event);
 
 			cpuc->n_events--;
@@ -752,19 +794,6 @@
 	local_irq_restore(flags);
 }
 
-static int active_event_index(struct cpu_hw_events *cpuc,
-			      struct perf_event *event)
-{
-	int i;
-
-	for (i = 0; i < cpuc->n_events; i++) {
-		if (cpuc->event[i] == event)
-			break;
-	}
-	BUG_ON(i == cpuc->n_events);
-	return cpuc->current_idx[i];
-}
-
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event)
 	sparc_perf_event_update(event, hwc, idx);
 }
 
-static void sparc_pmu_unthrottle(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	int idx = active_event_index(cpuc, event);
-	struct hw_perf_event *hwc = &event->hw;
-
-	sparc_pmu_enable_event(cpuc, hwc, idx);
-}
-
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
@@ -984,7 +1004,7 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static int sparc_pmu_enable(struct perf_event *event)
+static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int n0, ret = -EAGAIN;
@@ -1001,6 +1021,10 @@ static int sparc_pmu_enable(struct perf_event *event)
 	cpuc->events[n0] = event->hw.event_base;
 	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
+	event->hw.state = PERF_HES_UPTODATE;
+	if (!(ef_flags & PERF_EF_START))
+		event->hw.state |= PERF_HES_STOPPED;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
@@ -1156,13 +1180,14 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= sparc_pmu_pmu_enable,
-	.pmu_disable	= sparc_pmu_pmu_disable,
+	.pmu_enable	= sparc_pmu_enable,
+	.pmu_disable	= sparc_pmu_disable,
 	.event_init	= sparc_pmu_event_init,
-	.enable		= sparc_pmu_enable,
-	.disable	= sparc_pmu_disable,
+	.add		= sparc_pmu_add,
+	.del		= sparc_pmu_del,
+	.start		= sparc_pmu_start,
+	.stop		= sparc_pmu_stop,
 	.read		= sparc_pmu_read,
-	.unthrottle	= sparc_pmu_unthrottle,
 	.start_txn	= sparc_pmu_start_txn,
 	.cancel_txn	= sparc_pmu_cancel_txn,
 	.commit_txn	= sparc_pmu_commit_txn,
@@ -1243,7 +1268,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(cpuc, hwc, idx);
+			sparc_pmu_stop(event, 0);
 	}
 
 	return NOTIFY_STOP;
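
The conversion above follows the reworked struct pmu interface: ->add()/->del() schedule an event onto a counter and remove it, while ->start()/->stop() arm and disarm it, with event->hw.state recording whether the counter is disarmed (PERF_HES_STOPPED) and whether the software count has already absorbed the hardware count (PERF_HES_UPTODATE). The stand-alone C sketch below models just that flag protocol outside the kernel. The PERF_HES_*/PERF_EF_* values match include/linux/perf_event.h, but the mock_* types and functions and the simulated hardware counter are illustrative assumptions, not kernel code.

/* Stand-alone mock of the PERF_HES_* / PERF_EF_* protocol used by the
 * patch above.  The flag values match include/linux/perf_event.h; all
 * mock_* names and the simulated hardware counter are invented here
 * purely for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PERF_HES_STOPPED	0x01	/* event is not counting on hardware */
#define PERF_HES_UPTODATE	0x02	/* event->count reflects the hw count */

#define PERF_EF_START		0x01	/* ->add(): also start the event */
#define PERF_EF_RELOAD		0x02	/* ->start(): reprogram the period */
#define PERF_EF_UPDATE		0x04	/* ->stop(): fold in the hw count */

struct mock_event {
	int		state;	/* PERF_HES_* bits */
	long long	count;	/* software-visible count */
	long long	hw;	/* pretend hardware counter */
};

static void mock_start(struct mock_event *e, int flags)
{
	/* The kernel WARN_ON_ONCE()s when asked to reload the period of
	 * an event whose count was never brought up to date.
	 */
	if (flags & PERF_EF_RELOAD)
		assert(e->state & PERF_HES_UPTODATE);

	e->state = 0;	/* neither stopped nor stale: counting */
}

static void mock_stop(struct mock_event *e, int flags)
{
	if (!(e->state & PERF_HES_STOPPED))
		e->state |= PERF_HES_STOPPED;	/* disarm exactly once */

	if (!(e->state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		e->count += e->hw;		/* absorb the final count... */
		e->hw = 0;
		e->state |= PERF_HES_UPTODATE;	/* ...and only once */
	}
}

static void mock_add(struct mock_event *e, int flags)
{
	e->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		e->state |= PERF_HES_STOPPED;
	else
		mock_start(e, PERF_EF_RELOAD);
}

int main(void)
{
	struct mock_event e = { 0, 0, 0 };

	mock_add(&e, PERF_EF_START);	/* schedule and start the event */
	e.hw = 1000;			/* pretend the counter ticked */
	mock_stop(&e, PERF_EF_UPDATE);	/* the ->del() path: stop + fold */
	mock_stop(&e, PERF_EF_UPDATE);	/* idempotent: no double count */
	printf("count=%lld state=%#x\n", e.count, e.state);
	return 0;
}

This double bookkeeping is what lets sparc_pmu_del() call sparc_pmu_stop(event, PERF_EF_UPDATE) unconditionally: stopping an already-stopped event neither disables the counter twice nor folds the hardware count in a second time.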