Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/cpu/perf_event.c            | 106
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel.c      |   2
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel_ds.c   |   2
3 files changed, 63 insertions, 47 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 79705ac4501..dd6fec71067 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void)
 	}
 }
 
-static void x86_pmu_pmu_disable(struct pmu *pmu)
+static void x86_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -800,10 +800,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 		hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-static void x86_pmu_pmu_enable(struct pmu *pmu)
+static void x86_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
@@ -839,7 +839,14 @@ static void x86_pmu_pmu_enable(struct pmu *pmu)
 			    match_prev_assignment(hwc, cpuc, i))
 				continue;
 
-			x86_pmu_stop(event);
+			/*
+			 * Ensure we don't accidentally enable a stopped
+			 * counter simply because we rescheduled.
+			 */
+			if (hwc->state & PERF_HES_STOPPED)
+				hwc->state |= PERF_HES_ARCH;
+
+			x86_pmu_stop(event, PERF_EF_UPDATE);
 		}
 
 		for (i = 0; i < cpuc->n_events; i++) {
@@ -851,7 +858,10 @@ static void x86_pmu_pmu_enable(struct pmu *pmu)
 			else if (i < n_running)
 				continue;
 
-			x86_pmu_start(event);
+			if (hwc->state & PERF_HES_ARCH)
+				continue;
+
+			x86_pmu_start(event, PERF_EF_RELOAD);
 		}
 		cpuc->n_added = 0;
 		perf_events_lapic_init();
@@ -952,15 +962,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
  * but only if it can be scehduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc;
@@ -975,10 +982,14 @@ static int x86_pmu_enable(struct perf_event *event)
 	if (ret < 0)
 		goto out;
 
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_ARCH;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
-	 * at commit time(->commit_txn) as a whole
+	 * at commit time (->commit_txn) as a whole
 	 */
 	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto done_collect;
@@ -1003,27 +1014,28 @@ out:
 	return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
-	if (idx == -1)
-		return -EAGAIN;
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		x86_perf_event_set_period(event);
+	}
+
+	event->hw.state = 0;
 
-	x86_perf_event_set_period(event);
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
 	x86_pmu.enable(event);
 	perf_event_update_userpage(event);
-
-	return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-	int ret = x86_pmu_start(event);
-	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1080,27 +1092,29 @@ void perf_event_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	if (!__test_and_clear_bit(idx, cpuc->active_mask))
-		return;
-
-	x86_pmu.disable(event);
 
-	/*
-	 * Drain the remaining delta count out of a event
-	 * that we are disabling:
-	 */
-	x86_perf_event_update(event);
+	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+		x86_pmu.disable(event);
+		cpuc->events[hwc->idx] = NULL;
+		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+		hwc->state |= PERF_HES_STOPPED;
+	}
 
-	cpuc->events[idx] = NULL;
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		/*
+		 * Drain the remaining delta count out of a event
+		 * that we are disabling:
+		 */
+		x86_perf_event_update(event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
@@ -1113,7 +1127,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;
 
-	x86_pmu_stop(event);
+	x86_pmu_stop(event, PERF_EF_UPDATE);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
@@ -1165,7 +1179,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu_stop(event);
+			x86_pmu_stop(event, 0);
 	}
 
 	if (handled)
@@ -1605,15 +1619,17 @@ int x86_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= x86_pmu_pmu_enable,
-	.pmu_disable	= x86_pmu_pmu_disable,
+	.pmu_enable	= x86_pmu_enable,
+	.pmu_disable	= x86_pmu_disable,
+
 	.event_init	= x86_pmu_event_init,
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
+
+	.add		= x86_pmu_add,
+	.del		= x86_pmu_del,
 	.start		= x86_pmu_start,
 	.stop		= x86_pmu_stop,
 	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
+
	.start_txn	= x86_pmu_start_txn,
 	.cancel_txn	= x86_pmu_cancel_txn,
 	.commit_txn	= x86_pmu_commit_txn,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index ee05c90012d..82395f2378e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -763,7 +763,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu_stop(event);
+			x86_pmu_stop(event, 0);
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 18018d1311c..9893a2f77b7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -491,7 +491,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 		regs.flags &= ~PERF_EFLAGS_EXACT;
 
 	if (perf_event_overflow(event, 1, &data, &regs))
-		x86_pmu_stop(event);
+		x86_pmu_stop(event, 0);
 }
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
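
The pattern repeated in every hunk above is the reworked pmu callback contract: ->add()/->del() schedule an event on the PMU but leave it in PERF_HES_STOPPED state, ->start()/->stop() toggle that state, PERF_EF_RELOAD asks start() to reprogram the sample period, and PERF_EF_UPDATE asks stop() to drain the remaining count exactly once (tracked by PERF_HES_UPTODATE). The sketch below is a minimal userspace model of that state machine, not kernel code: the flag and state constants mirror the kernel definitions, while the mock_* helpers, the printf tracing and the count arithmetic are invented for illustration.

#include <stdio.h>

#define PERF_EF_START		0x01	/* start the counter when adding it */
#define PERF_EF_RELOAD		0x02	/* reprogram the period when starting */
#define PERF_EF_UPDATE		0x04	/* read the counter when stopping */

#define PERF_HES_STOPPED	0x01	/* the counter is not running */
#define PERF_HES_UPTODATE	0x02	/* the count has been drained */
#define PERF_HES_ARCH		0x04	/* arch-private "leave it stopped" */

struct mock_event {
	int state;		/* PERF_HES_* bits */
	long long count;	/* value accumulated so far */
};

/* ->add(): put the event on the PMU, stopped and up to date */
static void mock_add(struct mock_event *e, int flags)
{
	e->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		e->state |= PERF_HES_ARCH;
}

/* ->start(): only valid on a stopped event; RELOAD re-arms the period */
static void mock_start(struct mock_event *e, int flags)
{
	if (!(e->state & PERF_HES_STOPPED))
		return;
	if (flags & PERF_EF_RELOAD)
		printf("start: reprogramming sample period\n");
	e->state = 0;
}

/* ->stop(): mark the event stopped; UPDATE drains the pending delta once */
static void mock_stop(struct mock_event *e, int flags)
{
	e->state |= PERF_HES_STOPPED;
	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += 1000;	/* stand-in for x86_perf_event_update() */
		e->state |= PERF_HES_UPTODATE;
	}
	printf("stop:  state=%#x count=%lld\n", e->state, e->count);
}

/* ->del(): stop with UPDATE so the final count is not lost */
static void mock_del(struct mock_event *e)
{
	mock_stop(e, PERF_EF_UPDATE);
	printf("del:   final count=%lld\n", e->count);
}

int main(void)
{
	struct mock_event e = { 0, 0 };

	mock_add(&e, PERF_EF_START);	/* like x86_pmu_add(event, PERF_EF_START) */
	mock_start(&e, PERF_EF_RELOAD);	/* like x86_pmu_start(event, PERF_EF_RELOAD) */
	mock_stop(&e, 0);		/* throttle-style stop, no readout */
	mock_start(&e, PERF_EF_RELOAD);
	mock_del(&e);			/* like x86_pmu_del(event, 0) */
	return 0;
}

Run as an ordinary C program this only prints state transitions; the point it illustrates is that a stop with flags == 0 (the throttling path in x86_pmu_handle_irq() above) leaves the count stale until a later stop with PERF_EF_UPDATE, or a del(), drains it.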