diff options
 include/linux/perf_event.h |  1 +
 kernel/events/core.c       | 48 ++++++++++++++++++++++++++++----------------
 2 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6..cb44c9e7566 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -890,6 +890,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				nr_freq;
 	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a355ffb0b28..b3fed52aaf2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1130,6 +1130,8 @@ event_sched_out(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu--;
 	ctx->nr_active--;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 }
@@ -1407,6 +1409,8 @@ event_sched_in(struct perf_event *event,
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
+	if (event->attr.freq && event->attr.sample_freq)
+		ctx->nr_freq++;
 
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
@@ -2329,6 +2333,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 	u64 interrupts, now;
 	s64 delta;
 
+	if (!ctx->nr_freq)
+		return;
+
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2384,12 +2391,14 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
 	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
 	struct perf_event_context *ctx = NULL;
-	int rotate = 0, remove = 1;
+	int rotate = 0, remove = 1, freq = 0;
 
 	if (cpuctx->ctx.nr_events) {
 		remove = 0;
 		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
 			rotate = 1;
+		if (cpuctx->ctx.nr_freq)
+			freq = 1;
 	}
 
 	ctx = cpuctx->task_ctx;
@@ -2397,33 +2406,40 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		remove = 0;
 		if (ctx->nr_events != ctx->nr_active)
 			rotate = 1;
+		if (ctx->nr_freq)
+			freq = 1;
 	}
 
+	if (!rotate && !freq)
+		goto done;
+
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
-	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-	if (ctx)
-		perf_ctx_adjust_freq(ctx, interval);
 
-	if (!rotate)
-		goto done;
+	if (freq) {
+		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
+		if (ctx)
+			perf_ctx_adjust_freq(ctx, interval);
+	}
 
-	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	if (ctx)
-		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	if (rotate) {
+		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+		if (ctx)
+			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-	rotate_ctx(&cpuctx->ctx);
-	if (ctx)
-		rotate_ctx(ctx);
+		rotate_ctx(&cpuctx->ctx);
+		if (ctx)
+			rotate_ctx(ctx);
 
-	perf_event_sched_in(cpuctx, ctx, current);
+		perf_event_sched_in(cpuctx, ctx, current);
+	}
+
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
-
-	perf_pmu_enable(cpuctx->ctx.pmu);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 void perf_event_task_tick(void)