Diffstat (limited to 'kernel')
 kernel/events/core.c    | 20 ++++++++++++++++++--
 kernel/rcutree_plugin.h | 13 ++++++++++---
 kernel/sched/debug.c    |  4 ++--
 kernel/sched/fair.c     |  2 +-
 kernel/sched/rt.c       |  2 +-
 5 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f2..7b6646a8c06 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+					      PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	event->overflow_handler	= overflow_handler;
 	event->overflow_handler_context = context;
 
-	if (attr->disabled)
-		event->state = PERF_EVENT_STATE_OFF;
+	perf_event__state_init(event);
 
 	pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 		mutex_lock(&gctx->mutex);
 		perf_remove_from_context(group_leader);
+
+		/*
+		 * Removing from the context ends up with disabled
+		 * event. What we want here is event in the initial
+		 * startup state, ready to be add into new context.
+		 */
+		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
 			perf_remove_from_context(sibling);
+			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
 		mutex_unlock(&gctx->mutex);
 		put_ctx(gctx);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b..c1cc7e17ff9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+	rcu_nocb_poll = 1;
+	return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
 		if (!rcu_nocb_poll)
-			wait_event(rdp->nocb_wq, rdp->nocb_head);
+			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
 			schedule_timeout_interruptible(1);
+			flush_signals(current);
 			continue;
 		}
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e58..7ae4c4c5420 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-			atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
 	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
 	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234..81fa5364340 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344..4f02b284735 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 	int i, weight, more = 0;
 	u64 rt_period;
 
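
The kernel/events/core.c change factors the attr.disabled check into perf_event__state_init() and re-runs it after perf_remove_from_context(): removing an event from a context leaves it disabled, so a group leader being moved into a new context must first be put back into its initial startup state. Below is a minimal userspace sketch of the contract that helper encodes; the file name, event choice, and busy loop are arbitrary, and glibc ships no perf_event_open() wrapper, so the syscall is invoked directly. An event opened with attr.disabled = 1 starts off and counts nothing until PERF_EVENT_IOC_ENABLE; with attr.disabled = 0 it counts as soon as its context is scheduled.

/*
 * perf_disabled_demo.c -- hypothetical example, not part of the patch.
 * Build: gcc -o perf_disabled_demo perf_disabled_demo.c
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

/* glibc provides no wrapper for this system call. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;	/* event starts in the OFF state */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* self, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Nothing is counted until the event is explicitly enabled. */
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;	/* some work to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}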
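
In kernel/rcutree_plugin.h, rcu_nocb_poll moves from a read-only module_param to an early_param. Two things follow directly from the new parser: it runs during early boot rather than at initcall time, and it ignores its argument, so the bare token rcu_nocb_poll on the boot command line is enough to enable polling. A minimal sketch of the registration pattern, with hypothetical names (early_param() is only available to built-in code, not loadable modules):

#include <linux/init.h>
#include <linux/cache.h>
#include <linux/types.h>

static bool demo_poll __read_mostly;	/* hypothetical flag */

/*
 * Called from parse_early_param() during early boot, long before any
 * initcall runs.  The mere presence of "demo_poll" on the command line
 * sets the flag; an "=value" suffix, if any, arrives in @arg and is
 * deliberately ignored here, mirroring parse_rcu_nocb_poll() above.
 */
static int __init parse_demo_poll(char *arg)
{
	demo_poll = true;
	return 0;
}
early_param("demo_poll", parse_demo_poll);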
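
The rcu_nocb_kthread() hunk swaps wait_event() for wait_event_interruptible() and flushes signals on the no-callbacks path. wait_event() blocks in TASK_UNINTERRUPTIBLE, and a kthread parked there for long stretches inflates the load average and can trip the hung-task detector; sleeping interruptibly avoids that, at the cost of having to discard whatever signal state the interruptible sleep may leave pending. A module-style sketch of the same pattern, all names hypothetical (header locations as in the v3.8-era tree this diff targets):

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>	/* flush_signals() */
#include <linux/err.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_cond;
static struct task_struct *demo_task;

static int demo_kthread(void *unused)
{
	while (!kthread_should_stop()) {
		/* Interruptible sleep: no load-average contribution,
		 * no hung-task complaints while idle. */
		wait_event_interruptible(demo_wq,
				demo_cond || kthread_should_stop());
		if (!demo_cond) {
			/* Woken by a signal or to stop: discard any
			 * pending signal state before looping. */
			flush_signals(current);
			continue;
		}
		demo_cond = false;
		/* ... process one batch of work here ... */
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_kthread, NULL, "demo_kthread");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");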
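
The kernel/sched/debug.c hunk fixes a format-string mismatch: atomic64_read() returns long on 64-bit builds but long long on 32-bit ones, so neither %ld nor a bare %lld argument is correct everywhere; the fix casts the value so it always matches the %lld specifier. The same idiom in plain userspace C, as a runnable sketch:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	int64_t load_avg = 123456789012345LL;

	/* Wrong wherever int64_t is not long (e.g. 32-bit glibc):
	 * printf("%ld\n", load_avg);
	 */

	/* Portable: cast to a type whose width the specifier fixes. */
	printf("tg_load_avg: %lld\n", (long long)load_avg);

	/* Or use the standard macro that matches int64_t exactly. */
	printf("tg_load_avg: %" PRId64 "\n", load_avg);
	return 0;
}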