Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  119
1 file changed, 119 insertions(+), 0 deletions(-)
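
The patch below adds rcu_barrier(), rcu_barrier_bh(), and rcu_barrier_sched() to the tree RCU implementation in kernel/rcutree.c. Each variant waits until all in-flight callbacks of the corresponding type (call_rcu(), call_rcu_bh(), call_rcu_sched()) have completed, and the CPU-hotplug notifier is extended so that callbacks being migrated off a dying CPU are also waited for.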
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e2e272b5c27..0108570a192 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1363,6 +1363,103 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+	smp_mb(); /* In case we didn't sleep. */
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol mutex to protect against CPU hotplug */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+	wait_migrated_callbacks();
+}
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(call_rcu);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_migrate_type_count))
+		wake_up(&rcu_migrate_wq);
+}
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
@@ -1459,6 +1556,28 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		/* Don't need to wait until next removal operation. */
+		/* rcu_migrate_head is protected by cpu_add_remove_lock */
+		wait_migrated_callbacks();
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online CPUs have queued rcu_barrier_func(),
+		 * and the dead CPU (if it exists) queues rcu_migrate_callback()s.
+		 *
+		 * These callbacks ensure _rcu_barrier() waits for all
+		 * RCU callbacks of the specified type to complete.
+		 */
+		atomic_set(&rcu_migrate_type_count, 3);
+		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
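
The block comment in _rcu_barrier() describes the core trick: rcu_barrier_cpu_count starts at 1 so that no callback can drive it to zero while rcu_barrier_func() is still being queued on the remaining CPUs, and the orchestrating task drops that initial reference only after on_each_cpu() returns. Below is a minimal user-space sketch of the same count-to-one completion idiom, using pthreads and C11 atomics. Everything in it (worker(), complete_barrier(), NWORKERS) is invented for illustration; it is an analogy to the pattern above, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int barrier_count;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static int done;

static void complete_barrier(void)
{
	pthread_mutex_lock(&done_lock);
	done = 1;
	pthread_cond_signal(&done_cond);
	pthread_mutex_unlock(&done_lock);
}

/* Stands in for the RCU callback: the last decrement completes the barrier. */
static void *worker(void *arg)
{
	(void)arg;
	if (atomic_fetch_sub(&barrier_count, 1) == 1)
		complete_barrier();
	return NULL;
}

int main(void)
{
	pthread_t tids[NWORKERS];
	int i;

	/* Start at 1: the dispatcher holds a reference while enqueueing. */
	atomic_store(&barrier_count, 1);
	for (i = 0; i < NWORKERS; i++) {
		atomic_fetch_add(&barrier_count, 1);	/* one per "CPU" */
		pthread_create(&tids[i], NULL, worker, NULL);
	}
	/* Drop the initial reference; completes if workers already finished. */
	if (atomic_fetch_sub(&barrier_count, 1) == 1)
		complete_barrier();

	pthread_mutex_lock(&done_lock);
	while (!done)
		pthread_cond_wait(&done_cond, &done_lock);
	pthread_mutex_unlock(&done_lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	printf("all simulated callbacks completed\n");
	return 0;
}

Because the dispatcher drops its reference with the same decrement-and-test the workers use, it completes the barrier itself in the case where every worker has already finished, which is exactly how _rcu_barrier() handles the analogous race.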
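
On the caller side, the classic user of rcu_barrier() is module unload: call_rcu() only guarantees that a grace period elapses before the callback runs, so callbacks may still be pending when the exit path executes, and they must not run after the module text is freed. A hedged sketch of such an exit path follows; example_exit() and example_unregister() are hypothetical names, not part of this patch.

/*
 * Hypothetical module-exit path.  First stop queueing new call_rcu()
 * callbacks, then wait for every callback already in flight to run,
 * so that no callback executes after the module is unloaded.
 */
static void __exit example_exit(void)
{
	example_unregister();	/* hypothetical: stop producing callbacks */
	rcu_barrier();		/* wait for all pending call_rcu() callbacks */
}
module_exit(example_exit);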