| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-21 17:38:49 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-21 17:38:49 -0800 | 
| commit | 7c2db36e730ee4facd341679ecb21ee73ba92831 (patch) | |
| tree | 75016fba72aaf0581b9263f7fa4c565e6e634f3c /kernel/smp.c | |
| parent | 8b5628ab83b671f96ac9f174c1bd51c92589fc82 (diff) | |
| parent | a47a376f1c025e23e836c0376813c0424de665c2 (diff) | |
Merge branch 'akpm' (incoming from Andrew)
Merge misc patches from Andrew Morton:
 - Florian has vanished so I appear to have become fbdev maintainer
   again :(
 - Joel and Mark are distracted to welcome the new OCFS2 maintainer
 - The backlight queue
 - Small core kernel changes
 - lib/ updates
 - The rtc queue
 - Various random bits
* akpm: (164 commits)
  rtc: rtc-davinci: use devm_*() functions
  rtc: rtc-max8997: use devm_request_threaded_irq()
  rtc: rtc-max8907: use devm_request_threaded_irq()
  rtc: rtc-da9052: use devm_request_threaded_irq()
  rtc: rtc-wm831x: use devm_request_threaded_irq()
  rtc: rtc-tps80031: use devm_request_threaded_irq()
  rtc: rtc-lp8788: use devm_request_threaded_irq()
  rtc: rtc-coh901331: use devm_clk_get()
  rtc: rtc-vt8500: use devm_*() functions
  rtc: rtc-tps6586x: use devm_request_threaded_irq()
  rtc: rtc-imxdi: use devm_clk_get()
  rtc: rtc-cmos: use dev_warn()/dev_dbg() instead of printk()/pr_debug()
  rtc: rtc-pcf8583: use dev_warn() instead of printk()
  rtc: rtc-sun4v: use pr_warn() instead of printk()
  rtc: rtc-vr41xx: use dev_info() instead of printk()
  rtc: rtc-rs5c313: use pr_err() instead of printk()
  rtc: rtc-at91rm9200: use dev_dbg()/dev_err() instead of printk()/pr_debug()
  rtc: rtc-rs5c372: use dev_dbg()/dev_warn() instead of printk()/pr_debug()
  rtc: rtc-ds2404: use dev_err() instead of printk()
  rtc: rtc-efi: use dev_err()/dev_warn()/pr_err() instead of printk()
  ...
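
The bulk of the rtc queue above is two mechanical conversions: manual resource setup replaced by managed devm_* helpers, and raw printk()/pr_debug() calls replaced by device-aware logging (dev_err(), dev_warn(), dev_dbg()). A minimal sketch of both patterns follows; it is illustrative only, not code from the series, and the foo_rtc driver and every name in it are hypothetical.

```c
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* "foo_rtc" is a made-up device; only the resource-management shape
 * mirrors the patches above. */
struct foo_rtc {
	int irq;
};

/* Threaded IRQ handler: runs in process context, may sleep. */
static irqreturn_t foo_rtc_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct foo_rtc *rtc;
	int ret;

	/* devm_kzalloc(): freed automatically when the device unbinds. */
	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->irq = platform_get_irq(pdev, 0);
	if (rtc->irq < 0)
		return rtc->irq;

	/*
	 * devm_request_threaded_irq() replaces request_threaded_irq():
	 * the IRQ is released by the device core on unbind, so probe
	 * error paths and the .remove() callback need no free_irq().
	 */
	ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
					foo_rtc_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), rtc);
	if (ret) {
		/* dev_warn() rather than printk(): tags the message
		 * with the driver and device name. */
		dev_warn(&pdev->dev, "IRQ %d request failed: %d\n",
			 rtc->irq, ret);
		return ret;
	}

	platform_set_drvdata(pdev, rtc);
	return 0;
}

static struct platform_driver foo_rtc_driver = {
	.driver	= { .name = "foo-rtc" },
	.probe	= foo_rtc_probe,
};
module_platform_driver(foo_rtc_driver);

MODULE_LICENSE("GPL");
```

The payoff of the devm_* variants is that the device core releases managed resources automatically on unbind, so probe error paths and the .remove() callback need no matching free_irq(), clk_put(), or kfree().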
Diffstat (limited to 'kernel/smp.c')
| -rw-r--r-- | kernel/smp.c | 183 |
|---|---|---|

1 file changed, 30 insertions(+), 153 deletions(-)
```diff
diff --git a/kernel/smp.c b/kernel/smp.c
index 69f38bd98b4..8e451f3ff51 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,22 +16,12 @@
 #include "smpboot.h"
 
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static struct {
-	struct list_head	queue;
-	raw_spinlock_t		lock;
-} call_function __cacheline_aligned_in_smp =
-	{
-		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
-	};
-
 enum {
 	CSD_FLAG_LOCK		= 0x01,
 };
 
 struct call_function_data {
-	struct call_single_data	csd;
-	atomic_t		refs;
+	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -60,6 +50,11 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		cfd->csd = alloc_percpu(struct call_single_data);
+		if (!cfd->csd) {
+			free_cpumask_var(cfd->cpumask);
+			return notifier_from_errno(-ENOMEM);
+		}
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -70,6 +65,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
+		free_percpu(cfd->csd);
 		break;
 #endif
 	};
@@ -171,85 +167,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 }
 
 /*
- * Invoked by arch to handle an IPI for call function. Must be called with
- * interrupts disabled.
- */
-void generic_smp_call_function_interrupt(void)
-{
-	struct call_function_data *data;
-	int cpu = smp_processor_id();
-
-	/*
-	 * Shouldn't receive this interrupt on a cpu that is not yet online.
-	 */
-	WARN_ON_ONCE(!cpu_online(cpu));
-
-	/*
-	 * Ensure entry is visible on call_function_queue after we have
-	 * entered the IPI. See comment in smp_call_function_many.
-	 * If we don't have this, then we may miss an entry on the list
-	 * and never get another IPI to process it.
-	 */
-	smp_mb();
-
-	/*
-	 * It's ok to use list_for_each_rcu() here even though we may
-	 * delete 'pos', since list_del_rcu() doesn't clear ->next
-	 */
-	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
-		int refs;
-		smp_call_func_t func;
-
-		/*
-		 * Since we walk the list without any locks, we might
-		 * see an entry that was completed, removed from the
-		 * list and is in the process of being reused.
-		 *
-		 * We must check that the cpu is in the cpumask before
-		 * checking the refs, and both must be set before
-		 * executing the callback on this cpu.
-		 */
-
-		if (!cpumask_test_cpu(cpu, data->cpumask))
-			continue;
-
-		smp_rmb();
-
-		if (atomic_read(&data->refs) == 0)
-			continue;
-
-		func = data->csd.func;		/* save for later warn */
-		func(data->csd.info);
-
-		/*
-		 * If the cpu mask is not still set then func enabled
-		 * interrupts (BUG), and this cpu took another smp call
-		 * function interrupt and executed func(info) twice
-		 * on this cpu.  That nested execution decremented refs.
-		 */
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
-			WARN(1, "%pf enabled interrupts and double executed\n", func);
-			continue;
-		}
-
-		refs = atomic_dec_return(&data->refs);
-		WARN_ON(refs < 0);
-
-		if (refs)
-			continue;
-
-		WARN_ON(!cpumask_empty(data->cpumask));
-
-		raw_spin_lock(&call_function.lock);
-		list_del_rcu(&data->csd.list);
-		raw_spin_unlock(&call_function.lock);
-
-		csd_unlock(&data->csd);
-	}
-
-}
-
-/*
  * Invoked by arch to handle an IPI for call function single. Must be
  * called from the arch with interrupts disabled.
  */
@@ -453,8 +370,7 @@ void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
 	struct call_function_data *data;
-	unsigned long flags;
-	int refs, cpu, next_cpu, this_cpu = smp_processor_id();
+	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -486,50 +402,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	data = &__get_cpu_var(cfd_data);
-	csd_lock(&data->csd);
-
-	/* This BUG_ON verifies our reuse assertions and can be removed */
-	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
-
-	/*
-	 * The global call function queue list add and delete are protected
-	 * by a lock, but the list is traversed without any lock, relying
-	 * on the rcu list add and delete to allow safe concurrent traversal.
-	 * We reuse the call function data without waiting for any grace
-	 * period after some other cpu removes it from the global queue.
-	 * This means a cpu might find our data block as it is being
-	 * filled out.
-	 *
-	 * We hold off the interrupt handler on the other cpu by
-	 * ordering our writes to the cpu mask vs our setting of the
-	 * refs counter.  We assert only the cpu owning the data block
-	 * will set a bit in cpumask, and each bit will only be cleared
-	 * by the subject cpu.  Each cpu must first find its bit is
-	 * set and then check that refs is set indicating the element is
-	 * ready to be processed, otherwise it must skip the entry.
-	 *
-	 * On the previous iteration refs was set to 0 by another cpu.
-	 * To avoid the use of transitivity, set the counter to 0 here
-	 * so the wmb will pair with the rmb in the interrupt handler.
-	 */
-	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
-
-	data->csd.func = func;
-	data->csd.info = info;
-	/* Ensure 0 refs is visible before mask.  Also orders func and info */
-	smp_wmb();
-
-	/* We rely on the "and" being processed before the store */
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
-	refs = cpumask_weight(data->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!refs)) {
-		csd_unlock(&data->csd);
+	if (unlikely(!cpumask_weight(data->cpumask)))
 		return;
-	}
 
 	/*
 	 * After we put an entry into the list, data->cpumask
@@ -537,34 +416,32 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * a SMP function call, so data->cpumask will be zero.
 	 */
 	cpumask_copy(data->cpumask_ipi, data->cpumask);
-	raw_spin_lock_irqsave(&call_function.lock, flags);
-	/*
-	 * Place entry at the _HEAD_ of the list, so that any cpu still
-	 * observing the entry in generic_smp_call_function_interrupt()
-	 * will not miss any other list entries:
-	 */
-	list_add_rcu(&data->csd.list, &call_function.queue);
-	/*
-	 * We rely on the wmb() in list_add_rcu to complete our writes
-	 * to the cpumask before this write to refs, which indicates
-	 * data is on the list and is ready to be processed.
-	 */
-	atomic_set(&data->refs, refs);
-	raw_spin_unlock_irqrestore(&call_function.lock, flags);
-	/*
-	 * Make the list addition visible before sending the ipi.
-	 * (IPIs must obey or appear to obey normal Linux cache
-	 * coherency rules -- see comment in generic_exec_single).
-	 */
-	smp_mb();
+	for_each_cpu(cpu, data->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+		struct call_single_queue *dst =
+					&per_cpu(call_single_queue, cpu);
+		unsigned long flags;
+
+		csd_lock(csd);
+		csd->func = func;
+		csd->info = info;
+
+		raw_spin_lock_irqsave(&dst->lock, flags);
+		list_add_tail(&csd->list, &dst->list);
+		raw_spin_unlock_irqrestore(&dst->lock, flags);
+	}
 
 	/* Send a message to all CPUs in the map */
 	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
-	/* Optionally wait for the CPUs to complete */
-	if (wait)
-		csd_lock_wait(&data->csd);
+	if (wait) {
+		for_each_cpu(cpu, data->cpumask) {
+			struct call_single_data *csd =
+					per_cpu_ptr(data->csd, cpu);
+			csd_lock_wait(csd);
+		}
+	}
 }
 EXPORT_SYMBOL(smp_call_function_many);
```
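For context on what the rewrite above changes: the old implementation chained one shared call_function_data onto a global RCU-traversed queue and used a refs counter plus careful memory barriers to coordinate with every target CPU; the new one pre-allocates a call_single_data per CPU and simply appends each target's element to its existing call_single_queue, so remote CPUs only ever touch their own entry. The external contract of smp_call_function_many() is unchanged. A minimal caller sketch, assuming a made-up flush_local_counters() callback:

```c
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Hypothetical callback: runs from IPI context on each target CPU,
 * with interrupts disabled, so it must not sleep. */
static void flush_local_counters(void *info)
{
	/* per-CPU work goes here */
}

static void flush_all_other_cpus(void)
{
	/*
	 * Preemption must be disabled across the call; the current CPU
	 * is deliberately skipped by smp_call_function_many() and can
	 * handle its own work directly.
	 */
	preempt_disable();
	smp_call_function_many(cpu_online_mask, flush_local_counters,
			       NULL, true);	/* true: wait for all CPUs */
	preempt_enable();
}
```

Because func runs from the IPI path on each remote CPU, it executes with interrupts disabled and must not sleep; with wait=true the caller spins in csd_lock_wait() until every per-CPU entry has been unlocked by its handler.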