diff options
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/irq/chip.c          |  5
 -rw-r--r--  kernel/irq/handle.c        | 26
 -rw-r--r--  kernel/irq/manage.c        | 12
 -rw-r--r--  kernel/irq/migration.c     | 12
 -rw-r--r--  kernel/irq/numa_migrate.c  | 12
 -rw-r--r--  kernel/irq/proc.c          |  4
6 files changed, 43 insertions, 28 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f63c706d25e..c248eba98b4 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)  	desc->irq_count = 0;  	desc->irqs_unhandled = 0;  #ifdef CONFIG_SMP -	cpumask_setall(&desc->affinity); +	cpumask_setall(desc->affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ +	cpumask_clear(desc->pending_mask); +#endif  #endif  	spin_unlock_irqrestore(&desc->lock, flags);  } diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index c20db0be917..b8fa1354f01 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -64,9 +64,6 @@ static struct irq_desc irq_desc_init = {  	.handle_irq = handle_bad_irq,  	.depth      = 1,  	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), -#ifdef CONFIG_SMP -	.affinity   = CPU_MASK_ALL -#endif  };  void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) @@ -88,6 +85,8 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)  static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)  { +	int node = cpu_to_node(cpu); +  	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));  	spin_lock_init(&desc->lock); @@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)  		printk(KERN_ERR "can not alloc kstat_irqs\n");  		BUG_ON(1);  	} +	if (!init_alloc_desc_masks(desc, node, false)) { +		printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); +		BUG_ON(1); +	}  	arch_init_chip_data(desc, cpu);  } @@ -119,9 +122,6 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm  		.handle_irq = handle_bad_irq,  		.depth	    = 1,  		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), -#ifdef CONFIG_SMP -		.affinity   = CPU_MASK_ALL -#endif  	}  }; @@ -141,7 +141,7 @@ int __init early_irq_init(void)  		desc[i].irq = i;  		desc[i].kstat_irqs = kstat_irqs_legacy[i];  		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - +		init_alloc_desc_masks(&desc[i], 0, 
true);  		irq_desc_ptrs[i] = desc + i;  	} @@ -188,6 +188,10 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)  		printk(KERN_ERR "can not alloc irq_desc\n");  		BUG_ON(1);  	} +	if (!init_alloc_desc_masks(desc, node, false)) { +		printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); +		BUG_ON(1); +	}  	init_one_irq_desc(irq, desc, cpu);  	irq_desc_ptrs[irq] = desc; @@ -207,9 +211,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {  		.handle_irq = handle_bad_irq,  		.depth = 1,  		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), -#ifdef CONFIG_SMP -		.affinity = CPU_MASK_ALL -#endif  	}  }; @@ -222,9 +223,10 @@ int __init early_irq_init(void)  	desc = irq_desc;  	count = ARRAY_SIZE(irq_desc); -	for (i = 0; i < count; i++) +	for (i = 0; i < count; i++) {  		desc[i].irq = i; - +		init_alloc_desc_masks(&desc[i], 0, true); +	}  	return arch_early_irq_init();  } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index cd0cd8dcb34..b98739af455 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)  #ifdef CONFIG_GENERIC_PENDING_IRQ  	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { -		cpumask_copy(&desc->affinity, cpumask); +		cpumask_copy(desc->affinity, cpumask);  		desc->chip->set_affinity(irq, cpumask);  	} else {  		desc->status |= IRQ_MOVE_PENDING; -		cpumask_copy(&desc->pending_mask, cpumask); +		cpumask_copy(desc->pending_mask, cpumask);  	}  #else -	cpumask_copy(&desc->affinity, cpumask); +	cpumask_copy(desc->affinity, cpumask);  	desc->chip->set_affinity(irq, cpumask);  #endif  	desc->status |= IRQ_AFFINITY_SET; @@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)  	 * one of the targets is online.  	 
*/  	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { -		if (cpumask_any_and(&desc->affinity, cpu_online_mask) +		if (cpumask_any_and(desc->affinity, cpu_online_mask)  		    < nr_cpu_ids)  			goto set_affinity;  		else  			desc->status &= ~IRQ_AFFINITY_SET;  	} -	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); +	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);  set_affinity: -	desc->chip->set_affinity(irq, &desc->affinity); +	desc->chip->set_affinity(irq, desc->affinity);  	return 0;  } diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index bd72329e630..e05ad9be43b 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -18,7 +18,7 @@ void move_masked_irq(int irq)  	desc->status &= ~IRQ_MOVE_PENDING; -	if (unlikely(cpumask_empty(&desc->pending_mask))) +	if (unlikely(cpumask_empty(desc->pending_mask)))  		return;  	if (!desc->chip->set_affinity) @@ -38,13 +38,13 @@ void move_masked_irq(int irq)  	 * For correct operation this depends on the caller  	 * masking the irqs.  	 
*/ -	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) +	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)  		   < nr_cpu_ids)) { -		cpumask_and(&desc->affinity, -			    &desc->pending_mask, cpu_online_mask); -		desc->chip->set_affinity(irq, &desc->affinity); +		cpumask_and(desc->affinity, +			    desc->pending_mask, cpu_online_mask); +		desc->chip->set_affinity(irq, desc->affinity);  	} -	cpumask_clear(&desc->pending_mask); +	cpumask_clear(desc->pending_mask);  }  void move_native_irq(int irq) diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index ecf765c6a77..f001a4ea641 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -46,6 +46,7 @@ static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,  	desc->cpu = cpu;  	lockdep_set_class(&desc->lock, &irq_desc_lock_class);  	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); +	init_copy_desc_masks(old_desc, desc);  	arch_init_copy_chip_data(old_desc, desc, cpu);  } @@ -76,11 +77,20 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,  	node = cpu_to_node(cpu);  	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);  	if (!desc) { -		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq); +		printk(KERN_ERR "irq %d: can not get new irq_desc " +				"for migration.\n", irq);  		/* still use old one */  		desc = old_desc;  		goto out_unlock;  	} +	if (!init_alloc_desc_masks(desc, node, false)) { +		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " +				"for migration.\n", irq); +		/* still use old one */ +		kfree(desc); +		desc = old_desc; +		goto out_unlock; +	}  	init_copy_one_irq_desc(irq, old_desc, desc, cpu);  	irq_desc_ptrs[irq] = desc; diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index aae3f742bce..692363dd591 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;  static int irq_affinity_proc_show(struct 
seq_file *m, void *v)  {  	struct irq_desc *desc = irq_to_desc((long)m->private); -	const struct cpumask *mask = &desc->affinity; +	const struct cpumask *mask = desc->affinity;  #ifdef CONFIG_GENERIC_PENDING_IRQ  	if (desc->status & IRQ_MOVE_PENDING) -		mask = &desc->pending_mask; +		mask = desc->pending_mask;  #endif  	seq_cpumask(m, mask);  	seq_putc(m, '\n');  |