Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--	kernel/irq/handle.c	189
1 file changed, 179 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c815b42d0f5..6492400cb50 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+	.irq	    = -1,
+	.status	    = IRQ_DISABLED,
+	.chip	    = &no_irq_chip,
+	.handle_irq = handle_bad_irq,
+	.depth      = 1,
+	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+	.affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+	unsigned long bytes;
+	char *ptr;
+	int node;
+
+	/* Compute how many bytes we need per irq and allocate them */
+	bytes = nr * sizeof(unsigned int);
+
+	node = cpu_to_node(cpu);
+	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+	if (ptr)
+		desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+	desc->irq = irq;
+#ifdef CONFIG_SMP
+	desc->cpu = cpu;
+#endif
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "can not alloc kstat_irqs\n");
+		BUG_ON(1);
+	}
+	arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS_LEGACY-1] = {
+		.irq	    = -1,
+		.status	    = IRQ_DISABLED,
+		.chip	    = &no_irq_chip,
+		.handle_irq = handle_bad_irq,
+		.depth	    = 1,
+		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+		.affinity   = CPU_MASK_ALL
+#endif
+	}
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+void __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int legacy_count;
+	int i;
+
+	desc = irq_desc_legacy;
+	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+	for (i = 0; i < legacy_count; i++) {
+		desc[i].irq = i;
+		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+
+		irq_desc_ptrs[i] = desc + i;
+	}
+
+	for (i = legacy_count; i < NR_IRQS; i++)
+		irq_desc_ptrs[i] = NULL;
+
+	arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int node;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+				irq, NR_IRQS);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		return desc;
+
+	spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		goto out_unlock;
+
+	node = cpu_to_node(cpu);
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
+		 irq, cpu, node);
+	if (!desc) {
+		printk(KERN_ERR "can not alloc irq_desc\n");
+		BUG_ON(1);
+	}
+	init_one_irq_desc(irq, desc, cpu);
+
+	irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
+#else
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
+#endif
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -179,8 +337,11 @@ unsigned int __do_IRQ(unsigned int irq)
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack)
+		if (desc->chip->ack) {
 			desc->chip->ack(irq);
+			/* get new one */
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -191,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack)
+	if (desc->chip->ack) {
 		desc->chip->ack(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested
@@ -259,19 +422,25 @@ out:
 }
 #endif
 
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
 	struct irq_desc *desc;
 	int i;
 
-	for_each_irq_desc(i, desc)
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
+
 		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	}
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->kstat_irqs[cpu];
 }
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
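Note on the allocation scheme: irq_to_desc_alloc_cpu() above uses double-checked locking to populate irq_desc_ptrs[] lazily. The fast path reads the slot without taking sparse_irq_lock; only on a miss does it take the lock, re-check the slot (another CPU may have allocated it in the meantime), and then allocate the descriptor on the node of the requesting CPU. Below is a minimal userspace C sketch of the same pattern, for illustration only: the names (desc_table, get_or_alloc_desc, NR_SLOTS) are hypothetical and not part of this patch, and a strictly conforming concurrent version would use C11 atomics for the unlocked read (the kernel gets away with a plain load because aligned pointer stores are atomic on the architectures it supports).

/*
 * Userspace sketch (NOT kernel code) of the lazy, lock-protected
 * descriptor allocation used by irq_to_desc_alloc_cpu() above.
 * All names here are illustrative. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 16

struct desc {
	int id;
	unsigned int count;
};

static struct desc *desc_table[NR_SLOTS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct desc *get_or_alloc_desc(unsigned int id)
{
	struct desc *d;

	if (id >= NR_SLOTS)
		return NULL;

	/* Fast path: no lock taken if the slot is already populated. */
	d = desc_table[id];
	if (d)
		return d;

	pthread_mutex_lock(&table_lock);

	/* Re-check under the lock: another thread may have raced us here. */
	d = desc_table[id];
	if (d)
		goto out_unlock;

	d = calloc(1, sizeof(*d));
	if (d) {
		d->id = (int)id;
		desc_table[id] = d;
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);
	return d;
}

int main(void)
{
	struct desc *d = get_or_alloc_desc(3);

	if (d)
		printf("allocated descriptor for slot %d\n", d->id);
	return 0;
}

Taking the lock only on the miss path keeps the common lookup cheap; the re-check under the lock is what prevents two racing CPUs from each installing a descriptor for the same irq.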