| -rw-r--r-- | include/linux/interrupt.h | 38  |
| -rw-r--r-- | include/linux/irq.h       | 16  |
| -rw-r--r-- | include/linux/irqdesc.h   | 1   |
| -rw-r--r-- | kernel/irq/chip.c         | 64  |
| -rw-r--r-- | kernel/irq/internals.h    | 19  |
| -rw-r--r-- | kernel/irq/irqdesc.c      | 32  |
| -rw-r--r-- | kernel/irq/manage.c       | 202 |
| -rw-r--r-- | kernel/irq/settings.h     | 7   |
8 files changed, 345 insertions, 34 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a103732b758..1cdfd09c8ab 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -95,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags:	flags (see IRQF_* above)
  * @name:	name of the device
  * @dev_id:	cookie to identify the device
+ * @percpu_dev_id:	cookie to identify the device
  * @next:	pointer to the next irqaction for shared interrupts
  * @irq:	interrupt number
  * @dir:	pointer to the proc/irq/NN/name entry
@@ -104,17 +105,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_mask:	bitmask for keeping track of @thread activity
  */
 struct irqaction {
-	irq_handler_t handler;
-	unsigned long flags;
-	void *dev_id;
-	struct irqaction *next;
-	int irq;
-	irq_handler_t thread_fn;
-	struct task_struct *thread;
-	unsigned long thread_flags;
-	unsigned long thread_mask;
-	const char *name;
-	struct proc_dir_entry *dir;
+	irq_handler_t		handler;
+	unsigned long		flags;
+	void			*dev_id;
+	void __percpu		*percpu_dev_id;
+	struct irqaction	*next;
+	int			irq;
+	irq_handler_t		thread_fn;
+	struct task_struct	*thread;
+	unsigned long		thread_flags;
+	unsigned long		thread_mask;
+	const char		*name;
+	struct proc_dir_entry	*dir;
 } ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -136,6 +138,10 @@ extern int __must_check
 request_any_context_irq(unsigned int irq, irq_handler_t handler,
 			unsigned long flags, const char *name, void *dev_id);
 
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
@@ -164,10 +170,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev_id);
 }
 
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id)
+{
+	return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
 
 struct device;
 
@@ -207,7 +221,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq);
 
 /* The following three functions are for the core kernel use only. */
 #ifdef CONFIG_GENERIC_HARDIRQS
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 73e31abeba1..59e49c80cc2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -66,6 +66,7 @@ typedef	void (*irq_preflow_handler_t)(struct irq_data *data);
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_TRHEAD		- Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -88,12 +89,13 @@ enum {
 	IRQ_MOVE_PCNTXT		= (1 << 14),
 	IRQ_NESTED_THREAD	= (1 << 15),
 	IRQ_NOTHREAD		= (1 << 16),
+	IRQ_PER_CPU_DEVID	= (1 << 17),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -367,6 +369,8 @@ enum {
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
@@ -394,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
@@ -422,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
 }
 
+extern int irq_set_percpu_devid(unsigned int irq);
+
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
@@ -483,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
 		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
 }
 
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+	irq_set_status_flags(irq,
+			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 150134ac709..6b69c2c9dff 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -53,6 +53,7 @@ struct irq_desc {
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
 	raw_spinlock_t		lock;
+	struct cpumask		*percpu_enabled;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index dc5114b4c16..f7c543a801d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -26,7 +26,7 @@
 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
 int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
 int irq_set_handler_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 int irq_set_chip_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_enable)
+		desc->irq_data.chip->irq_enable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_disable)
+		desc->irq_data.chip->irq_disable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
+	cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)
@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq:	the interrupt number
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+	irqreturn_t res;
+
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
 void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 
 	if (!desc)
 		return;
@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6546431447d..a73dd6c7372 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 
@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+#define _IRQ_DESC_CHECK		(1 << 0)
+#define _IRQ_DESC_PERCPU	(1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check);
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
 
 static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, true);
+	return __irq_get_desc_lock(irq, flags, true, check);
 }
 
 static inline void
@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
 }
 
 static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, false);
+	return __irq_get_desc_lock(irq, flags, false, check);
 }
 
 static inline void
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 039b889ea05..1550e8447a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
 }
 
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (desc) {
+		if (check & _IRQ_DESC_CHECK) {
+			if ((check & _IRQ_DESC_PERCPU) &&
+			    !irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+
+			if (!(check & _IRQ_DESC_PERCPU) &&
+			    irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+		}
+
 		if (bus)
 			chip_bus_lock(desc);
 		raw_spin_lock_irqsave(&desc->lock, *flags);
@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 		chip_bus_sync_unlock(desc);
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->percpu_enabled)
+		return -EINVAL;
+
+	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+	if (!desc->percpu_enabled)
+		return -ENOMEM;
+
+	irq_set_percpu_devid_flags(irq);
+	return 0;
+}
+
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq:	irq number to initialize
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e1a3ed1e61..7b4b156d065 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 static int __disable_irq_nosync(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 void enable_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return;
@@ -491,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -532,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 	int canrequest = 0;
 
 	if (!desc)
@@ -1121,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
@@ -1129,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
- /*
+/*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
  */
@@ -1227,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
  */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
-	__free_irq(irq, act->dev_id);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+	    __free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 
@@ -1249,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!desc)
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return;
 
 #ifdef CONFIG_SMP
@@ -1327,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (!irq_settings_can_request(desc))
+	if (!irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 
 	if (!handler) {
@@ -1412,3 +1418,181 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return !ret ? IRQC_IS_HARDIRQ : ret;
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	irq_percpu_enable(desc, cpu);
+	irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	irq_percpu_disable(desc, cpu);
+	irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+	if (!desc)
+		return NULL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	action = desc->action;
+	if (!action || action->percpu_dev_id != dev_id) {
+		WARN(1, "Trying to free already-free IRQ %d\n", irq);
+		goto bad;
+	}
+
+	if (!cpumask_empty(desc->percpu_enabled)) {
+		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+		     irq, cpumask_first(desc->percpu_enabled));
+		goto bad;
+	}
+
+	/* Found it - now remove it from the list of entries: */
+	desc->action = NULL;
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	module_put(desc->owner);
+	return action;
+
+bad:
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	return NULL;
+}
+
+/**
+ *	remove_percpu_irq - free a per-cpu interrupt
+ *	@irq: Interrupt line to free
+ *	@act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && irq_settings_is_per_cpu_devid(desc))
+	    __free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ *	@irq: Interrupt line to free
+ *	@dev_id: Device identity to free
+ *
+ *	Remove a percpu interrupt handler. The handler is removed, but
+ *	the interrupt line is not disabled. This must be done on each
+ *	CPU before calling this function. The function does not return
+ *	until any executing interrupts for this IRQ have completed.
+ *
+ *	This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	chip_bus_lock(desc);
+	kfree(__free_percpu_irq(irq, dev_id));
+	chip_bus_sync_unlock(desc);
+}
+
+/**
+ *	setup_percpu_irq - setup a per-cpu interrupt
+ *	@irq: Interrupt line to setup
+ *	@act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int retval;
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
+}
+
+/**
+ *	request_percpu_irq - allocate a percpu interrupt line
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs.
+ *	@devname: An ascii name for the claiming device
+ *	@dev_id: A percpu cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources, but doesn't
+ *	automatically enable the interrupt. It has to be done on each
+ *	CPU using enable_percpu_irq().
+ *
+ *	Dev_id must be globally unique. It is a per-cpu variable, and
+ *	the handler gets called with the interrupted CPU's instance of
+ *	that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		       const char *devname, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	int retval;
+
+	if (!dev_id)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU;
+	action->name = devname;
+	action->percpu_dev_id = dev_id;
+
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(desc);
+
+	if (retval)
+		kfree(action);
+
+	return retval;
+}
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index f1667833d44..1162f1030f1 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -13,6 +13,7 @@ enum {
 	_IRQ_MOVE_PCNTXT	= IRQ_MOVE_PCNTXT,
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
+	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -24,6 +25,7 @@ enum {
 #define IRQ_NOTHREAD		GOT_YOU_MORON
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 	return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
 	desc->status_use_accessors |= _IRQ_PER_CPU;
}
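For context, a minimal driver-side sketch of how the interfaces added by this patch fit together is shown below. This is a hypothetical example, not part of the patch: the names my_evt, my_handler, my_irq, and the use of on_each_cpu() to run the per-CPU enable/disable are illustrative assumptions. irq_set_percpu_devid() and the handle_percpu_devid_irq flow handler would normally be set up by the irqchip/platform code rather than by the driver itself.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU device data; one instance exists on every CPU. */
struct my_evt {
	unsigned long count;
};

static DEFINE_PER_CPU(struct my_evt, my_evt);
static unsigned int my_irq;	/* per-CPU interrupt (e.g. a PPI), assumed obtained from the platform */

/* handle_percpu_devid_irq() hands the handler the interrupted CPU's
 * instance of the percpu cookie, so no locking is needed here. */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_evt *evt = dev_id;

	evt->count++;
	return IRQ_HANDLED;
}

/* enable_percpu_irq()/disable_percpu_irq() only affect the calling CPU. */
static void my_enable_this_cpu(void *unused)
{
	enable_percpu_irq(my_irq);
}

static void my_disable_this_cpu(void *unused)
{
	disable_percpu_irq(my_irq);
}

static int __init my_init(void)
{
	int err;

	/* Usually done by the irqchip code, together with
	 * irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq). */
	err = irq_set_percpu_devid(my_irq);
	if (err)
		return err;

	/* Registers the single shared irqaction; the line stays disabled
	 * (IRQ_NOAUTOEN) until each CPU enables it for itself. */
	err = request_percpu_irq(my_irq, my_handler, "my-evt", &my_evt);
	if (err)
		return err;

	on_each_cpu(my_enable_this_cpu, NULL, 1);
	return 0;
}

static void __exit my_exit(void)
{
	/* Every CPU must disable its side before the action is freed. */
	on_each_cpu(my_disable_this_cpu, NULL, 1);
	free_percpu_irq(my_irq, &my_evt);
}

Unlike request_irq(), the percpu variant registers one irqaction whose percpu_dev_id is dereferenced per CPU with __this_cpu_ptr() at handling time, and the interrupt is marked IRQ_NOAUTOEN so each CPU has to call enable_percpu_irq() for itself; free_percpu_irq() in turn refuses to tear the action down while any CPU still has the line enabled.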