Diffstat (limited to 'arch/powerpc/sysdev')
24 files changed, 1612 insertions, 182 deletions
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index 396582835cb..d775fd148d1 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -12,3 +12,13 @@ config PPC_MSI_BITMAP  	depends on PCI_MSI  	default y if MPIC  	default y if FSL_PCI + +source "arch/powerpc/sysdev/xics/Kconfig" + +config PPC_SCOM +	bool + +config SCOM_DEBUGFS +	bool "Expose SCOM controllers via debugfs" +	depends on PPC_SCOM +	default n diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 1e0c933ef77..6076e0074a8 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -57,3 +57,9 @@ obj-$(CONFIG_PPC_MPC52xx)	+= mpc5xxx_clocks.o  ifeq ($(CONFIG_SUSPEND),y)  obj-$(CONFIG_6xx)		+= 6xx-suspend.o  endif + +obj-$(CONFIG_PPC_SCOM)		+= scom.o + +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-$(CONFIG_PPC_XICS)		+= xics/ diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 1636dd89670..bd0d54060b9 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device)  			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);  	bank->ph_addr = resource.start; -	bank->io_addr = (unsigned long) ioremap_flags( +	bank->io_addr = (unsigned long) ioremap_prot(  			bank->ph_addr, bank->size, _PAGE_NO_CACHE);  	if (bank->io_addr == 0) {  		dev_err(&device->dev, "ioremap() failed\n"); diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index e0bc944eb23..350787c83e2 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -58,21 +58,21 @@ static struct irq_host *cpm_pic_host;  static void cpm_mask_irq(struct irq_data *d)  { -	unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);  	clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));  }  static void cpm_unmask_irq(struct irq_data *d)  { -	unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);  	setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));  }  static void cpm_end_irq(struct irq_data *d)  { -	unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);  	out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));  } @@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void)  		goto end;  	/* Initialize the CPM interrupt controller. 
*/ -	hwirq = (unsigned int)irq_map[sirq].hwirq; +	hwirq = (unsigned int)virq_to_hw(sirq);  	out_be32(&cpic_reg->cpic_cicr,  	    (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |  		((hwirq/2) << 13) | CICR_HP_MASK); diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 5495c1be472..bcab50e2a9e 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -81,7 +81,7 @@ static const u_char irq_to_siubit[] = {  static void cpm2_mask_irq(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = virq_to_hw(d->irq); +	unsigned int irq_nr = irqd_to_hwirq(d);  	bit = irq_to_siubit[irq_nr];  	word = irq_to_siureg[irq_nr]; @@ -93,7 +93,7 @@ static void cpm2_mask_irq(struct irq_data *d)  static void cpm2_unmask_irq(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = virq_to_hw(d->irq); +	unsigned int irq_nr = irqd_to_hwirq(d);  	bit = irq_to_siubit[irq_nr];  	word = irq_to_siureg[irq_nr]; @@ -105,7 +105,7 @@ static void cpm2_unmask_irq(struct irq_data *d)  static void cpm2_ack(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = virq_to_hw(d->irq); +	unsigned int irq_nr = irqd_to_hwirq(d);  	bit = irq_to_siubit[irq_nr];  	word = irq_to_siureg[irq_nr]; @@ -116,7 +116,7 @@ static void cpm2_ack(struct irq_data *d)  static void cpm2_end_irq(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = virq_to_hw(d->irq); +	unsigned int irq_nr = irqd_to_hwirq(d);  	bit = irq_to_siubit[irq_nr];  	word = irq_to_siureg[irq_nr]; @@ -133,7 +133,7 @@ static void cpm2_end_irq(struct irq_data *d)  static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)  { -	unsigned int src = virq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned int vold, vnew, edibit;  	/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c index 54fb1922fe3..11641589917 100644 --- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c @@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev,  		goto out_free;  	} -	cache_sram->base_virt = ioremap_flags(cache_sram->base_phys, +	cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,  				cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);  	if (!cache_sram->base_virt) { -		dev_err(&dev->dev, "%s: ioremap_flags failed\n", +		dev_err(&dev->dev, "%s: ioremap_prot failed\n",  				dev->dev.of_node->full_name);  		ret = -ENOMEM;  		goto out_release; diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index d5679dc1e20..92e78333c47 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -110,7 +110,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)  	list_for_each_entry(entry, &pdev->msi_list, list) {  		if (entry->irq == NO_IRQ)  			continue; -		msi_data = irq_get_handler_data(entry->irq); +		msi_data = irq_get_chip_data(entry->irq);  		irq_set_msi_desc(entry->irq, NULL);  		msi_bitmap_free_hwirqs(&msi_data->bitmap,  				       virq_to_hw(entry->irq), 1); @@ -168,7 +168,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)  			rc = -ENOSPC;  			goto out_free;  		} -		irq_set_handler_data(virq, msi_data); +		/* chip_data is msi_data via host->hostdata in host->map() */  		irq_set_msi_desc(virq, entry);  		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); @@ -193,7 +193,7 @@ static void fsl_msi_cascade(unsigned int irq, struct 
irq_desc *desc)  	u32 have_shift = 0;  	struct fsl_msi_cascade_data *cascade_data; -	cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq); +	cascade_data = irq_get_handler_data(irq);  	msi_data = cascade_data->msi_data;  	raw_spin_lock(&desc->lock); @@ -253,7 +253,7 @@ unlock:  static int fsl_of_msi_remove(struct platform_device *ofdev)  { -	struct fsl_msi *msi = ofdev->dev.platform_data; +	struct fsl_msi *msi = platform_get_drvdata(ofdev);  	int virq, i;  	struct fsl_msi_cascade_data *cascade_data; @@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,  	return 0;  } +static const struct of_device_id fsl_of_msi_ids[];  static int __devinit fsl_of_msi_probe(struct platform_device *dev)  { +	const struct of_device_id *match;  	struct fsl_msi *msi;  	struct resource res;  	int err, i, j, irq_index, count; @@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)  	u32 offset;  	static const u32 all_avail[] = { 0, NR_MSI_IRQS }; -	if (!dev->dev.of_match) +	match = of_match_device(fsl_of_msi_ids, &dev->dev); +	if (!match)  		return -EINVAL; -	features = dev->dev.of_match->data; +	features = match->data;  	printk(KERN_DEBUG "Setting up Freescale MSI support\n"); @@ -327,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)  		dev_err(&dev->dev, "No memory for MSI structure\n");  		return -ENOMEM;  	} -	dev->dev.platform_data = msi; +	platform_set_drvdata(dev, msi);  	msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,  				      NR_MSI_IRQS, &fsl_msi_host_ops, 0); diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 142770cb84b..d18bb27e4df 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -185,18 +185,6 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,  	return 0;  } -static void i8259_host_unmap(struct irq_host *h, unsigned int virq) -{ -	/* Make sure irq is masked in hardware */ -	i8259_mask_irq(irq_get_irq_data(virq)); - -	/* remove chip and handler */ -	irq_set_chip_and_handler(virq, NULL, NULL); - -	/* Make sure it's completed */ -	synchronize_irq(virq); -} -  static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,  			    const u32 *intspec, unsigned int intsize,  			    irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,  static struct irq_host_ops i8259_host_ops = {  	.match = i8259_host_match,  	.map = i8259_host_map, -	.unmap = i8259_host_unmap,  	.xlate = i8259_host_xlate,  }; diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index fa438be962b..7367d17364c 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -18,7 +18,7 @@  #include <linux/stddef.h>  #include <linux/sched.h>  #include <linux/signal.h> -#include <linux/sysdev.h> +#include <linux/syscore_ops.h>  #include <linux/device.h>  #include <linux/bootmem.h>  #include <linux/spinlock.h> @@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq)  	return primary_ipic;  } -#define ipic_irq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq) -  static void ipic_unmask_irq(struct irq_data *d)  {  	struct ipic *ipic = ipic_from_irq(d->irq); -	unsigned int src = ipic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; @@ -542,7 +540,7 @@ static void ipic_unmask_irq(struct irq_data *d)  static void ipic_mask_irq(struct irq_data *d)  {  	
struct ipic *ipic = ipic_from_irq(d->irq); -	unsigned int src = ipic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; @@ -562,7 +560,7 @@ static void ipic_mask_irq(struct irq_data *d)  static void ipic_ack_irq(struct irq_data *d)  {  	struct ipic *ipic = ipic_from_irq(d->irq); -	unsigned int src = ipic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; @@ -581,7 +579,7 @@ static void ipic_ack_irq(struct irq_data *d)  static void ipic_mask_irq_and_ack(struct irq_data *d)  {  	struct ipic *ipic = ipic_from_irq(d->irq); -	unsigned int src = ipic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; @@ -604,7 +602,7 @@ static void ipic_mask_irq_and_ack(struct irq_data *d)  static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)  {  	struct ipic *ipic = ipic_from_irq(d->irq); -	unsigned int src = ipic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned int vold, vnew, edibit;  	if (flow_type == IRQ_TYPE_NONE) @@ -793,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)  int ipic_set_priority(unsigned int virq, unsigned int priority)  {  	struct ipic *ipic = ipic_from_irq(virq); -	unsigned int src = ipic_irq_to_hw(virq); +	unsigned int src = virq_to_hw(virq);  	u32 temp;  	if (priority > 7) @@ -821,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority)  void ipic_set_highest_priority(unsigned int virq)  {  	struct ipic *ipic = ipic_from_irq(virq); -	unsigned int src = ipic_irq_to_hw(virq); +	unsigned int src = virq_to_hw(virq);  	u32 temp;  	temp = ipic_read(ipic->regs, IPIC_SICFR); @@ -902,7 +900,7 @@ static struct {  	u32 sercr;  } ipic_saved_state; -static int ipic_suspend(struct sys_device *sdev, pm_message_t state) +static int ipic_suspend(void)  {  	struct ipic *ipic = primary_ipic; @@ -933,7 +931,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state)  	return 0;  } -static int ipic_resume(struct sys_device *sdev) +static void ipic_resume(void)  {  	struct ipic *ipic = primary_ipic; @@ -949,44 +947,26 @@ static int ipic_resume(struct sys_device *sdev)  	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);  	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);  	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); - -	return 0;  }  #else  #define ipic_suspend NULL  #define ipic_resume NULL  #endif -static struct sysdev_class ipic_sysclass = { -	.name = "ipic", +static struct syscore_ops ipic_syscore_ops = {  	.suspend = ipic_suspend,  	.resume = ipic_resume,  }; -static struct sys_device device_ipic = { -	.id		= 0, -	.cls		= &ipic_sysclass, -}; - -static int __init init_ipic_sysfs(void) +static int __init init_ipic_syscore(void)  { -	int rc; -  	if (!primary_ipic || !primary_ipic->regs)  		return -ENODEV; -	printk(KERN_DEBUG "Registering ipic with sysfs...\n"); -	rc = sysdev_class_register(&ipic_sysclass); -	if (rc) { -		printk(KERN_ERR "Failed registering ipic sys class\n"); -		return -ENODEV; -	} -	rc = sysdev_register(&device_ipic); -	if (rc) { -		printk(KERN_ERR "Failed registering ipic sys device\n"); -		return -ENODEV; -	} +	printk(KERN_DEBUG "Registering ipic system core operations\n"); +	register_syscore_ops(&ipic_syscore_ops); +  	return 0;  } -subsys_initcall(init_ipic_sysfs); +subsys_initcall(init_ipic_syscore); diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 20732420906..ddc877a3a23 100644 
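(Note on the ipic change above: the old sysdev_class/sys_device power-management hooks are replaced by struct syscore_ops. The following is only a rough sketch of that interface, not part of the patch; the my_pic_* names are hypothetical. Syscore suspend callbacks return int, resume callbacks return void, and neither takes a device argument.)

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    /* Hypothetical callbacks: save/restore controller state around suspend. */
    static int my_pic_suspend(void)
    {
            /* save interrupt controller registers here */
            return 0;
    }

    static void my_pic_resume(void)
    {
            /* restore interrupt controller registers here */
    }

    static struct syscore_ops my_pic_syscore_ops = {
            .suspend = my_pic_suspend,
            .resume  = my_pic_resume,
    };

    static int __init my_pic_init_syscore(void)
    {
            /* No sysdev_class_register()/sysdev_register() pair any more;
             * a single registration call is enough. */
            register_syscore_ops(&my_pic_syscore_ops);
            return 0;
    }
    subsys_initcall(my_pic_init_syscore);
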
--- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c @@ -115,6 +115,8 @@ int __init mmio_nvram_init(void)  	int ret;  	nvram_node = of_find_node_by_type(NULL, "nvram"); +	if (!nvram_node) +		nvram_node = of_find_compatible_node(NULL, NULL, "nvram");  	if (!nvram_node) {  		printk(KERN_WARNING "nvram: no node found in device-tree\n");  		return -ENODEV; diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index a88800ff4d0..20924f2246f 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -28,7 +28,7 @@ int cpm_get_irq(struct pt_regs *regs);  static void mpc8xx_unmask_irq(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);  	bit = irq_nr & 0x1f;  	word = irq_nr >> 5; @@ -40,7 +40,7 @@ static void mpc8xx_unmask_irq(struct irq_data *d)  static void mpc8xx_mask_irq(struct irq_data *d)  {  	int	bit, word; -	unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);  	bit = irq_nr & 0x1f;  	word = irq_nr >> 5; @@ -52,7 +52,7 @@ static void mpc8xx_mask_irq(struct irq_data *d)  static void mpc8xx_ack(struct irq_data *d)  {  	int	bit; -	unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);  	bit = irq_nr & 0x1f;  	out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); @@ -61,7 +61,7 @@ static void mpc8xx_ack(struct irq_data *d)  static void mpc8xx_end_irq(struct irq_data *d)  {  	int bit, word; -	unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; +	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);  	bit = irq_nr & 0x1f;  	word = irq_nr >> 5; @@ -73,7 +73,7 @@ static void mpc8xx_end_irq(struct irq_data *d)  static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)  {  	if (flow_type & IRQ_TYPE_EDGE_FALLING) { -		irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; +		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);  		unsigned int siel = in_be32(&siu_reg->sc_siel);  		/* only external IRQ senses are programmable */ diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c index 0892a2841c2..fb4963abdf5 100644 --- a/arch/powerpc/sysdev/mpc8xxx_gpio.c +++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c @@ -163,7 +163,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)  	spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); +	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));  	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);  } @@ -176,7 +176,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d)  	spin_lock_irqsave(&mpc8xxx_gc->lock, flags); -	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); +	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));  	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);  } @@ -186,7 +186,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d)  	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);  	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; -	out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); +	out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));  }  static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) @@ -199,14 +199,14 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)  	case IRQ_TYPE_EDGE_FALLING:  	
	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);  		setbits32(mm->regs + GPIO_ICR, -			  mpc8xxx_gpio2mask(virq_to_hw(d->irq))); +			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));  		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);  		break;  	case IRQ_TYPE_EDGE_BOTH:  		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);  		clrbits32(mm->regs + GPIO_ICR, -			  mpc8xxx_gpio2mask(virq_to_hw(d->irq))); +			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));  		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);  		break; @@ -221,7 +221,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)  {  	struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);  	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; -	unsigned long gpio = virq_to_hw(d->irq); +	unsigned long gpio = irqd_to_hwirq(d);  	void __iomem *reg;  	unsigned int shift;  	unsigned long flags; diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index f91c065bed5..3a8de5bb628 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -6,6 +6,7 @@   *  with various broken implementations of this HW.   *   *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. + *  Copyright 2010-2011 Freescale Semiconductor, Inc.   *   *  This file is subject to the terms and conditions of the GNU General Public   *  License.  See the file COPYING in the main directory of this archive @@ -27,6 +28,7 @@  #include <linux/spinlock.h>  #include <linux/pci.h>  #include <linux/slab.h> +#include <linux/syscore_ops.h>  #include <asm/ptrace.h>  #include <asm/signal.h> @@ -218,6 +220,28 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu  	_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);  } +static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) +{ +	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + +			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + +	if (tm >= 4) +		offset += 0x1000 / 4; + +	return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); +} + +static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) +{ +	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + +			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + +	if (tm >= 4) +		offset += 0x1000 / 4; + +	_mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); +} +  static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)  {  	unsigned int cpu = mpic_processor_id(mpic); @@ -268,6 +292,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,  #define mpic_write(b,r,v)	_mpic_write(mpic->reg_type,&(b),(r),(v))  #define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))  #define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v)) +#define mpic_tm_read(i)		_mpic_tm_read(mpic,(i)) +#define mpic_tm_write(i,v)	_mpic_tm_write(mpic,(i),(v))  #define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))  #define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))  #define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r)) @@ -607,8 +633,6 @@ static int irq_choose_cpu(const struct cpumask *mask)  }  #endif -#define mpic_irq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq) -  /* Find an mpic associated with a given linux interrupt */  static struct mpic *mpic_find(unsigned int irq)  { @@ -621,11 +645,18 @@ static struct mpic *mpic_find(unsigned int irq)  /* Determine if the linux irq is an IPI */  static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)  { -	unsigned int src = mpic_irq_to_hw(irq); +	unsigned int src = virq_to_hw(irq);  	return (src >= mpic->ipi_vecs[0] && src <= 
mpic->ipi_vecs[3]);  } +/* Determine if the linux irq is a timer */ +static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) +{ +	unsigned int src = virq_to_hw(irq); + +	return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); +}  /* Convert a cpu mask from logical to physical cpu numbers. */  static inline u32 mpic_physmask(u32 cpumask) @@ -633,7 +664,7 @@ static inline u32 mpic_physmask(u32 cpumask)  	int i;  	u32 mask = 0; -	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) +	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)  		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);  	return mask;  } @@ -674,7 +705,7 @@ void mpic_unmask_irq(struct irq_data *d)  {  	unsigned int loops = 100000;  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); @@ -695,7 +726,7 @@ void mpic_mask_irq(struct irq_data *d)  {  	unsigned int loops = 100000;  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); @@ -733,7 +764,7 @@ void mpic_end_irq(struct irq_data *d)  static void mpic_unmask_ht_irq(struct irq_data *d)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	mpic_unmask_irq(d); @@ -744,7 +775,7 @@ static void mpic_unmask_ht_irq(struct irq_data *d)  static unsigned int mpic_startup_ht_irq(struct irq_data *d)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	mpic_unmask_irq(d);  	mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); @@ -755,7 +786,7 @@ static unsigned int mpic_startup_ht_irq(struct irq_data *d)  static void mpic_shutdown_ht_irq(struct irq_data *d)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	mpic_shutdown_ht_interrupt(mpic, src);  	mpic_mask_irq(d); @@ -764,7 +795,7 @@ static void mpic_shutdown_ht_irq(struct irq_data *d)  static void mpic_end_ht_irq(struct irq_data *d)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  #ifdef DEBUG_IRQ  	DBG("%s: end_irq: %d\n", mpic->name, d->irq); @@ -785,7 +816,7 @@ static void mpic_end_ht_irq(struct irq_data *d)  static void mpic_unmask_ipi(struct irq_data *d)  {  	struct mpic *mpic = mpic_from_ipi(d); -	unsigned int src = mpic_irq_to_hw(d->irq) - mpic->ipi_vecs[0]; +	unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];  	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);  	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); @@ -812,27 +843,42 @@ static void mpic_end_ipi(struct irq_data *d)  #endif /* CONFIG_SMP */ +static void mpic_unmask_tm(struct irq_data *d) +{ +	struct mpic *mpic = mpic_from_irq_data(d); +	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + +	DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src); +	mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); +	mpic_tm_read(src); +} + +static void mpic_mask_tm(struct irq_data *d) +{ +	struct mpic *mpic = mpic_from_irq_data(d); +	unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + +	mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); +	mpic_tm_read(src); +} +  int 
mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,  		      bool force)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	if (mpic->flags & MPIC_SINGLE_DEST_CPU) {  		int cpuid = irq_choose_cpu(cpumask);  		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);  	} else { -		cpumask_var_t tmp; - -		alloc_cpumask_var(&tmp, GFP_KERNEL); +		u32 mask = cpumask_bits(cpumask)[0]; -		cpumask_and(tmp, cpumask, cpu_online_mask); +		mask &= cpumask_bits(cpu_online_mask)[0];  		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), -			       mpic_physmask(cpumask_bits(tmp)[0])); - -		free_cpumask_var(tmp); +			       mpic_physmask(mask));  	}  	return 0; @@ -862,7 +908,7 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)  int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)  {  	struct mpic *mpic = mpic_from_irq_data(d); -	unsigned int src = mpic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned int vecpri, vold, vnew;  	DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", @@ -898,7 +944,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)  void mpic_set_vector(unsigned int virq, unsigned int vector)  {  	struct mpic *mpic = mpic_from_irq(virq); -	unsigned int src = mpic_irq_to_hw(virq); +	unsigned int src = virq_to_hw(virq);  	unsigned int vecpri;  	DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", @@ -916,7 +962,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)  void mpic_set_destination(unsigned int virq, unsigned int cpuid)  {  	struct mpic *mpic = mpic_from_irq(virq); -	unsigned int src = mpic_irq_to_hw(virq); +	unsigned int src = virq_to_hw(virq);  	DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",  	    mpic, virq, src, cpuid); @@ -942,6 +988,12 @@ static struct irq_chip mpic_ipi_chip = {  };  #endif /* CONFIG_SMP */ +static struct irq_chip mpic_tm_chip = { +	.irq_mask	= mpic_mask_tm, +	.irq_unmask	= mpic_unmask_tm, +	.irq_eoi	= mpic_end_irq, +}; +  #ifdef CONFIG_MPIC_U3_HT_IRQS  static struct irq_chip mpic_irq_ht_chip = {  	.irq_startup	= mpic_startup_ht_irq, @@ -985,6 +1037,16 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,  	}  #endif /* CONFIG_SMP */ +	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { +		WARN_ON(!(mpic->flags & MPIC_PRIMARY)); + +		DBG("mpic: mapping as timer\n"); +		irq_set_chip_data(virq, mpic); +		irq_set_chip_and_handler(virq, &mpic->hc_tm, +					 handle_fasteoi_irq); +		return 0; +	} +  	if (hw >= mpic->irq_count)  		return -EINVAL; @@ -1025,6 +1087,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,  			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)  { +	struct mpic *mpic = h->host_data;  	static unsigned char map_mpic_senses[4] = {  		IRQ_TYPE_EDGE_RISING,  		IRQ_TYPE_LEVEL_LOW, @@ -1033,7 +1096,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,  	};  	*out_hwirq = intspec[0]; -	if (intsize > 1) { +	if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { +		/* +		 * Freescale MPIC with extended intspec: +		 * First two cells are as usual.  Third specifies +		 * an "interrupt type".  Fourth is type-specific data. 
+		 * +		 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt +		 */ +		switch (intspec[2]) { +		case 0: +		case 1: /* no EISR/EIMR support for now, treat as shared IRQ */ +			break; +		case 2: +			if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) +				return -EINVAL; + +			*out_hwirq = mpic->ipi_vecs[intspec[0]]; +			break; +		case 3: +			if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) +				return -EINVAL; + +			*out_hwirq = mpic->timer_vecs[intspec[0]]; +			break; +		default: +			pr_debug("%s: unknown irq type %u\n", +				 __func__, intspec[2]); +			return -EINVAL; +		} + +		*out_flags = map_mpic_senses[intspec[1] & 3]; +	} else if (intsize > 1) {  		u32 mask = 0x3;  		/* Apple invented a new race of encoding on machines with @@ -1109,6 +1203,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,  	mpic->hc_ipi.name = name;  #endif /* CONFIG_SMP */ +	mpic->hc_tm = mpic_tm_chip; +	mpic->hc_tm.name = name; +  	mpic->flags = flags;  	mpic->isu_size = isu_size;  	mpic->irq_count = irq_count; @@ -1119,10 +1216,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,  	else  		intvec_top = 255; -	mpic->timer_vecs[0] = intvec_top - 8; -	mpic->timer_vecs[1] = intvec_top - 7; -	mpic->timer_vecs[2] = intvec_top - 6; -	mpic->timer_vecs[3] = intvec_top - 5; +	mpic->timer_vecs[0] = intvec_top - 12; +	mpic->timer_vecs[1] = intvec_top - 11; +	mpic->timer_vecs[2] = intvec_top - 10; +	mpic->timer_vecs[3] = intvec_top - 9; +	mpic->timer_vecs[4] = intvec_top - 8; +	mpic->timer_vecs[5] = intvec_top - 7; +	mpic->timer_vecs[6] = intvec_top - 6; +	mpic->timer_vecs[7] = intvec_top - 5;  	mpic->ipi_vecs[0]   = intvec_top - 4;  	mpic->ipi_vecs[1]   = intvec_top - 3;  	mpic->ipi_vecs[2]   = intvec_top - 2; @@ -1132,6 +1233,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,  	/* Check for "big-endian" in device-tree */  	if (node && of_get_property(node, "big-endian", NULL) != NULL)  		mpic->flags |= MPIC_BIG_ENDIAN; +	if (node && of_device_is_compatible(node, "fsl,mpic")) +		mpic->flags |= MPIC_FSL;  	/* Look for protected sources */  	if (node) { @@ -1323,15 +1426,17 @@ void __init mpic_init(struct mpic *mpic)  	/* Set current processor priority to max */  	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); -	/* Initialize timers: just disable them all */ +	/* Initialize timers to our reserved vectors and mask them for now */  	for (i = 0; i < 4; i++) {  		mpic_write(mpic->tmregs,  			   i * MPIC_INFO(TIMER_STRIDE) + -			   MPIC_INFO(TIMER_DESTINATION), 0); +			   MPIC_INFO(TIMER_DESTINATION), +			   1 << hard_smp_processor_id());  		mpic_write(mpic->tmregs,  			   i * MPIC_INFO(TIMER_STRIDE) +  			   MPIC_INFO(TIMER_VECTOR_PRI),  			   MPIC_VECPRI_MASK | +			   (9 << MPIC_VECPRI_PRIORITY_SHIFT) |  			   (mpic->timer_vecs[0] + i));  	} @@ -1427,7 +1532,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)  void mpic_irq_set_priority(unsigned int irq, unsigned int pri)  {  	struct mpic *mpic = mpic_find(irq); -	unsigned int src = mpic_irq_to_hw(irq); +	unsigned int src = virq_to_hw(irq);  	unsigned long flags;  	u32 reg; @@ -1440,6 +1545,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)  			~MPIC_VECPRI_PRIORITY_MASK;  		mpic_ipi_write(src - mpic->ipi_vecs[0],  			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); +	} else if (mpic_is_tm(mpic, irq)) { +		reg = mpic_tm_read(src - mpic->timer_vecs[0]) & +			~MPIC_VECPRI_PRIORITY_MASK; +		mpic_tm_write(src - mpic->timer_vecs[0], +			      reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));  	} else {  		reg 
= mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))  			& ~MPIC_VECPRI_PRIORITY_MASK; @@ -1619,46 +1729,28 @@ void mpic_request_ipis(void)  	}  } -static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask) +void smp_mpic_message_pass(int cpu, int msg)  {  	struct mpic *mpic = mpic_primary; +	u32 physmask;  	BUG_ON(mpic == NULL); -#ifdef DEBUG_IPI -	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); -#endif - -	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + -		       ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), -		       mpic_physmask(cpumask_bits(cpu_mask)[0])); -} - -void smp_mpic_message_pass(int target, int msg) -{ -	cpumask_var_t tmp; -  	/* make sure we're sending something that translates to an IPI */  	if ((unsigned int)msg > 3) {  		printk("SMP %d: smp_message_pass: unknown msg %d\n",  		       smp_processor_id(), msg);  		return;  	} -	switch (target) { -	case MSG_ALL: -		mpic_send_ipi(msg, cpu_online_mask); -		break; -	case MSG_ALL_BUT_SELF: -		alloc_cpumask_var(&tmp, GFP_NOWAIT); -		cpumask_andnot(tmp, cpu_online_mask, -			       cpumask_of(smp_processor_id())); -		mpic_send_ipi(msg, tmp); -		free_cpumask_var(tmp); -		break; -	default: -		mpic_send_ipi(msg, cpumask_of(target)); -		break; -	} + +#ifdef DEBUG_IPI +	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg); +#endif + +	physmask = 1 << get_hard_smp_processor_id(cpu); + +	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + +		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);  }  int __init smp_mpic_probe(void) @@ -1702,9 +1794,8 @@ void mpic_reset_core(int cpu)  #endif /* CONFIG_SMP */  #ifdef CONFIG_PM -static int mpic_suspend(struct sys_device *dev, pm_message_t state) +static void mpic_suspend_one(struct mpic *mpic)  { -	struct mpic *mpic = container_of(dev, struct mpic, sysdev);  	int i;  	for (i = 0; i < mpic->num_sources; i++) { @@ -1713,13 +1804,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state)  		mpic->save_data[i].dest =  			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));  	} +} + +static int mpic_suspend(void) +{ +	struct mpic *mpic = mpics; + +	while (mpic) { +		mpic_suspend_one(mpic); +		mpic = mpic->next; +	}  	return 0;  } -static int mpic_resume(struct sys_device *dev) +static void mpic_resume_one(struct mpic *mpic)  { -	struct mpic *mpic = container_of(dev, struct mpic, sysdev);  	int i;  	for (i = 0; i < mpic->num_sources; i++) { @@ -1746,33 +1846,28 @@ static int mpic_resume(struct sys_device *dev)  	}  #endif  	} /* end for loop */ +} -	return 0; +static void mpic_resume(void) +{ +	struct mpic *mpic = mpics; + +	while (mpic) { +		mpic_resume_one(mpic); +		mpic = mpic->next; +	}  } -#endif -static struct sysdev_class mpic_sysclass = { -#ifdef CONFIG_PM +static struct syscore_ops mpic_syscore_ops = {  	.resume = mpic_resume,  	.suspend = mpic_suspend, -#endif -	.name = "mpic",  };  static int mpic_init_sys(void)  { -	struct mpic *mpic = mpics; -	int error, id = 0; - -	error = sysdev_class_register(&mpic_sysclass); - -	while (mpic && !error) { -		mpic->sysdev.cls = &mpic_sysclass; -		mpic->sysdev.id = id++; -		error = sysdev_register(&mpic->sysdev); -		mpic = mpic->next; -	} -	return error; +	register_syscore_ops(&mpic_syscore_ops); +	return 0;  }  device_initcall(mpic_init_sys); +#endif diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c index e9c633c7c08..14d130268e7 100644 --- a/arch/powerpc/sysdev/mv64x60_pic.c +++ b/arch/powerpc/sysdev/mv64x60_pic.c @@ -78,7 +78,7 @@ static struct irq_host *mv64x60_irq_host;  static void 
mv64x60_mask_low(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -91,7 +91,7 @@ static void mv64x60_mask_low(struct irq_data *d)  static void mv64x60_unmask_low(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -115,7 +115,7 @@ static struct irq_chip mv64x60_chip_low = {  static void mv64x60_mask_high(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -128,7 +128,7 @@ static void mv64x60_mask_high(struct irq_data *d)  static void mv64x60_unmask_high(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -152,7 +152,7 @@ static struct irq_chip mv64x60_chip_high = {  static void mv64x60_mask_gpp(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -165,7 +165,7 @@ static void mv64x60_mask_gpp(struct irq_data *d)  static void mv64x60_mask_ack_gpp(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); @@ -180,7 +180,7 @@ static void mv64x60_mask_ack_gpp(struct irq_data *d)  static void mv64x60_unmask_gpp(struct irq_data *d)  { -	int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; +	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;  	unsigned long flags;  	spin_lock_irqsave(&mv64x60_lock, flags); diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 832d6924ad1..b2acda07220 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -197,12 +197,10 @@ static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)  	return irq_data_get_irq_chip_data(d);  } -#define virq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq) -  static void qe_ic_unmask_irq(struct irq_data *d)  {  	struct qe_ic *qe_ic = qe_ic_from_irq_data(d); -	unsigned int src = virq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; @@ -218,7 +216,7 @@ static void qe_ic_unmask_irq(struct irq_data *d)  static void qe_ic_mask_irq(struct irq_data *d)  {  	struct qe_ic *qe_ic = qe_ic_from_irq_data(d); -	unsigned int src = virq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 temp; diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c new file mode 100644 index 00000000000..b2593ce30c9 --- /dev/null +++ b/arch/powerpc/sysdev/scom.c @@ -0,0 +1,192 @@ +/* + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp + *                <benh@kernel.crashing.org> + *     and        David Gibson, IBM Corporation. + * + *   This program is free software;  you can redistribute it and/or modify + *   it under the terms of the GNU General Public License as published by + *   the Free Software Foundation; either version 2 of the License, or + *   (at your option) any later version. 
+ * + *   This program is distributed in the hope that it will be useful, + *   but WITHOUT ANY WARRANTY;  without even the implied warranty of + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See + *   the GNU General Public License for more details. + * + *   You should have received a copy of the GNU General Public License + *   along with this program;  if not, write to the Free Software + *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <asm/prom.h> +#include <asm/scom.h> + +const struct scom_controller *scom_controller; +EXPORT_SYMBOL_GPL(scom_controller); + +struct device_node *scom_find_parent(struct device_node *node) +{ +	struct device_node *par, *tmp; +	const u32 *p; + +	for (par = of_node_get(node); par;) { +		if (of_get_property(par, "scom-controller", NULL)) +			break; +		p = of_get_property(par, "scom-parent", NULL); +		tmp = par; +		if (p == NULL) +			par = of_get_parent(par); +		else +			par = of_find_node_by_phandle(*p); +		of_node_put(tmp); +	} +	return par; +} +EXPORT_SYMBOL_GPL(scom_find_parent); + +scom_map_t scom_map_device(struct device_node *dev, int index) +{ +	struct device_node *parent; +	unsigned int cells, size; +	const u32 *prop; +	u64 reg, cnt; +	scom_map_t ret; + +	parent = scom_find_parent(dev); + +	if (parent == NULL) +		return 0; + +	prop = of_get_property(parent, "#scom-cells", NULL); +	cells = prop ? *prop : 1; + +	prop = of_get_property(dev, "scom-reg", &size); +	if (!prop) +		return 0; +	size >>= 2; + +	if (index >= (size / (2*cells))) +		return 0; + +	reg = of_read_number(&prop[index * cells * 2], cells); +	cnt = of_read_number(&prop[index * cells * 2 + cells], cells); + +	ret = scom_map(parent, reg, cnt); +	of_node_put(parent); + +	return ret; +} +EXPORT_SYMBOL_GPL(scom_map_device); + +#ifdef CONFIG_SCOM_DEBUGFS +struct scom_debug_entry { +	struct device_node *dn; +	unsigned long addr; +	scom_map_t map; +	spinlock_t lock; +	char name[8]; +	struct debugfs_blob_wrapper blob; +}; + +static int scom_addr_set(void *data, u64 val) +{ +	struct scom_debug_entry *ent = data; + +	ent->addr = 0; +	scom_unmap(ent->map); + +	ent->map = scom_map(ent->dn, val, 1); +	if (scom_map_ok(ent->map)) +		ent->addr = val; +	else +		return -EFAULT; + +	return 0; +} + +static int scom_addr_get(void *data, u64 *val) +{ +	struct scom_debug_entry *ent = data; +	*val = ent->addr; +	return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set, +			"0x%llx\n"); + +static int scom_val_set(void *data, u64 val) +{ +	struct scom_debug_entry *ent = data; + +	if (!scom_map_ok(ent->map)) +		return -EFAULT; + +	scom_write(ent->map, 0, val); + +	return 0; +} + +static int scom_val_get(void *data, u64 *val) +{ +	struct scom_debug_entry *ent = data; + +	if (!scom_map_ok(ent->map)) +		return -EFAULT; + +	*val = scom_read(ent->map, 0); +	return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set, +			"0x%llx\n"); + +static int scom_debug_init_one(struct dentry *root, struct device_node *dn, +			       int i) +{ +	struct scom_debug_entry *ent; +	struct dentry *dir; + +	ent = kzalloc(sizeof(*ent), GFP_KERNEL); +	if (!ent) +		return -ENOMEM; + +	ent->dn = of_node_get(dn); +	ent->map = SCOM_MAP_INVALID; +	spin_lock_init(&ent->lock); +	snprintf(ent->name, 8, "scom%d", i); +	ent->blob.data = dn->full_name; +	ent->blob.size = strlen(dn->full_name); + +	dir = debugfs_create_dir(ent->name, root); +	if (!dir) { +		
of_node_put(dn); +		kfree(ent); +		return -1; +	} + +	debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops); +	debugfs_create_file("value", 0600, dir, ent, &scom_val_fops); +	debugfs_create_blob("path", 0400, dir, &ent->blob); + +	return 0; +} + +static int scom_debug_init(void) +{ +	struct device_node *dn; +	struct dentry *root; +	int i, rc; + +	root = debugfs_create_dir("scom", powerpc_debugfs_root); +	if (!root) +		return -1; + +	i = rc = 0; +	for_each_node_with_property(dn, "scom-controller") +		rc |= scom_debug_init_one(root, dn, i++); + +	return rc; +} +device_initcall(scom_debug_init); +#endif /* CONFIG_SCOM_DEBUGFS */ diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 5d913851662..984cd202915 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -41,8 +41,6 @@  #define UIC_VR		0x7  #define UIC_VCR		0x8 -#define uic_irq_to_hw(virq)	(irq_map[virq].hwirq) -  struct uic *primary_uic;  struct uic { @@ -58,7 +56,7 @@ struct uic {  static void uic_unmask_irq(struct irq_data *d)  {  	struct uic *uic = irq_data_get_irq_chip_data(d); -	unsigned int src = uic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 er, sr; @@ -76,7 +74,7 @@ static void uic_unmask_irq(struct irq_data *d)  static void uic_mask_irq(struct irq_data *d)  {  	struct uic *uic = irq_data_get_irq_chip_data(d); -	unsigned int src = uic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 er; @@ -90,7 +88,7 @@ static void uic_mask_irq(struct irq_data *d)  static void uic_ack_irq(struct irq_data *d)  {  	struct uic *uic = irq_data_get_irq_chip_data(d); -	unsigned int src = uic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	spin_lock_irqsave(&uic->lock, flags); @@ -101,7 +99,7 @@ static void uic_ack_irq(struct irq_data *d)  static void uic_mask_ack_irq(struct irq_data *d)  {  	struct uic *uic = irq_data_get_irq_chip_data(d); -	unsigned int src = uic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	u32 er, sr; @@ -126,7 +124,7 @@ static void uic_mask_ack_irq(struct irq_data *d)  static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)  {  	struct uic *uic = irq_data_get_irq_chip_data(d); -	unsigned int src = uic_irq_to_hw(d->irq); +	unsigned int src = irqd_to_hwirq(d);  	unsigned long flags;  	int trigger, polarity;  	u32 tr, pr, mask; diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig new file mode 100644 index 00000000000..0031eda320c --- /dev/null +++ b/arch/powerpc/sysdev/xics/Kconfig @@ -0,0 +1,13 @@ +config PPC_XICS +       def_bool n +       select PPC_SMP_MUXED_IPI + +config PPC_ICP_NATIVE +       def_bool n + +config PPC_ICP_HV +       def_bool n + +config PPC_ICS_RTAS +       def_bool n + diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile new file mode 100644 index 00000000000..b75a6059337 --- /dev/null +++ b/arch/powerpc/sysdev/xics/Makefile @@ -0,0 +1,6 @@ +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-y				+= xics-common.o +obj-$(CONFIG_PPC_ICP_NATIVE)	+= icp-native.o +obj-$(CONFIG_PPC_ICP_HV)	+= icp-hv.o +obj-$(CONFIG_PPC_ICS_RTAS)	+= ics-rtas.o diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c new file mode 100644 index 00000000000..9518d367a64 --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -0,0 +1,164 @@ +/* + * Copyright 2011 IBM Corporation. 
+ * + *  This program is free software; you can redistribute it and/or + *  modify it under the terms of the GNU General Public License + *  as published by the Free Software Foundation; either version + *  2 of the License, or (at your option) any later version. + * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/of.h> + +#include <asm/smp.h> +#include <asm/irq.h> +#include <asm/errno.h> +#include <asm/xics.h> +#include <asm/io.h> +#include <asm/hvcall.h> + +static inline unsigned int icp_hv_get_xirr(unsigned char cppr) +{ +	unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; +	long rc; + +	rc = plpar_hcall(H_XIRR, retbuf, cppr); +	if (rc != H_SUCCESS) +		panic(" bad return code xirr - rc = %lx\n", rc); +	return (unsigned int)retbuf[0]; +} + +static inline void icp_hv_set_xirr(unsigned int value) +{ +	long rc = plpar_hcall_norets(H_EOI, value); +	if (rc != H_SUCCESS) +		panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); +} + +static inline void icp_hv_set_cppr(u8 value) +{ +	long rc = plpar_hcall_norets(H_CPPR, value); +	if (rc != H_SUCCESS) +		panic("bad return code cppr - rc = %lx\n", rc); +} + +static inline void icp_hv_set_qirr(int n_cpu , u8 value) +{ +	long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), +				     value); +	if (rc != H_SUCCESS) +		panic("bad return code qirr - rc = %lx\n", rc); +} + +static void icp_hv_eoi(struct irq_data *d) +{ +	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + +	iosync(); +	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); +} + +static void icp_hv_teardown_cpu(void) +{ +	int cpu = smp_processor_id(); + +	/* Clear any pending IPI */ +	icp_hv_set_qirr(cpu, 0xff); +} + +static void icp_hv_flush_ipi(void) +{ +	/* We take the ipi irq but and never return so we +	 * need to EOI the IPI, but want to leave our priority 0 +	 * +	 * should we check all the other interrupts too? +	 * should we be flagging idle loop instead? +	 * or creating some task to be scheduled? +	 */ + +	icp_hv_set_xirr((0x00 << 24) | XICS_IPI); +} + +static unsigned int icp_hv_get_irq(void) +{ +	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); +	unsigned int vec = xirr & 0x00ffffff; +	unsigned int irq; + +	if (vec == XICS_IRQ_SPURIOUS) +		return NO_IRQ; + +	irq = irq_radix_revmap_lookup(xics_host, vec); +	if (likely(irq != NO_IRQ)) { +		xics_push_cppr(vec); +		return irq; +	} + +	/* We don't have a linux mapping, so have rtas mask it. 
*/ +	xics_mask_unknown_vec(vec); + +	/* We might learn about it later, so EOI it */ +	icp_hv_set_xirr(xirr); + +	return NO_IRQ; +} + +static void icp_hv_set_cpu_priority(unsigned char cppr) +{ +	xics_set_base_cppr(cppr); +	icp_hv_set_cppr(cppr); +	iosync(); +} + +#ifdef CONFIG_SMP + +static void icp_hv_cause_ipi(int cpu, unsigned long data) +{ +	icp_hv_set_qirr(cpu, IPI_PRIORITY); +} + +static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) +{ +	int cpu = smp_processor_id(); + +	icp_hv_set_qirr(cpu, 0xff); + +	return smp_ipi_demux(); +} + +#endif /* CONFIG_SMP */ + +static const struct icp_ops icp_hv_ops = { +	.get_irq	= icp_hv_get_irq, +	.eoi		= icp_hv_eoi, +	.set_priority	= icp_hv_set_cpu_priority, +	.teardown_cpu	= icp_hv_teardown_cpu, +	.flush_ipi	= icp_hv_flush_ipi, +#ifdef CONFIG_SMP +	.ipi_action	= icp_hv_ipi_action, +	.cause_ipi	= icp_hv_cause_ipi, +#endif +}; + +int icp_hv_init(void) +{ +	struct device_node *np; + +	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); +	if (!np) +		np = of_find_node_by_type(NULL, +				    "PowerPC-External-Interrupt-Presentation"); +	if (!np) +		return -ENODEV; + +	icp_ops = &icp_hv_ops; + +	return 0; +} + diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c new file mode 100644 index 00000000000..1f15ad43614 --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -0,0 +1,293 @@ +/* + * Copyright 2011 IBM Corporation. + * + *  This program is free software; you can redistribute it and/or + *  modify it under the terms of the GNU General Public License + *  as published by the Free Software Foundation; either version + *  2 of the License, or (at your option) any later version. + * + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/spinlock.h> + +#include <asm/prom.h> +#include <asm/io.h> +#include <asm/smp.h> +#include <asm/irq.h> +#include <asm/errno.h> +#include <asm/xics.h> + +struct icp_ipl { +	union { +		u32 word; +		u8 bytes[4]; +	} xirr_poll; +	union { +		u32 word; +		u8 bytes[4]; +	} xirr; +	u32 dummy; +	union { +		u32 word; +		u8 bytes[4]; +	} qirr; +	u32 link_a; +	u32 link_b; +	u32 link_c; +}; + +static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; + +static inline unsigned int icp_native_get_xirr(void) +{ +	int cpu = smp_processor_id(); + +	return in_be32(&icp_native_regs[cpu]->xirr.word); +} + +static inline void icp_native_set_xirr(unsigned int value) +{ +	int cpu = smp_processor_id(); + +	out_be32(&icp_native_regs[cpu]->xirr.word, value); +} + +static inline void icp_native_set_cppr(u8 value) +{ +	int cpu = smp_processor_id(); + +	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); +} + +static inline void icp_native_set_qirr(int n_cpu, u8 value) +{ +	out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value); +} + +static void icp_native_set_cpu_priority(unsigned char cppr) +{ +	xics_set_base_cppr(cppr); +	icp_native_set_cppr(cppr); +	iosync(); +} + +static void icp_native_eoi(struct irq_data *d) +{ +	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + +	iosync(); +	icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq); +} + +static void icp_native_teardown_cpu(void) +{ +	int cpu = smp_processor_id(); + +	/* Clear any pending IPI */ +	icp_native_set_qirr(cpu, 0xff); +} + +static void icp_native_flush_ipi(void) +{ +	/* We take the ipi irq but and never return so we +	 * need to EOI the IPI, but want to 
leave our priority 0 +	 * +	 * should we check all the other interrupts too? +	 * should we be flagging idle loop instead? +	 * or creating some task to be scheduled? +	 */ + +	icp_native_set_xirr((0x00 << 24) | XICS_IPI); +} + +static unsigned int icp_native_get_irq(void) +{ +	unsigned int xirr = icp_native_get_xirr(); +	unsigned int vec = xirr & 0x00ffffff; +	unsigned int irq; + +	if (vec == XICS_IRQ_SPURIOUS) +		return NO_IRQ; + +	irq = irq_radix_revmap_lookup(xics_host, vec); +	if (likely(irq != NO_IRQ)) { +		xics_push_cppr(vec); +		return irq; +	} + +	/* We don't have a linux mapping, so have rtas mask it. */ +	xics_mask_unknown_vec(vec); + +	/* We might learn about it later, so EOI it */ +	icp_native_set_xirr(xirr); + +	return NO_IRQ; +} + +#ifdef CONFIG_SMP + +static void icp_native_cause_ipi(int cpu, unsigned long data) +{ +	icp_native_set_qirr(cpu, IPI_PRIORITY); +} + +static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) +{ +	int cpu = smp_processor_id(); + +	icp_native_set_qirr(cpu, 0xff); + +	return smp_ipi_demux(); +} + +#endif /* CONFIG_SMP */ + +static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, +					 unsigned long size) +{ +	char *rname; +	int i, cpu = -1; + +	/* This may look gross but it's good enough for now, we don't quite +	 * have a hard -> linux processor id matching. +	 */ +	for_each_possible_cpu(i) { +		if (!cpu_present(i)) +			continue; +		if (hw_id == get_hard_smp_processor_id(i)) { +			cpu = i; +			break; +		} +	} + +	/* Fail, skip that CPU. Don't print, it's normal, some XICS come up +	 * with way more entries in there than you have CPUs +	 */ +	if (cpu == -1) +		return 0; + +	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", +			  cpu, hw_id); + +	if (!request_mem_region(addr, size, rname)) { +		pr_warning("icp_native: Could not reserve ICP MMIO" +			   " for CPU %d, interrupt server #0x%x\n", +			   cpu, hw_id); +		return -EBUSY; +	} + +	icp_native_regs[cpu] = ioremap(addr, size); +	if (!icp_native_regs[cpu]) { +		pr_warning("icp_native: Failed ioremap for CPU %d, " +			   "interrupt server #0x%x, addr %#lx\n", +			   cpu, hw_id, addr); +		release_mem_region(addr, size); +		return -ENOMEM; +	} +	return 0; +} + +static int __init icp_native_init_one_node(struct device_node *np, +					   unsigned int *indx) +{ +	unsigned int ilen; +	const u32 *ireg; +	int i; +	int reg_tuple_size; +	int num_servers = 0; + +	/* This code does the theorically broken assumption that the interrupt +	 * server numbers are the same as the hard CPU numbers. +	 * This happens to be the case so far but we are playing with fire... +	 * should be fixed one of these days. -BenH. +	 */ +	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen); + +	/* Do that ever happen ? we'll know soon enough... but even good'old +	 * f80 does have that property .. 
+	 */ +	WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32))); + +	if (ireg) { +		*indx = of_read_number(ireg, 1); +		if (ilen >= 2*sizeof(u32)) +			num_servers = of_read_number(ireg + 1, 1); +	} + +	ireg = of_get_property(np, "reg", &ilen); +	if (!ireg) { +		pr_err("icp_native: Can't find interrupt reg property"); +		return -1; +	} + +	reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; +	if (((ilen % reg_tuple_size) != 0) +	    || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { +		pr_err("icp_native: ICP reg len (%d) != num servers (%d)", +		       ilen / reg_tuple_size, num_servers); +		return -1; +	} + +	for (i = 0; i < (ilen / reg_tuple_size); i++) { +		struct resource r; +		int err; + +		err = of_address_to_resource(np, i, &r); +		if (err) { +			pr_err("icp_native: Could not translate ICP MMIO" +			       " for interrupt server 0x%x (%d)\n", *indx, err); +			return -1; +		} + +		if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start)) +			return -1; + +		(*indx)++; +	} +	return 0; +} + +static const struct icp_ops icp_native_ops = { +	.get_irq	= icp_native_get_irq, +	.eoi		= icp_native_eoi, +	.set_priority	= icp_native_set_cpu_priority, +	.teardown_cpu	= icp_native_teardown_cpu, +	.flush_ipi	= icp_native_flush_ipi, +#ifdef CONFIG_SMP +	.ipi_action	= icp_native_ipi_action, +	.cause_ipi	= icp_native_cause_ipi, +#endif +}; + +int icp_native_init(void) +{ +	struct device_node *np; +	u32 indx = 0; +	int found = 0; + +	for_each_compatible_node(np, NULL, "ibm,ppc-xicp") +		if (icp_native_init_one_node(np, &indx) == 0) +			found = 1; +	if (!found) { +		for_each_node_by_type(np, +			"PowerPC-External-Interrupt-Presentation") { +				if (icp_native_init_one_node(np, &indx) == 0) +					found = 1; +		} +	} + +	if (found == 0) +		return -ENODEV; + +	icp_ops = &icp_native_ops; + +	return 0; +} diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c new file mode 100644 index 00000000000..c782f85cf7e --- /dev/null +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -0,0 +1,240 @@ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/msi.h> + +#include <asm/prom.h> +#include <asm/smp.h> +#include <asm/machdep.h> +#include <asm/irq.h> +#include <asm/errno.h> +#include <asm/xics.h> +#include <asm/rtas.h> + +/* RTAS service tokens */ +static int ibm_get_xive; +static int ibm_set_xive; +static int ibm_int_on; +static int ibm_int_off; + +static int ics_rtas_map(struct ics *ics, unsigned int virq); +static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); +static long ics_rtas_get_server(struct ics *ics, unsigned long vec); +static int ics_rtas_host_match(struct ics *ics, struct device_node *node); + +/* Only one global & state struct ics */ +static struct ics ics_rtas = { +	.map		= ics_rtas_map, +	.mask_unknown	= ics_rtas_mask_unknown, +	.get_server	= ics_rtas_get_server, +	.host_match	= ics_rtas_host_match, +}; + +static void ics_rtas_unmask_irq(struct irq_data *d) +{ +	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); +	int call_status; +	int server; + +	pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); + +	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) +		return; + +	server = xics_get_irq_server(d->irq, d->affinity, 0); + +	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, +				DEFAULT_PRIORITY); +	if 
(call_status != 0) { +		printk(KERN_ERR +			"%s: ibm_set_xive irq %u server %x returned %d\n", +			__func__, hw_irq, server, call_status); +		return; +	} + +	/* Now unmask the interrupt (often a no-op) */ +	call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); +	if (call_status != 0) { +		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", +			__func__, hw_irq, call_status); +		return; +	} +} + +static unsigned int ics_rtas_startup(struct irq_data *d) +{ +#ifdef CONFIG_PCI_MSI +	/* +	 * The generic MSI code returns with the interrupt disabled on the +	 * card, using the MSI mask bits. Firmware doesn't appear to unmask +	 * at that level, so we do it here by hand. +	 */ +	if (d->msi_desc) +		unmask_msi_irq(d); +#endif +	/* unmask it */ +	ics_rtas_unmask_irq(d); +	return 0; +} + +static void ics_rtas_mask_real_irq(unsigned int hw_irq) +{ +	int call_status; + +	if (hw_irq == XICS_IPI) +		return; + +	call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); +	if (call_status != 0) { +		printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", +			__func__, hw_irq, call_status); +		return; +	} + +	/* Have to set XIVE to 0xff to be able to remove a slot */ +	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, +				xics_default_server, 0xff); +	if (call_status != 0) { +		printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", +			__func__, hw_irq, call_status); +		return; +	} +} + +static void ics_rtas_mask_irq(struct irq_data *d) +{ +	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + +	pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq); + +	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) +		return; +	ics_rtas_mask_real_irq(hw_irq); +} + +static int ics_rtas_set_affinity(struct irq_data *d, +				 const struct cpumask *cpumask, +				 bool force) +{ +	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); +	int status; +	int xics_status[2]; +	int irq_server; + +	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) +		return -1; + +	status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); + +	if (status) { +		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", +			__func__, hw_irq, status); +		return -1; +	} + +	irq_server = xics_get_irq_server(d->irq, cpumask, 1); +	if (irq_server == -1) { +		char cpulist[128]; +		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); +		printk(KERN_WARNING +			"%s: No online cpus in the mask %s for irq %d\n", +			__func__, cpulist, d->irq); +		return -1; +	} + +	status = rtas_call(ibm_set_xive, 3, 1, NULL, +			   hw_irq, irq_server, xics_status[1]); + +	if (status) { +		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", +			__func__, hw_irq, status); +		return -1; +	} + +	return IRQ_SET_MASK_OK; +} + +static struct irq_chip ics_rtas_irq_chip = { +	.name = "XICS", +	.irq_startup = ics_rtas_startup, +	.irq_mask = ics_rtas_mask_irq, +	.irq_unmask = ics_rtas_unmask_irq, +	.irq_eoi = NULL, /* Patched at init time */ +	.irq_set_affinity = ics_rtas_set_affinity +}; + +static int ics_rtas_map(struct ics *ics, unsigned int virq) +{ +	unsigned int hw_irq = (unsigned int)virq_to_hw(virq); +	int status[2]; +	int rc; + +	if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) +		return -EINVAL; + +	/* Check if RTAS knows about this interrupt */ +	rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); +	if (rc) +		return -ENXIO; + +	irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq); +	irq_set_chip_data(virq, &ics_rtas); + +	return 0; +} + +static void ics_rtas_mask_unknown(struct ics *ics, unsigned 
long vec) +{ +	ics_rtas_mask_real_irq(vec); +} + +static long ics_rtas_get_server(struct ics *ics, unsigned long vec) +{ +	int rc, status[2]; + +	rc = rtas_call(ibm_get_xive, 1, 3, status, vec); +	if (rc) +		return -1; +	return status[0]; +} + +static int ics_rtas_host_match(struct ics *ics, struct device_node *node) +{ +	/* IBM machines have interrupt parents of various funky types for things +	 * like vdevices, events, etc... The trick we use here is to match +	 * everything here except the legacy 8259 which is compatible "chrp,iic" +	 */ +	return !of_device_is_compatible(node, "chrp,iic"); +} + +int ics_rtas_init(void) +{ +	ibm_get_xive = rtas_token("ibm,get-xive"); +	ibm_set_xive = rtas_token("ibm,set-xive"); +	ibm_int_on  = rtas_token("ibm,int-on"); +	ibm_int_off = rtas_token("ibm,int-off"); + +	/* We enable the RTAS "ICS" if RTAS is present with the +	 * appropriate tokens +	 */ +	if (ibm_get_xive == RTAS_UNKNOWN_SERVICE || +	    ibm_set_xive == RTAS_UNKNOWN_SERVICE) +		return -ENODEV; + +	/* We need to patch our irq chip's EOI to point to the +	 * right ICP +	 */ +	ics_rtas_irq_chip.irq_eoi = icp_ops->eoi; + +	/* Register ourselves */ +	xics_register_ics(&ics_rtas); + +	return 0; +} + diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c new file mode 100644 index 00000000000..445c5a01b76 --- /dev/null +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -0,0 +1,443 @@ +/* + * Copyright 2011 IBM Corporation. + * + *  This program is free software; you can redistribute it and/or + *  modify it under the terms of the GNU General Public License + *  as published by the Free Software Foundation; either version + *  2 of the License, or (at your option) any later version. + * + */ +#include <linux/types.h> +#include <linux/threads.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/debugfs.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <asm/prom.h> +#include <asm/io.h> +#include <asm/smp.h> +#include <asm/machdep.h> +#include <asm/irq.h> +#include <asm/errno.h> +#include <asm/rtas.h> +#include <asm/xics.h> +#include <asm/firmware.h> + +/* Globals common to all ICP/ICS implementations */ +const struct icp_ops	*icp_ops; + +unsigned int xics_default_server		= 0xff; +unsigned int xics_default_distrib_server	= 0; +unsigned int xics_interrupt_server_size		= 8; + +DEFINE_PER_CPU(struct xics_cppr, xics_cppr); + +struct irq_host *xics_host; + +static LIST_HEAD(ics_list); + +void xics_update_irq_servers(void) +{ +	int i, j; +	struct device_node *np; +	u32 ilen; +	const u32 *ireg; +	u32 hcpuid; + +	/* Find the server numbers for the boot cpu. */ +	np = of_get_cpu_node(boot_cpuid, NULL); +	BUG_ON(!np); + +	hcpuid = get_hard_smp_processor_id(boot_cpuid); +	xics_default_server = xics_default_distrib_server = hcpuid; + +	pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server); + +	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); +	if (!ireg) { +		of_node_put(np); +		return; +	} + +	i = ilen / sizeof(int); + +	/* Global interrupt distribution server is specified in the last +	 * entry of "ibm,ppc-interrupt-gserver#s" property. 
Get the last +	 * entry fom this property for current boot cpu id and use it as +	 * default distribution server +	 */ +	for (j = 0; j < i; j += 2) { +		if (ireg[j] == hcpuid) { +			xics_default_distrib_server = ireg[j+1]; +			break; +		} +	} +	pr_devel("xics: xics_default_distrib_server = 0x%x\n", +		 xics_default_distrib_server); +	of_node_put(np); +} + +/* GIQ stuff, currently only supported on RTAS setups, will have + * to be sorted properly for bare metal + */ +void xics_set_cpu_giq(unsigned int gserver, unsigned int join) +{ +#ifdef CONFIG_PPC_RTAS +	int index; +	int status; + +	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) +		return; + +	index = (1UL << xics_interrupt_server_size) - 1 - gserver; + +	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); + +	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", +	     GLOBAL_INTERRUPT_QUEUE, index, join, status); +#endif +} + +void xics_setup_cpu(void) +{ +	icp_ops->set_priority(LOWEST_PRIORITY); + +	xics_set_cpu_giq(xics_default_distrib_server, 1); +} + +void xics_mask_unknown_vec(unsigned int vec) +{ +	struct ics *ics; + +	pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec); + +	list_for_each_entry(ics, &ics_list, link) +		ics->mask_unknown(ics, vec); +} + + +#ifdef CONFIG_SMP + +static void xics_request_ipi(void) +{ +	unsigned int ipi; + +	ipi = irq_create_mapping(xics_host, XICS_IPI); +	BUG_ON(ipi == NO_IRQ); + +	/* +	 * IPIs are marked IRQF_DISABLED as they must run with irqs +	 * disabled, and PERCPU.  The handler was set in map. +	 */ +	BUG_ON(request_irq(ipi, icp_ops->ipi_action, +			   IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); +} + +int __init xics_smp_probe(void) +{ +	/* Setup cause_ipi callback  based on which ICP is used */ +	smp_ops->cause_ipi = icp_ops->cause_ipi; + +	/* Register all the IPIs */ +	xics_request_ipi(); + +	return cpumask_weight(cpu_possible_mask); +} + +#endif /* CONFIG_SMP */ + +void xics_teardown_cpu(void) +{ +	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + +	/* +	 * we have to reset the cppr index to 0 because we're +	 * not going to return from the IPI +	 */ +	os_cppr->index = 0; +	icp_ops->set_priority(0); +	icp_ops->teardown_cpu(); +} + +void xics_kexec_teardown_cpu(int secondary) +{ +	xics_teardown_cpu(); + +	icp_ops->flush_ipi(); + +	/* +	 * Some machines need to have at least one cpu in the GIQ, +	 * so leave the master cpu in the group. +	 */ +	if (secondary) +		xics_set_cpu_giq(xics_default_distrib_server, 0); +} + + +#ifdef CONFIG_HOTPLUG_CPU + +/* Interrupts are disabled. */ +void xics_migrate_irqs_away(void) +{ +	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); +	unsigned int irq, virq; + +	/* If we used to be the default server, move to the new "boot_cpuid" */ +	if (hw_cpu == xics_default_server) +		xics_update_irq_servers(); + +	/* Reject any interrupt that was queued to us... */ +	icp_ops->set_priority(0); + +	/* Remove ourselves from the global interrupt queue */ +	xics_set_cpu_giq(xics_default_distrib_server, 0); + +	/* Allow IPIs again... */ +	icp_ops->set_priority(DEFAULT_PRIORITY); + +	for_each_irq(virq) { +		struct irq_desc *desc; +		struct irq_chip *chip; +		long server; +		unsigned long flags; +		struct ics *ics; + +		/* We can't set affinity on ISA interrupts */ +		if (virq < NUM_ISA_INTERRUPTS) +			continue; +		if (!virq_is_host(virq, xics_host)) +			continue; +		irq = (unsigned int)virq_to_hw(virq); +		/* We need to get IPIs still. 
*/ +		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) +			continue; +		desc = irq_to_desc(virq); +		/* We only need to migrate enabled IRQS */ +		if (!desc || !desc->action) +			continue; +		chip = irq_desc_get_chip(desc); +		if (!chip || !chip->irq_set_affinity) +			continue; + +		raw_spin_lock_irqsave(&desc->lock, flags); + +		/* Locate interrupt server */ +		server = -1; +		ics = irq_get_chip_data(virq); +		if (ics) +			server = ics->get_server(ics, irq); +		if (server < 0) { +			printk(KERN_ERR "%s: Can't find server for irq %d\n", +			       __func__, irq); +			goto unlock; +		} + +		/* We only support delivery to all cpus or to one cpu. +		 * The irq has to be migrated only in the single cpu +		 * case. +		 */ +		if (server != hw_cpu) +			goto unlock; + +		/* This is expected during cpu offline. */ +		if (cpu_online(cpu)) +			pr_warning("IRQ %u affinity broken off cpu %u\n", +			       virq, cpu); + +		/* Reset affinity to all cpus */ +		raw_spin_unlock_irqrestore(&desc->lock, flags); +		irq_set_affinity(virq, cpu_all_mask); +		continue; +unlock: +		raw_spin_unlock_irqrestore(&desc->lock, flags); +	} +} +#endif /* CONFIG_HOTPLUG_CPU */ + +#ifdef CONFIG_SMP +/* + * For the moment we only implement delivery to all cpus or one cpu. + * + * If the requested affinity is cpu_all_mask, we set global affinity. + * If not we set it to the first cpu in the mask, even if multiple cpus + * are set. This is so things like irqbalance (which set core and package + * wide affinities) do the right thing. + * + * We need to fix this to implement support for the links + */ +int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, +			unsigned int strict_check) +{ + +	if (!distribute_irqs) +		return xics_default_server; + +	if (!cpumask_subset(cpu_possible_mask, cpumask)) { +		int server = cpumask_first_and(cpu_online_mask, cpumask); + +		if (server < nr_cpu_ids) +			return get_hard_smp_processor_id(server); + +		if (strict_check) +			return -1; +	} + +	/* +	 * Workaround issue with some versions of JS20 firmware that +	 * deliver interrupts to cpus which haven't been started. This +	 * happens when using the maxcpus= boot option. 
+	 */ +	if (cpumask_equal(cpu_online_mask, cpu_present_mask)) +		return xics_default_distrib_server; + +	return xics_default_server; +} +#endif /* CONFIG_SMP */ + +static int xics_host_match(struct irq_host *h, struct device_node *node) +{ +	struct ics *ics; + +	list_for_each_entry(ics, &ics_list, link) +		if (ics->host_match(ics, node)) +			return 1; + +	return 0; +} + +/* Dummies */ +static void xics_ipi_unmask(struct irq_data *d) { } +static void xics_ipi_mask(struct irq_data *d) { } + +static struct irq_chip xics_ipi_chip = { +	.name = "XICS", +	.irq_eoi = NULL, /* Patched at init time */ +	.irq_mask = xics_ipi_mask, +	.irq_unmask = xics_ipi_unmask, +}; + +static int xics_host_map(struct irq_host *h, unsigned int virq, +			 irq_hw_number_t hw) +{ +	struct ics *ics; + +	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); + +	/* Insert the interrupt mapping into the radix tree for fast lookup */ +	irq_radix_revmap_insert(xics_host, virq, hw); + +	/* They aren't all level sensitive but we just don't really know */ +	irq_set_status_flags(virq, IRQ_LEVEL); + +	/* Don't call into ICS for IPIs */ +	if (hw == XICS_IPI) { +		irq_set_chip_and_handler(virq, &xics_ipi_chip, +					 handle_percpu_irq); +		return 0; +	} + +	/* Let the ICS setup the chip data */ +	list_for_each_entry(ics, &ics_list, link) +		if (ics->map(ics, virq) == 0) +			return 0; + +	return -EINVAL; +} + +static int xics_host_xlate(struct irq_host *h, struct device_node *ct, +			   const u32 *intspec, unsigned int intsize, +			   irq_hw_number_t *out_hwirq, unsigned int *out_flags) + +{ +	/* Current xics implementation translates everything +	 * to level. It is not technically right for MSIs but this +	 * is irrelevant at this point. We might get smarter in the future +	 */ +	*out_hwirq = intspec[0]; +	*out_flags = IRQ_TYPE_LEVEL_LOW; + +	return 0; +} + +static struct irq_host_ops xics_host_ops = { +	.match = xics_host_match, +	.map = xics_host_map, +	.xlate = xics_host_xlate, +}; + +static void __init xics_init_host(void) +{ +	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, +				   XICS_IRQ_SPURIOUS); +	BUG_ON(xics_host == NULL); +	irq_set_default_host(xics_host); +} + +void __init xics_register_ics(struct ics *ics) +{ +	list_add(&ics->link, &ics_list); +} + +static void __init xics_get_server_size(void) +{ +	struct device_node *np; +	const u32 *isize; + +	/* We fetch the interrupt server size from the first ICS node +	 * we find if any +	 */ +	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); +	if (!np) +		return; +	isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); +	if (!isize) +		return; +	xics_interrupt_server_size = *isize; +	of_node_put(np); +} + +void __init xics_init(void) +{ +	int rc = -1; + +	/* Fist locate ICP */ +#ifdef CONFIG_PPC_ICP_HV +	if (firmware_has_feature(FW_FEATURE_LPAR)) +		rc = icp_hv_init(); +#endif +#ifdef CONFIG_PPC_ICP_NATIVE +	if (rc < 0) +		rc = icp_native_init(); +#endif +	if (rc < 0) { +		pr_warning("XICS: Cannot find a Presentation Controller !\n"); +		return; +	} + +	/* Copy get_irq callback over to ppc_md */ +	ppc_md.get_irq = icp_ops->get_irq; + +	/* Patch up IPI chip EOI */ +	xics_ipi_chip.irq_eoi = icp_ops->eoi; + +	/* Now locate ICS */ +#ifdef CONFIG_PPC_ICS_RTAS +	rc = ics_rtas_init(); +#endif +	if (rc < 0) +		pr_warning("XICS: Cannot find a Source Controller !\n"); + +	/* Initialize common bits */ +	xics_get_server_size(); +	xics_update_irq_servers(); +	xics_init_host(); +	xics_setup_cpu(); +} diff --git a/arch/powerpc/sysdev/xilinx_intc.c 
b/arch/powerpc/sysdev/xilinx_intc.c index 0a13fc19e28..6183799754a 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -71,7 +71,7 @@ static unsigned char xilinx_intc_map_senses[] = {   */  static void xilinx_intc_mask(struct irq_data *d)  { -	int irq = virq_to_hw(d->irq); +	int irq = irqd_to_hwirq(d);  	void * regs = irq_data_get_irq_chip_data(d);  	pr_debug("mask: %d\n", irq);  	out_be32(regs + XINTC_CIE, 1 << irq); @@ -87,7 +87,7 @@ static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)   */  static void xilinx_intc_level_unmask(struct irq_data *d)  { -	int irq = virq_to_hw(d->irq); +	int irq = irqd_to_hwirq(d);  	void * regs = irq_data_get_irq_chip_data(d);  	pr_debug("unmask: %d\n", irq);  	out_be32(regs + XINTC_SIE, 1 << irq); @@ -112,7 +112,7 @@ static struct irq_chip xilinx_intc_level_irqchip = {   */  static void xilinx_intc_edge_unmask(struct irq_data *d)  { -	int irq = virq_to_hw(d->irq); +	int irq = irqd_to_hwirq(d);  	void *regs = irq_data_get_irq_chip_data(d);  	pr_debug("unmask: %d\n", irq);  	out_be32(regs + XINTC_SIE, 1 << irq); @@ -120,7 +120,7 @@ static void xilinx_intc_edge_unmask(struct irq_data *d)  static void xilinx_intc_edge_ack(struct irq_data *d)  { -	int irq = virq_to_hw(d->irq); +	int irq = irqd_to_hwirq(d);  	void * regs = irq_data_get_irq_chip_data(d);  	pr_debug("ack: %d\n", irq);  	out_be32(regs + XINTC_IAR, 1 << irq);  |
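
The new layering splits the XICS driver into a per-cpu presentation controller (ICP) and one or more source controllers (ICS). On the ICP side a backend fills in an icp_ops table and publishes it through the global icp_ops pointer, exactly as icp_native_init() does above. The sketch below is illustrative only: every icp_foo_* name is hypothetical, and the prototypes for eoi, set_priority, teardown_cpu and flush_ipi are inferred from their call sites in xics-common.c rather than quoted from asm/xics.h.

/*
 * Hypothetical ICP backend, sketched against the icp_ops interface used
 * by icp-native.c above.  All icp_foo_* names are made up; only the
 * get_irq, cause_ipi and ipi_action prototypes appear verbatim in this
 * patch, the others are inferred from how icp_ops is invoked.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/smp.h>
#include <asm/xics.h>

static unsigned int icp_foo_get_irq(void)
{
	/* Read the pending vector from hardware, translate it with
	 * irq_radix_revmap_lookup(xics_host, vec) as the native backend
	 * does, and return NO_IRQ when nothing is pending. */
	return NO_IRQ;
}

static void icp_foo_eoi(struct irq_data *d)
{
	/* Issue the EOI for irqd_to_hwirq(d). */
}

static void icp_foo_set_priority(unsigned char cppr)
{
	/* Write the new current processor priority (prototype assumed). */
}

static void icp_foo_teardown_cpu(void)
{
	/* Clear any pending IPI on this cpu (prototype assumed). */
}

static void icp_foo_flush_ipi(void)
{
	/* EOI the IPI before kexec/offline (prototype assumed). */
}

#ifdef CONFIG_SMP
static void icp_foo_cause_ipi(int cpu, unsigned long data)
{
	/* Poke the target cpu's queued-IRR equivalent at IPI_PRIORITY. */
}

static irqreturn_t icp_foo_ipi_action(int irq, void *dev_id)
{
	/* Ack the IPI in hardware, then let the core demultiplex it. */
	return smp_ipi_demux();
}
#endif /* CONFIG_SMP */

static const struct icp_ops icp_foo_ops = {
	.get_irq	= icp_foo_get_irq,
	.eoi		= icp_foo_eoi,
	.set_priority	= icp_foo_set_priority,
	.teardown_cpu	= icp_foo_teardown_cpu,
	.flush_ipi	= icp_foo_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_foo_ipi_action,
	.cause_ipi	= icp_foo_cause_ipi,
#endif
};

int __init icp_foo_init(void)
{
	/* Probe the device tree / firmware here; returning -ENODEV lets
	 * xics_init() fall back to another presentation controller. */
	icp_ops = &icp_foo_ops;
	return 0;
}

xics_init() only needs such a backend to set icp_ops and return 0; it then copies icp_ops->get_irq into ppc_md.get_irq and patches the IPI (and RTAS ICS) irq_chip ->irq_eoi hooks, as shown above.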
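
The source-controller side is symmetrical: a backend fills in the four struct ics callbacks used by ics-rtas.c above and registers itself with xics_register_ics(), after which xics_host_match() and xics_host_map() will consult it. A minimal sketch, again with hypothetical ics_foo_* names and a placeholder compatible string:

/*
 * Hypothetical ICS backend.  The callback prototypes match the ones
 * ics-rtas.c assigns above; ics_foo_* and "foo,ics" are made up.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/xics.h>

static struct irq_chip ics_foo_irq_chip = {
	.name		= "FOO-ICS",
	/* .irq_startup/.irq_mask/.irq_unmask/.irq_set_affinity would be
	 * filled in as in ics_rtas_irq_chip. */
	.irq_eoi	= NULL,		/* patched to icp_ops->eoi at init */
};

static int ics_foo_map(struct ics *ics, unsigned int virq)
{
	/* Refuse interrupts this controller does not own; otherwise: */
	irq_set_chip_and_handler(virq, &ics_foo_irq_chip, handle_fasteoi_irq);
	irq_set_chip_data(virq, ics);
	return 0;
}

static void ics_foo_mask_unknown(struct ics *ics, unsigned long vec)
{
	/* Mask 'vec' at the source; called for vectors with no mapping. */
}

static long ics_foo_get_server(struct ics *ics, unsigned long vec)
{
	/* Return the server currently targeted by 'vec', or -1. */
	return -1;
}

static int ics_foo_host_match(struct ics *ics, struct device_node *node)
{
	return of_device_is_compatible(node, "foo,ics");	/* placeholder */
}

static struct ics ics_foo = {
	.map		= ics_foo_map,
	.mask_unknown	= ics_foo_mask_unknown,
	.get_server	= ics_foo_get_server,
	.host_match	= ics_foo_host_match,
};

int __init ics_foo_init(void)
{
	/* Like ics_rtas_init(): route EOIs through the presentation
	 * controller, then join the list xics_host consults. */
	ics_foo_irq_chip.irq_eoi = icp_ops->eoi;
	xics_register_ics(&ics_foo);
	return 0;
}

Since xics_host_map() walks ics_list until one ->map() succeeds, several source controllers can coexist, and whichever backend recognises the interrupt claims it.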