| author | Olof Johansson <olof@lixom.net> | 2012-10-04 20:17:25 -0700 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2012-10-04 20:17:25 -0700 |
| commit | 54d69df5849ec2e660aa12ac75562618c10fb499 (patch) | |
| tree | adbfb8bcc7cc73b83bf2b784fa331911ba03573a /arch/arm/kernel | |
| parent | ad932bb6b549722a561fb31ac2fa50dcbcb3e36b (diff) | |
| parent | 46f2007c1efadfa4071c17e75f140c47f09293de (diff) | |
Merge branch 'late/kirkwood' into late/soc
Merge in the late Kirkwood branch with the OMAP late branch for upstream
submission.
Final contents described in shared tag.
Fixup remove/change conflicts in arch/arm/mach-omap2/devices.c and
drivers/spi/spi-omap2-mcspi.c.
Signed-off-by: Olof Johansson <olof@lixom.net>
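Among the changes merged below, setup.c and smp.c introduce per-platform SMP operations: setup_arch() now calls smp_set_ops(mdesc->smp), and the generic wrappers in smp.c (smp_init_cpus(), boot_secondary(), platform_cpu_die(), and friends) dispatch through the registered struct smp_operations. As a hedged sketch of the consumer side — all myplat_* names are hypothetical, and the machine_desc .smp field is assumed from the wider series, since only its use in setup_arch() appears in this diffstat:

```c
/*
 * Hedged sketch, not part of this merge: how a machine port would
 * hook into the new struct smp_operations. All myplat_* names are
 * hypothetical.
 */
#include <linux/init.h>
#include <asm/mach/arch.h>
#include <asm/smp.h>

static void __init myplat_smp_init_cpus(void)
{
	/* enumerate the possible CPUs, e.g. by reading the SCU */
}

static int __cpuinit myplat_boot_secondary(unsigned int cpu,
					   struct task_struct *idle)
{
	/* release CPU 'cpu' from its holding pen, e.g. via pen_release */
	return 0;
}

static struct smp_operations myplat_smp_ops __initdata = {
	.smp_init_cpus		= myplat_smp_init_cpus,
	.smp_boot_secondary	= myplat_boot_secondary,
	/*
	 * The remaining hooks (.smp_prepare_cpus, .smp_secondary_init,
	 * .cpu_kill, .cpu_die, .cpu_disable) are optional; unset ones
	 * fall back to the defaults in smp.c, and an unset
	 * .smp_boot_secondary would make boot_secondary() return -ENOSYS.
	 */
};

DT_MACHINE_START(MYPLAT_DT, "Hypothetical board")
	.smp	= &myplat_smp_ops,
	/* other machine_desc fields elided */
MACHINE_END
```

smp_set_ops() copies the structure by value at init time, which is why platforms can mark their smp_operations __initdata.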
Diffstat (limited to 'arch/arm/kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/kernel/Makefile | 3 |
| -rw-r--r-- | arch/arm/kernel/bios32.c | 54 |
| -rw-r--r-- | arch/arm/kernel/debug.S | 87 |
| -rw-r--r-- | arch/arm/kernel/head.S | 4 |
| -rw-r--r-- | arch/arm/kernel/perf_event.c | 347 |
| -rw-r--r-- | arch/arm/kernel/perf_event_cpu.c | 295 |
| -rw-r--r-- | arch/arm/kernel/perf_event_v6.c | 12 |
| -rw-r--r-- | arch/arm/kernel/perf_event_v7.c | 32 |
| -rw-r--r-- | arch/arm/kernel/perf_event_xscale.c | 10 |
| -rw-r--r-- | arch/arm/kernel/pmu.c | 36 |
| -rw-r--r-- | arch/arm/kernel/setup.c | 4 |
| -rw-r--r-- | arch/arm/kernel/smp.c | 72 |
12 files changed, 500 insertions, 456 deletions
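In the diff below, the CPU-specific PMU code moves out of perf_event.c into the new perf_event_cpu.c, which is a platform driver: it binds either by devicetree compatible (e.g. "arm,cortex-a15-pmu", with the init function carried in the match entry's .data) or by the platform-device name "arm-pmu", falling back to probe_current_pmu()'s CPUID matching when there is no DT match. A hedged sketch of a non-DT board description this driver would bind to — the IRQ numbers and myplat_ prefix are illustrative:

```c
/*
 * Hedged sketch, not part of this merge: a board file describing the
 * CPU PMU for the new perf_event_cpu.c driver. IRQ numbers and all
 * myplat_* names are made up for illustration.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource myplat_pmu_resources[] = {
	{ .start = 100, .end = 100, .flags = IORESOURCE_IRQ },	/* CPU0 PMU */
	{ .start = 101, .end = 101, .flags = IORESOURCE_IRQ },	/* CPU1 PMU */
};

static struct platform_device myplat_pmu_device = {
	.name		= "arm-pmu",	/* matches cpu_pmu_plat_device_ids */
	.id		= -1,
	.resource	= myplat_pmu_resources,
	.num_resources	= ARRAY_SIZE(myplat_pmu_resources),
};

static int __init myplat_add_pmu(void)
{
	return platform_device_register(&myplat_pmu_device);
}
device_initcall(myplat_add_pmu);
```

With one IRQ resource per CPU, cpu_pmu_request_irq() pins each interrupt's affinity to the matching core; a single unshiftable IRQ is tolerated on the assumption of a uniprocessor machine, exactly as the comment in the diff explains.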
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 7ad2d5cf700..1c432143073 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -69,8 +69,7 @@ obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o  obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o  obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o  obj-$(CONFIG_IWMMXT)		+= iwmmxt.o -obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o -obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o +obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o  AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt  obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 2b2f25e7fef..b244696de1a 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -13,6 +13,7 @@  #include <linux/io.h>  #include <asm/mach-types.h> +#include <asm/mach/map.h>  #include <asm/mach/pci.h>  static int debug_pci; @@ -423,6 +424,38 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  	return irq;  } +static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +{ +	int ret; +	struct pci_host_bridge_window *window; + +	if (list_empty(&sys->resources)) { +		pci_add_resource_offset(&sys->resources, +			 &iomem_resource, sys->mem_offset); +	} + +	list_for_each_entry(window, &sys->resources, list) { +		if (resource_type(window->res) == IORESOURCE_IO) +			return 0; +	} + +	sys->io_res.start = (busnr * SZ_64K) ?  : pcibios_min_io; +	sys->io_res.end = (busnr + 1) * SZ_64K - 1; +	sys->io_res.flags = IORESOURCE_IO; +	sys->io_res.name = sys->io_res_name; +	sprintf(sys->io_res_name, "PCI%d I/O", busnr); + +	ret = request_resource(&ioport_resource, &sys->io_res); +	if (ret) { +		pr_err("PCI: unable to allocate I/O port region (%d)\n", ret); +		return ret; +	} +	pci_add_resource_offset(&sys->resources, &sys->io_res, +				sys->io_offset); + +	return 0; +} +  static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  {  	struct pci_sys_data *sys = NULL; @@ -445,11 +478,10 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  		ret = hw->setup(nr, sys);  		if (ret > 0) { -			if (list_empty(&sys->resources)) { -				pci_add_resource_offset(&sys->resources, -					 &ioport_resource, sys->io_offset); -				pci_add_resource_offset(&sys->resources, -					 &iomem_resource, sys->mem_offset); +			ret = pcibios_init_resources(nr, sys); +			if (ret)  { +				kfree(sys); +				break;  			}  			if (hw->scan) @@ -627,3 +659,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,  	return 0;  } + +void __init pci_map_io_early(unsigned long pfn) +{ +	struct map_desc pci_io_desc = { +		.virtual	= PCI_IO_VIRT_BASE, +		.type		= MT_DEVICE, +		.length		= SZ_64K, +	}; + +	pci_io_desc.pfn = pfn; +	iotable_init(&pci_io_desc, 1); +} diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index c45522c3678..66f711b2e0e 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -20,90 +20,9 @@   * references to these in a production kernel!   
*/ -#if defined(CONFIG_DEBUG_ICEDCC) -		@@ debug using ARM EmbeddedICE DCC channel - -		.macro	addruart, rp, rv, tmp -		.endm - -#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c0, c5, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c0, c1, 0 -		tst	\rx, #0x20000000 -		beq	1001b -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x2000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c0, c1, 0 -		tst	\rx, #0x20000000 -		bne	1001b -1002: -		.endm - -#elif defined(CONFIG_CPU_XSCALE) - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c8, c0, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c14, c0, 0 -		tst	\rx, #0x10000000 -		beq	1001b -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x10000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c14, c0, 0 -		tst	\rx, #0x10000000 -		bne	1001b -1002: -		.endm - -#else - -		.macro	senduart, rd, rx -		mcr	p14, 0, \rd, c1, c0, 0 -		.endm - -		.macro	busyuart, rd, rx -1001: -		mrc	p14, 0, \rx, c0, c0, 0 -		tst	\rx, #2 -		beq	1001b - -		.endm - -		.macro	waituart, rd, rx -		mov	\rd, #0x2000000 -1001: -		subs	\rd, \rd, #1 -		bmi	1002f -		mrc	p14, 0, \rx, c0, c0, 0 -		tst	\rx, #2 -		bne	1001b -1002: -		.endm - -#endif	/* CONFIG_CPU_V6 */ - -#elif !defined(CONFIG_DEBUG_SEMIHOSTING) -#include <mach/debug-macro.S> -#endif	/* CONFIG_DEBUG_ICEDCC */ +#if !defined(CONFIG_DEBUG_SEMIHOSTING) +#include CONFIG_DEBUG_LL_INCLUDE +#endif  #ifdef CONFIG_MMU  		.macro	addruart_current, rx, tmp1, tmp2 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 3db960e20cb..9874d074119 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -23,8 +23,8 @@  #include <asm/thread_info.h>  #include <asm/pgtable.h> -#ifdef CONFIG_DEBUG_LL -#include <mach/debug-macro.S> +#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) +#include CONFIG_DEBUG_LL_INCLUDE  #endif  /* diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index ab243b87118..93971b1a4f0 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -12,68 +12,15 @@   */  #define pr_fmt(fmt) "hw perfevents: " fmt -#include <linux/bitmap.h> -#include <linux/interrupt.h>  #include <linux/kernel.h> -#include <linux/export.h> -#include <linux/perf_event.h>  #include <linux/platform_device.h> -#include <linux/spinlock.h> +#include <linux/pm_runtime.h>  #include <linux/uaccess.h> -#include <asm/cputype.h> -#include <asm/irq.h>  #include <asm/irq_regs.h>  #include <asm/pmu.h>  #include <asm/stacktrace.h> -/* - * ARMv6 supports a maximum of 3 events, starting from index 0. If we add - * another platform that supports more, we need to increase this to be the - * largest of all platforms. - * - * ARMv7 supports up to 32 events: - *  cycle counter CCNT + 31 events counters CNT0..30. - *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. - */ -#define ARMPMU_MAX_HWEVENTS		32 - -static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); -static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -/* Set at runtime when we know what CPU type we are. 
*/ -static struct arm_pmu *cpu_pmu; - -const char *perf_pmu_name(void) -{ -	if (!cpu_pmu) -		return NULL; - -	return cpu_pmu->pmu.name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ -	int max_events = 0; - -	if (cpu_pmu != NULL) -		max_events = cpu_pmu->num_events; - -	return max_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -#define HW_OP_UNSUPPORTED		0xFFFF - -#define C(_x) \ -	PERF_COUNT_HW_CACHE_##_x - -#define CACHE_OP_UNSUPPORTED		0xFFFF -  static int  armpmu_map_cache_event(const unsigned (*cache_map)  				      [PERF_COUNT_HW_CACHE_MAX] @@ -104,7 +51,7 @@ armpmu_map_cache_event(const unsigned (*cache_map)  }  static int -armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)  {  	int mapping = (*event_map)[config];  	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; @@ -116,19 +63,20 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config)  	return (int)(config & raw_event_mask);  } -static int map_cpu_event(struct perf_event *event, -			 const unsigned (*event_map)[PERF_COUNT_HW_MAX], -			 const unsigned (*cache_map) -					[PERF_COUNT_HW_CACHE_MAX] -					[PERF_COUNT_HW_CACHE_OP_MAX] -					[PERF_COUNT_HW_CACHE_RESULT_MAX], -			 u32 raw_event_mask) +int +armpmu_map_event(struct perf_event *event, +		 const unsigned (*event_map)[PERF_COUNT_HW_MAX], +		 const unsigned (*cache_map) +				[PERF_COUNT_HW_CACHE_MAX] +				[PERF_COUNT_HW_CACHE_OP_MAX] +				[PERF_COUNT_HW_CACHE_RESULT_MAX], +		 u32 raw_event_mask)  {  	u64 config = event->attr.config;  	switch (event->attr.type) {  	case PERF_TYPE_HARDWARE: -		return armpmu_map_event(event_map, config); +		return armpmu_map_hw_event(event_map, config);  	case PERF_TYPE_HW_CACHE:  		return armpmu_map_cache_event(cache_map, config);  	case PERF_TYPE_RAW: @@ -222,7 +170,6 @@ armpmu_stop(struct perf_event *event, int flags)  	 */  	if (!(hwc->state & PERF_HES_STOPPED)) {  		armpmu->disable(hwc, hwc->idx); -		barrier(); /* why? 
*/  		armpmu_event_update(event, hwc, hwc->idx);  		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;  	} @@ -350,99 +297,41 @@ validate_group(struct perf_event *event)  	return 0;  } -static irqreturn_t armpmu_platform_irq(int irq, void *dev) +static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)  {  	struct arm_pmu *armpmu = (struct arm_pmu *) dev;  	struct platform_device *plat_device = armpmu->plat_device;  	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); -	return plat->handle_irq(irq, dev, armpmu->handle_irq); +	if (plat && plat->handle_irq) +		return plat->handle_irq(irq, dev, armpmu->handle_irq); +	else +		return armpmu->handle_irq(irq, dev);  }  static void  armpmu_release_hardware(struct arm_pmu *armpmu)  { -	int i, irq, irqs; -	struct platform_device *pmu_device = armpmu->plat_device; -	struct arm_pmu_platdata *plat = -		dev_get_platdata(&pmu_device->dev); - -	irqs = min(pmu_device->num_resources, num_possible_cpus()); - -	for (i = 0; i < irqs; ++i) { -		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) -			continue; -		irq = platform_get_irq(pmu_device, i); -		if (irq >= 0) { -			if (plat && plat->disable_irq) -				plat->disable_irq(irq); -			free_irq(irq, armpmu); -		} -	} - -	release_pmu(armpmu->type); +	armpmu->free_irq(); +	pm_runtime_put_sync(&armpmu->plat_device->dev);  }  static int  armpmu_reserve_hardware(struct arm_pmu *armpmu)  { -	struct arm_pmu_platdata *plat; -	irq_handler_t handle_irq; -	int i, err, irq, irqs; +	int err;  	struct platform_device *pmu_device = armpmu->plat_device;  	if (!pmu_device)  		return -ENODEV; -	err = reserve_pmu(armpmu->type); +	pm_runtime_get_sync(&pmu_device->dev); +	err = armpmu->request_irq(armpmu_dispatch_irq);  	if (err) { -		pr_warning("unable to reserve pmu\n"); +		armpmu_release_hardware(armpmu);  		return err;  	} -	plat = dev_get_platdata(&pmu_device->dev); -	if (plat && plat->handle_irq) -		handle_irq = armpmu_platform_irq; -	else -		handle_irq = armpmu->handle_irq; - -	irqs = min(pmu_device->num_resources, num_possible_cpus()); -	if (irqs < 1) { -		pr_err("no irqs for PMUs defined\n"); -		return -ENODEV; -	} - -	for (i = 0; i < irqs; ++i) { -		err = 0; -		irq = platform_get_irq(pmu_device, i); -		if (irq < 0) -			continue; - -		/* -		 * If we have a single PMU interrupt that we can't shift, -		 * assume that we're running on a uniprocessor machine and -		 * continue. Otherwise, continue without this interrupt. 
-		 */ -		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { -			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", -				    irq, i); -			continue; -		} - -		err = request_irq(irq, handle_irq, -				  IRQF_DISABLED | IRQF_NOBALANCING, -				  "arm-pmu", armpmu); -		if (err) { -			pr_err("unable to request IRQ%d for ARM PMU counters\n", -				irq); -			armpmu_release_hardware(armpmu); -			return err; -		} else if (plat && plat->enable_irq) -			plat->enable_irq(irq); - -		cpumask_set_cpu(i, &armpmu->active_irqs); -	} -  	return 0;  } @@ -581,6 +470,32 @@ static void armpmu_disable(struct pmu *pmu)  	armpmu->stop();  } +#ifdef CONFIG_PM_RUNTIME +static int armpmu_runtime_resume(struct device *dev) +{ +	struct arm_pmu_platdata *plat = dev_get_platdata(dev); + +	if (plat && plat->runtime_resume) +		return plat->runtime_resume(dev); + +	return 0; +} + +static int armpmu_runtime_suspend(struct device *dev) +{ +	struct arm_pmu_platdata *plat = dev_get_platdata(dev); + +	if (plat && plat->runtime_suspend) +		return plat->runtime_suspend(dev); + +	return 0; +} +#endif + +const struct dev_pm_ops armpmu_dev_pm_ops = { +	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) +}; +  static void __init armpmu_init(struct arm_pmu *armpmu)  {  	atomic_set(&armpmu->active_events, 0); @@ -598,174 +513,14 @@ static void __init armpmu_init(struct arm_pmu *armpmu)  	};  } -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) +int armpmu_register(struct arm_pmu *armpmu, char *name, int type)  {  	armpmu_init(armpmu); +	pr_info("enabled with %s PMU driver, %d counters available\n", +			armpmu->name, armpmu->num_events);  	return perf_pmu_register(&armpmu->pmu, name, type);  } -/* Include the PMU-specific implementations. */ -#include "perf_event_xscale.c" -#include "perf_event_v6.c" -#include "perf_event_v7.c" - -/* - * Ensure the PMU has sane values out of reset. - * This requires SMP to be available, so exists as a separate initcall. - */ -static int __init -cpu_pmu_reset(void) -{ -	if (cpu_pmu && cpu_pmu->reset) -		return on_each_cpu(cpu_pmu->reset, NULL, 1); -	return 0; -} -arch_initcall(cpu_pmu_reset); - -/* - * PMU platform driver and devicetree bindings. 
- */ -static struct of_device_id armpmu_of_device_ids[] = { -	{.compatible = "arm,cortex-a9-pmu"}, -	{.compatible = "arm,cortex-a8-pmu"}, -	{.compatible = "arm,arm1136-pmu"}, -	{.compatible = "arm,arm1176-pmu"}, -	{}, -}; - -static struct platform_device_id armpmu_plat_device_ids[] = { -	{.name = "arm-pmu"}, -	{}, -}; - -static int __devinit armpmu_device_probe(struct platform_device *pdev) -{ -	if (!cpu_pmu) -		return -ENODEV; - -	cpu_pmu->plat_device = pdev; -	return 0; -} - -static struct platform_driver armpmu_driver = { -	.driver		= { -		.name	= "arm-pmu", -		.of_match_table = armpmu_of_device_ids, -	}, -	.probe		= armpmu_device_probe, -	.id_table	= armpmu_plat_device_ids, -}; - -static int __init register_pmu_driver(void) -{ -	return platform_driver_register(&armpmu_driver); -} -device_initcall(register_pmu_driver); - -static struct pmu_hw_events *armpmu_get_cpu_events(void) -{ -	return &__get_cpu_var(cpu_hw_events); -} - -static void __init cpu_pmu_init(struct arm_pmu *armpmu) -{ -	int cpu; -	for_each_possible_cpu(cpu) { -		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); -		events->events = per_cpu(hw_events, cpu); -		events->used_mask = per_cpu(used_mask, cpu); -		raw_spin_lock_init(&events->pmu_lock); -	} -	armpmu->get_hw_events = armpmu_get_cpu_events; -	armpmu->type = ARM_PMU_DEVICE_CPU; -} - -/* - * PMU hardware loses all context when a CPU goes offline. - * When a CPU is hotplugged back in, since some hardware registers are - * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading - * junk values out of them. - */ -static int __cpuinit pmu_cpu_notify(struct notifier_block *b, -					unsigned long action, void *hcpu) -{ -	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) -		return NOTIFY_DONE; - -	if (cpu_pmu && cpu_pmu->reset) -		cpu_pmu->reset(NULL); - -	return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata pmu_cpu_notifier = { -	.notifier_call = pmu_cpu_notify, -}; - -/* - * CPU PMU identification and registration. - */ -static int __init -init_hw_perf_events(void) -{ -	unsigned long cpuid = read_cpuid_id(); -	unsigned long implementor = (cpuid & 0xFF000000) >> 24; -	unsigned long part_number = (cpuid & 0xFFF0); - -	/* ARM Ltd CPUs. */ -	if (0x41 == implementor) { -		switch (part_number) { -		case 0xB360:	/* ARM1136 */ -		case 0xB560:	/* ARM1156 */ -		case 0xB760:	/* ARM1176 */ -			cpu_pmu = armv6pmu_init(); -			break; -		case 0xB020:	/* ARM11mpcore */ -			cpu_pmu = armv6mpcore_pmu_init(); -			break; -		case 0xC080:	/* Cortex-A8 */ -			cpu_pmu = armv7_a8_pmu_init(); -			break; -		case 0xC090:	/* Cortex-A9 */ -			cpu_pmu = armv7_a9_pmu_init(); -			break; -		case 0xC050:	/* Cortex-A5 */ -			cpu_pmu = armv7_a5_pmu_init(); -			break; -		case 0xC0F0:	/* Cortex-A15 */ -			cpu_pmu = armv7_a15_pmu_init(); -			break; -		case 0xC070:	/* Cortex-A7 */ -			cpu_pmu = armv7_a7_pmu_init(); -			break; -		} -	/* Intel CPUs [xscale]. */ -	} else if (0x69 == implementor) { -		part_number = (cpuid >> 13) & 0x7; -		switch (part_number) { -		case 1: -			cpu_pmu = xscale1pmu_init(); -			break; -		case 2: -			cpu_pmu = xscale2pmu_init(); -			break; -		} -	} - -	if (cpu_pmu) { -		pr_info("enabled with %s PMU driver, %d counters available\n", -			cpu_pmu->name, cpu_pmu->num_events); -		cpu_pmu_init(cpu_pmu); -		register_cpu_notifier(&pmu_cpu_notifier); -		armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); -	} else { -		pr_info("no hardware support available\n"); -	} - -	return 0; -} -early_initcall(init_hw_perf_events); -  /*   * Callchain handling code.   
*/ diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c new file mode 100644 index 00000000000..8d7d8d4de9d --- /dev/null +++ b/arch/arm/kernel/perf_event_cpu.c @@ -0,0 +1,295 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ +#define pr_fmt(fmt) "CPU PMU: " fmt + +#include <linux/bitmap.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <asm/cputype.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> + +/* Set at runtime when we know what CPU type we are. */ +static struct arm_pmu *cpu_pmu; + +static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); +static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); +static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); + +/* + * Despite the names, these two functions are CPU-specific and are used + * by the OProfile/perf code. + */ +const char *perf_pmu_name(void) +{ +	if (!cpu_pmu) +		return NULL; + +	return cpu_pmu->pmu.name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ +	int max_events = 0; + +	if (cpu_pmu != NULL) +		max_events = cpu_pmu->num_events; + +	return max_events; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +/* Include the PMU-specific implementations. */ +#include "perf_event_xscale.c" +#include "perf_event_v6.c" +#include "perf_event_v7.c" + +static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) +{ +	return &__get_cpu_var(cpu_hw_events); +} + +static void cpu_pmu_free_irq(void) +{ +	int i, irq, irqs; +	struct platform_device *pmu_device = cpu_pmu->plat_device; + +	irqs = min(pmu_device->num_resources, num_possible_cpus()); + +	for (i = 0; i < irqs; ++i) { +		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) +			continue; +		irq = platform_get_irq(pmu_device, i); +		if (irq >= 0) +			free_irq(irq, cpu_pmu); +	} +} + +static int cpu_pmu_request_irq(irq_handler_t handler) +{ +	int i, err, irq, irqs; +	struct platform_device *pmu_device = cpu_pmu->plat_device; + +	if (!pmu_device) +		return -ENODEV; + +	irqs = min(pmu_device->num_resources, num_possible_cpus()); +	if (irqs < 1) { +		pr_err("no irqs for PMUs defined\n"); +		return -ENODEV; +	} + +	for (i = 0; i < irqs; ++i) { +		err = 0; +		irq = platform_get_irq(pmu_device, i); +		if (irq < 0) +			continue; + +		/* +		 * If we have a single PMU interrupt that we can't shift, +		 * assume that we're running on a uniprocessor machine and +		 * continue. Otherwise, continue without this interrupt. 
+		 */ +		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { +			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", +				    irq, i); +			continue; +		} + +		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", +				  cpu_pmu); +		if (err) { +			pr_err("unable to request IRQ%d for ARM PMU counters\n", +				irq); +			return err; +		} + +		cpumask_set_cpu(i, &cpu_pmu->active_irqs); +	} + +	return 0; +} + +static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) +{ +	int cpu; +	for_each_possible_cpu(cpu) { +		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); +		events->events = per_cpu(hw_events, cpu); +		events->used_mask = per_cpu(used_mask, cpu); +		raw_spin_lock_init(&events->pmu_lock); +	} + +	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events; +	cpu_pmu->request_irq	= cpu_pmu_request_irq; +	cpu_pmu->free_irq	= cpu_pmu_free_irq; + +	/* Ensure the PMU has sane values out of reset. */ +	if (cpu_pmu && cpu_pmu->reset) +		on_each_cpu(cpu_pmu->reset, NULL, 1); +} + +/* + * PMU hardware loses all context when a CPU goes offline. + * When a CPU is hotplugged back in, since some hardware registers are + * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading + * junk values out of them. + */ +static int __cpuinit cpu_pmu_notify(struct notifier_block *b, +				    unsigned long action, void *hcpu) +{ +	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) +		return NOTIFY_DONE; + +	if (cpu_pmu && cpu_pmu->reset) +		cpu_pmu->reset(NULL); + +	return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { +	.notifier_call = cpu_pmu_notify, +}; + +/* + * PMU platform driver and devicetree bindings. + */ +static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = { +	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init}, +	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init}, +	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init}, +	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init}, +	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init}, +	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init}, +	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init}, +	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init}, +	{}, +}; + +static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { +	{.name = "arm-pmu"}, +	{}, +}; + +/* + * CPU PMU identification and probing. + */ +static struct arm_pmu *__devinit probe_current_pmu(void) +{ +	struct arm_pmu *pmu = NULL; +	int cpu = get_cpu(); +	unsigned long cpuid = read_cpuid_id(); +	unsigned long implementor = (cpuid & 0xFF000000) >> 24; +	unsigned long part_number = (cpuid & 0xFFF0); + +	pr_info("probing PMU on CPU %d\n", cpu); + +	/* ARM Ltd CPUs. */ +	if (0x41 == implementor) { +		switch (part_number) { +		case 0xB360:	/* ARM1136 */ +		case 0xB560:	/* ARM1156 */ +		case 0xB760:	/* ARM1176 */ +			pmu = armv6pmu_init(); +			break; +		case 0xB020:	/* ARM11mpcore */ +			pmu = armv6mpcore_pmu_init(); +			break; +		case 0xC080:	/* Cortex-A8 */ +			pmu = armv7_a8_pmu_init(); +			break; +		case 0xC090:	/* Cortex-A9 */ +			pmu = armv7_a9_pmu_init(); +			break; +		case 0xC050:	/* Cortex-A5 */ +			pmu = armv7_a5_pmu_init(); +			break; +		case 0xC0F0:	/* Cortex-A15 */ +			pmu = armv7_a15_pmu_init(); +			break; +		case 0xC070:	/* Cortex-A7 */ +			pmu = armv7_a7_pmu_init(); +			break; +		} +	/* Intel CPUs [xscale]. 
*/ +	} else if (0x69 == implementor) { +		part_number = (cpuid >> 13) & 0x7; +		switch (part_number) { +		case 1: +			pmu = xscale1pmu_init(); +			break; +		case 2: +			pmu = xscale2pmu_init(); +			break; +		} +	} + +	put_cpu(); +	return pmu; +} + +static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) +{ +	const struct of_device_id *of_id; +	struct arm_pmu *(*init_fn)(void); +	struct device_node *node = pdev->dev.of_node; + +	if (cpu_pmu) { +		pr_info("attempt to register multiple PMU devices!"); +		return -ENOSPC; +	} + +	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { +		init_fn = of_id->data; +		cpu_pmu = init_fn(); +	} else { +		cpu_pmu = probe_current_pmu(); +	} + +	if (!cpu_pmu) +		return -ENODEV; + +	cpu_pmu->plat_device = pdev; +	cpu_pmu_init(cpu_pmu); +	register_cpu_notifier(&cpu_pmu_hotplug_notifier); +	armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); + +	return 0; +} + +static struct platform_driver cpu_pmu_driver = { +	.driver		= { +		.name	= "arm-pmu", +		.pm	= &armpmu_dev_pm_ops, +		.of_match_table = cpu_pmu_of_device_ids, +	}, +	.probe		= cpu_pmu_device_probe, +	.id_table	= cpu_pmu_plat_device_ids, +}; + +static int __init register_pmu_driver(void) +{ +	return platform_driver_register(&cpu_pmu_driver); +} +device_initcall(register_pmu_driver); diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c90fcb2b696..6ccc0797174 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -645,7 +645,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,  static int armv6_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv6_perf_map, +	return armpmu_map_event(event, &armv6_perf_map,  				&armv6_perf_cache_map, 0xFF);  } @@ -664,7 +664,7 @@ static struct arm_pmu armv6pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void)  {  	return &armv6pmu;  } @@ -679,7 +679,7 @@ static struct arm_pmu *__init armv6pmu_init(void)  static int armv6mpcore_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv6mpcore_perf_map, +	return armpmu_map_event(event, &armv6mpcore_perf_map,  				&armv6mpcore_perf_cache_map, 0xFF);  } @@ -698,17 +698,17 @@ static struct arm_pmu armv6mpcore_pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)  {  	return &armv6mpcore_pmu;  }  #else -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f04070bd218..bd4b090ebcf 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1204,31 +1204,31 @@ static void armv7pmu_reset(void *info)  static int armv7_a8_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a8_perf_map, +	return armpmu_map_event(event, &armv7_a8_perf_map,  				&armv7_a8_perf_cache_map, 0xFF);  }  static int armv7_a9_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a9_perf_map, +	return armpmu_map_event(event, &armv7_a9_perf_map,  				&armv7_a9_perf_cache_map, 0xFF);  }  static int armv7_a5_map_event(struct perf_event 
*event)  { -	return map_cpu_event(event, &armv7_a5_perf_map, +	return armpmu_map_event(event, &armv7_a5_perf_map,  				&armv7_a5_perf_cache_map, 0xFF);  }  static int armv7_a15_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a15_perf_map, +	return armpmu_map_event(event, &armv7_a15_perf_map,  				&armv7_a15_perf_cache_map, 0xFF);  }  static int armv7_a7_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &armv7_a7_perf_map, +	return armpmu_map_event(event, &armv7_a7_perf_map,  				&armv7_a7_perf_cache_map, 0xFF);  } @@ -1245,7 +1245,7 @@ static struct arm_pmu armv7pmu = {  	.max_period		= (1LLU << 32) - 1,  }; -static u32 __init armv7_read_num_pmnc_events(void) +static u32 __devinit armv7_read_num_pmnc_events(void)  {  	u32 nb_cnt; @@ -1256,7 +1256,7 @@ static u32 __init armv7_read_num_pmnc_events(void)  	return nb_cnt + 1;  } -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A8";  	armv7pmu.map_event	= armv7_a8_map_event; @@ -1264,7 +1264,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A9";  	armv7pmu.map_event	= armv7_a9_map_event; @@ -1272,7 +1272,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A5";  	armv7pmu.map_event	= armv7_a5_map_event; @@ -1280,7 +1280,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A15";  	armv7pmu.map_event	= armv7_a15_map_event; @@ -1289,7 +1289,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)  	return &armv7pmu;  } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void)  {  	armv7pmu.name		= "ARMv7 Cortex-A7";  	armv7pmu.map_event	= armv7_a7_map_event; @@ -1298,27 +1298,27 @@ static struct arm_pmu *__init armv7_a7_pmu_init(void)  	return &armv7pmu;  }  #else -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index f759fe0bab6..426e19f380a 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -430,7 +430,7 @@ xscale1pmu_write_counter(int counter, u32 val)  static int xscale_map_event(struct perf_event *event)  { -	return map_cpu_event(event, &xscale_perf_map, +	return armpmu_map_event(event, &xscale_perf_map,  				&xscale_perf_cache_map, 0xFF);  } @@ -449,7 +449,7 @@ static 
struct arm_pmu xscale1pmu = {  	.max_period	= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void)  {  	return &xscale1pmu;  } @@ -816,17 +816,17 @@ static struct arm_pmu xscale2pmu = {  	.max_period	= (1LLU << 32) - 1,  }; -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void)  {  	return &xscale2pmu;  }  #else -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void)  {  	return NULL;  } -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void)  {  	return NULL;  } diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c deleted file mode 100644 index 2334bf8a650..00000000000 --- a/arch/arm/kernel/pmu.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - *  linux/arch/arm/kernel/pmu.c - * - *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - *  Copyright (C) 2010 ARM Ltd, Will Deacon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/pmu.h> - -/* - * PMU locking to ensure mutual exclusion between different subsystems. - */ -static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)]; - -int -reserve_pmu(enum arm_pmu_type type) -{ -	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0; -} -EXPORT_SYMBOL_GPL(reserve_pmu); - -void -release_pmu(enum arm_pmu_type type) -{ -	clear_bit_unlock(type, pmu_lock); -} -EXPORT_SYMBOL_GPL(release_pmu); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a81dcecc734..725f9f2a954 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -977,8 +977,10 @@ void __init setup_arch(char **cmdline_p)  	unflatten_device_tree();  #ifdef CONFIG_SMP -	if (is_smp()) +	if (is_smp()) { +		smp_set_ops(mdesc->smp);  		smp_init_cpus(); +	}  #endif  	reserve_crashkernel(); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ebd8ad274d7..aa4ffe6e5ec 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -19,7 +19,6 @@  #include <linux/mm.h>  #include <linux/err.h>  #include <linux/cpu.h> -#include <linux/smp.h>  #include <linux/seq_file.h>  #include <linux/irq.h>  #include <linux/percpu.h> @@ -27,6 +26,7 @@  #include <linux/completion.h>  #include <linux/atomic.h> +#include <asm/smp.h>  #include <asm/cacheflush.h>  #include <asm/cpu.h>  #include <asm/cputype.h> @@ -42,6 +42,7 @@  #include <asm/ptrace.h>  #include <asm/localtimer.h>  #include <asm/smp_plat.h> +#include <asm/mach/arch.h>  /*   * as from 2.5, kernels no longer have an init_tasks structure @@ -50,6 +51,12 @@   */  struct secondary_data secondary_data; +/* + * control for which core is the next to come out of the secondary + * boot "holding pen" + */ +volatile int __cpuinitdata pen_release = -1; +  enum ipi_msg_type {  	IPI_TIMER = 2,  	IPI_RESCHEDULE, @@ -60,6 +67,14 @@ enum ipi_msg_type {  static DECLARE_COMPLETION(cpu_running); +static struct smp_operations smp_ops; + +void __init smp_set_ops(struct smp_operations *ops) +{ +	if (ops) +		smp_ops = *ops; +}; +  int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)  {  	int ret; @@ -100,13 +115,64 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)  	return ret;  } +/* platform specific SMP operations 
*/ +void __init smp_init_cpus(void) +{ +	if (smp_ops.smp_init_cpus) +		smp_ops.smp_init_cpus(); +} + +static void __init platform_smp_prepare_cpus(unsigned int max_cpus) +{ +	if (smp_ops.smp_prepare_cpus) +		smp_ops.smp_prepare_cpus(max_cpus); +} + +static void __cpuinit platform_secondary_init(unsigned int cpu) +{ +	if (smp_ops.smp_secondary_init) +		smp_ops.smp_secondary_init(cpu); +} + +int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +{ +	if (smp_ops.smp_boot_secondary) +		return smp_ops.smp_boot_secondary(cpu, idle); +	return -ENOSYS; +} +  #ifdef CONFIG_HOTPLUG_CPU  static void percpu_timer_stop(void); +static int platform_cpu_kill(unsigned int cpu) +{ +	if (smp_ops.cpu_kill) +		return smp_ops.cpu_kill(cpu); +	return 1; +} + +static void platform_cpu_die(unsigned int cpu) +{ +	if (smp_ops.cpu_die) +		smp_ops.cpu_die(cpu); +} + +static int platform_cpu_disable(unsigned int cpu) +{ +	if (smp_ops.cpu_disable) +		return smp_ops.cpu_disable(cpu); + +	/* +	 * By default, allow disabling all CPUs except the first one, +	 * since this is special on a lot of platforms, e.g. because +	 * of clock tick interrupts. +	 */ +	return cpu == 0 ? -EPERM : 0; +}  /*   * __cpu_disable runs on the processor to be shutdown.   */ -int __cpu_disable(void) +int __cpuinit __cpu_disable(void)  {  	unsigned int cpu = smp_processor_id();  	int ret; @@ -149,7 +215,7 @@ static DECLARE_COMPLETION(cpu_died);   * called on the thread which is asking for a CPU to be shutdown -   * waits until shutdown has completed, or it is timed out.   */ -void __cpu_die(unsigned int cpu) +void __cpuinit __cpu_die(unsigned int cpu)  {  	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {  		pr_err("CPU%u: cpu didn't die\n", cpu);  |
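Finally, the bios32.c changes above give ARM a fixed virtual PCI I/O space: pcibios_init_resources() carves a per-bus 64K "PCIn I/O" region out of ioport_resource when the host driver registers no I/O window of its own, and the new pci_map_io_early() helper statically maps a platform's I/O aperture at PCI_IO_VIRT_BASE via iotable_init(). A hedged sketch of a caller — the physical base address is made up:

```c
/*
 * Hedged sketch, not part of this merge: a PCI host platform mapping
 * its 64K I/O window with the new pci_map_io_early() helper from its
 * machine ->map_io() callback. The physical address is illustrative.
 */
#include <linux/init.h>
#include <asm/mach/pci.h>
#include <asm/memory.h>

#define MYPLAT_PCIE_IO_PHYS	0xf0000000UL	/* illustrative */

static void __init myplat_map_io(void)
{
	/* installs the SZ_64K MT_DEVICE mapping at PCI_IO_VIRT_BASE */
	pci_map_io_early(__phys_to_pfn(MYPLAT_PCIE_IO_PHYS));
}
```

This has to run at map_io time (hence the __init early helper) so the static mapping exists before any host bridge driver touches I/O port space.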