Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/abort-macro.S      2
-rw-r--r--  arch/arm/mm/alignment.c       20
-rw-r--r--  arch/arm/mm/cache-l2x0.c     235
-rw-r--r--  arch/arm/mm/cache-v7.S        20
-rw-r--r--  arch/arm/mm/dma-mapping.c      8
-rw-r--r--  arch/arm/mm/fault.c            1
-rw-r--r--  arch/arm/mm/init.c             9
-rw-r--r--  arch/arm/mm/ioremap.c         21
-rw-r--r--  arch/arm/mm/mm.h               4
-rw-r--r--  arch/arm/mm/mmu.c             18
-rw-r--r--  arch/arm/mm/proc-arm920.S      2
-rw-r--r--  arch/arm/mm/proc-arm926.S      2
-rw-r--r--  arch/arm/mm/proc-sa1100.S     10
-rw-r--r--  arch/arm/mm/proc-v6.S         16
-rw-r--r--  arch/arm/mm/proc-v7.S          8
-rw-r--r--  arch/arm/mm/proc-xsc3.S        6
16 files changed, 344 insertions, 38 deletions
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index 52162d59407..2cbf68ef0e8 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S @@ -17,7 +17,7 @@  	cmp	\tmp, # 0x5600			@ Is it ldrsb?  	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes  	tst	\tmp, #1 << 11			@ L = 0 -> write -	orreq	\psr, \psr, #1 << 11		@ yes. +	orreq	\fsr, \fsr, #1 << 11		@ yes.  	b	do_DataAbort  not_thumb:  	.endm diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index cfbcf8b9559..c335c76e0d8 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -86,16 +86,6 @@ core_param(alignment, ai_usermode, int, 0600);  #define UM_FIXUP	(1 << 1)  #define UM_SIGNAL	(1 << 2) -#ifdef CONFIG_PROC_FS -static const char *usermode_action[] = { -	"ignored", -	"warn", -	"fixup", -	"fixup+warn", -	"signal", -	"signal+warn" -}; -  /* Return true if and only if the ARMv6 unaligned access model is in use. */  static bool cpu_is_v6_unaligned(void)  { @@ -123,6 +113,16 @@ static int safe_usermode(int new_usermode, bool warn)  	return new_usermode;  } +#ifdef CONFIG_PROC_FS +static const char *usermode_action[] = { +	"ignored", +	"warn", +	"fixup", +	"fixup+warn", +	"signal", +	"signal+warn" +}; +  static int alignment_proc_show(struct seq_file *m, void *v)  {  	seq_printf(m, "User:\t\t%lu\n", ai_user); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 44c086710d2..3f9b9980478 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -16,9 +16,12 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA   */ +#include <linux/err.h>  #include <linux/init.h>  #include <linux/spinlock.h>  #include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h>  #include <asm/cacheflush.h>  #include <asm/hardware/cache-l2x0.h> @@ -30,11 +33,19 @@ static DEFINE_SPINLOCK(l2x0_lock);  static uint32_t l2x0_way_mask;	/* Bitmask of active ways */  static uint32_t l2x0_size; +struct l2x0_regs l2x0_saved_regs; + +struct l2x0_of_data { +	void (*setup)(const struct device_node *, __u32 *, __u32 *); +	void (*save)(void); +	void (*resume)(void); +}; +  static inline void cache_wait_way(void __iomem *reg, unsigned long mask)  {  	/* wait for cache operation by line or way to complete */  	while (readl_relaxed(reg) & mask) -		; +		cpu_relax();  }  #ifdef CONFIG_CACHE_PL310 @@ -277,6 +288,25 @@ static void l2x0_disable(void)  	spin_unlock_irqrestore(&l2x0_lock, flags);  } +static void l2x0_unlock(__u32 cache_id) +{ +	int lockregs; +	int i; + +	if (cache_id == L2X0_CACHE_ID_PART_L310) +		lockregs = 8; +	else +		/* L210 and unknown types */ +		lockregs = 1; + +	for (i = 0; i < lockregs; i++) { +		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + +			       i * L2X0_LOCKDOWN_STRIDE); +		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + +			       i * L2X0_LOCKDOWN_STRIDE); +	} +} +  void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)  {  	__u32 aux; @@ -328,10 +358,14 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)  	 * accessing the below registers will fault.  	 
*/  	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { +		/* Make sure that I&D is not locked down when starting */ +		l2x0_unlock(cache_id);  		/* l2x0 controller is disabled */  		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); +		l2x0_saved_regs.aux_ctrl = aux; +  		l2x0_inv_all();  		/* enable L2X0 */ @@ -351,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)  	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",  			ways, cache_id, aux, l2x0_size);  } + +#ifdef CONFIG_OF +static void __init l2x0_of_setup(const struct device_node *np, +				 __u32 *aux_val, __u32 *aux_mask) +{ +	u32 data[2] = { 0, 0 }; +	u32 tag = 0; +	u32 dirty = 0; +	u32 val = 0, mask = 0; + +	of_property_read_u32(np, "arm,tag-latency", &tag); +	if (tag) { +		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; +		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; +	} + +	of_property_read_u32_array(np, "arm,data-latency", +				   data, ARRAY_SIZE(data)); +	if (data[0] && data[1]) { +		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | +			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; +		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | +		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); +	} + +	of_property_read_u32(np, "arm,dirty-latency", &dirty); +	if (dirty) { +		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; +		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; +	} + +	*aux_val &= ~mask; +	*aux_val |= val; +	*aux_mask &= ~mask; +} + +static void __init pl310_of_setup(const struct device_node *np, +				  __u32 *aux_val, __u32 *aux_mask) +{ +	u32 data[3] = { 0, 0, 0 }; +	u32 tag[3] = { 0, 0, 0 }; +	u32 filter[2] = { 0, 0 }; + +	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); +	if (tag[0] && tag[1] && tag[2]) +		writel_relaxed( +			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | +			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | +			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), +			l2x0_base + L2X0_TAG_LATENCY_CTRL); + +	of_property_read_u32_array(np, "arm,data-latency", +				   data, ARRAY_SIZE(data)); +	if (data[0] && data[1] && data[2]) +		writel_relaxed( +			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | +			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | +			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), +			l2x0_base + L2X0_DATA_LATENCY_CTRL); + +	of_property_read_u32_array(np, "arm,filter-ranges", +				   filter, ARRAY_SIZE(filter)); +	if (filter[1]) { +		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), +			       l2x0_base + L2X0_ADDR_FILTER_END); +		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, +			       l2x0_base + L2X0_ADDR_FILTER_START); +	} +} + +static void __init pl310_save(void) +{ +	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & +		L2X0_CACHE_ID_RTL_MASK; + +	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + +		L2X0_TAG_LATENCY_CTRL); +	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + +		L2X0_DATA_LATENCY_CTRL); +	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + +		L2X0_ADDR_FILTER_END); +	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + +		L2X0_ADDR_FILTER_START); + +	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { +		/* +		 * From r2p0, there is Prefetch offset/control register +		 */ +		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + +			L2X0_PREFETCH_CTRL); +		/* +		 * From r3p0, there is Power control register +		 */ +		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) +			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base 
+ +				L2X0_POWER_CTRL); +	} +} + +static void l2x0_resume(void) +{ +	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { +		/* restore aux ctrl and enable l2 */ +		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); + +		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + +			L2X0_AUX_CTRL); + +		l2x0_inv_all(); + +		writel_relaxed(1, l2x0_base + L2X0_CTRL); +	} +} + +static void pl310_resume(void) +{ +	u32 l2x0_revision; + +	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { +		/* restore pl310 setup */ +		writel_relaxed(l2x0_saved_regs.tag_latency, +			l2x0_base + L2X0_TAG_LATENCY_CTRL); +		writel_relaxed(l2x0_saved_regs.data_latency, +			l2x0_base + L2X0_DATA_LATENCY_CTRL); +		writel_relaxed(l2x0_saved_regs.filter_end, +			l2x0_base + L2X0_ADDR_FILTER_END); +		writel_relaxed(l2x0_saved_regs.filter_start, +			l2x0_base + L2X0_ADDR_FILTER_START); + +		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & +			L2X0_CACHE_ID_RTL_MASK; + +		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { +			writel_relaxed(l2x0_saved_regs.prefetch_ctrl, +				l2x0_base + L2X0_PREFETCH_CTRL); +			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) +				writel_relaxed(l2x0_saved_regs.pwr_ctrl, +					l2x0_base + L2X0_POWER_CTRL); +		} +	} + +	l2x0_resume(); +} + +static const struct l2x0_of_data pl310_data = { +	pl310_of_setup, +	pl310_save, +	pl310_resume, +}; + +static const struct l2x0_of_data l2x0_data = { +	l2x0_of_setup, +	NULL, +	l2x0_resume, +}; + +static const struct of_device_id l2x0_ids[] __initconst = { +	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, +	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, +	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, +	{} +}; + +int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) +{ +	struct device_node *np; +	struct l2x0_of_data *data; +	struct resource res; + +	np = of_find_matching_node(NULL, l2x0_ids); +	if (!np) +		return -ENODEV; + +	if (of_address_to_resource(np, 0, &res)) +		return -ENODEV; + +	l2x0_base = ioremap(res.start, resource_size(&res)); +	if (!l2x0_base) +		return -ENOMEM; + +	l2x0_saved_regs.phy_base = res.start; + +	data = of_match_node(l2x0_ids, np)->data; + +	/* L2 configuration can only be changed if the cache is disabled */ +	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { +		if (data->setup) +			data->setup(np, &aux_val, &aux_mask); +	} + +	if (data->save) +		data->save(); + +	l2x0_init(l2x0_base, aux_val, aux_mask); + +	outer_cache.resume = data->resume; +	return 0; +} +#endif diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 3b24bfa3b82..07c4bc8ea0a 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)  	dcache_line_size r2, r3  	sub	r3, r2, #1  	bic	r12, r0, r3 +#ifdef CONFIG_ARM_ERRATA_764369 +	ALT_SMP(W(dsb)) +	ALT_UP(W(nop)) +#endif  1:   USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification  	add	r12, r12, r2 @@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)  	add	r1, r0, r1  	sub	r3, r2, #1  	bic	r0, r0, r3 +#ifdef CONFIG_ARM_ERRATA_764369 +	ALT_SMP(W(dsb)) +	ALT_UP(W(nop)) +#endif  1:  	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line  	add	r0, r0, r2 @@ -247,6 +255,10 @@ v7_dma_inv_range:  	sub	r3, r2, #1  	tst	r0, r3  	bic	r0, r0, r3 +#ifdef CONFIG_ARM_ERRATA_764369 +	ALT_SMP(W(dsb)) +	ALT_UP(W(nop)) +#endif  	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line  	tst	r1, r3 @@ -270,6 +282,10 @@ v7_dma_clean_range:  	dcache_line_size r2, r3 
 	sub	r3, r2, #1  	bic	r0, r0, r3 +#ifdef CONFIG_ARM_ERRATA_764369 +	ALT_SMP(W(dsb)) +	ALT_UP(W(nop)) +#endif  1:  	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line  	add	r0, r0, r2 @@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)  	dcache_line_size r2, r3  	sub	r3, r2, #1  	bic	r0, r0, r3 +#ifdef CONFIG_ARM_ERRATA_764369 +	ALT_SMP(W(dsb)) +	ALT_UP(W(nop)) +#endif  1:  	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line  	add	r0, r0, r2 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0a0a1e7c20d..235eb775fc7 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -123,8 +123,8 @@ static void __dma_free_buffer(struct page *page, size_t size)  #endif  #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) -#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) -#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) +#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT) +#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)  /*   * These are the page tables (2MB each) covering uncached, DMA consistent allocations @@ -183,7 +183,7 @@ static int __init consistent_init(void)  		}  		consistent_pte[i++] = pte; -		base += (1 << PGDIR_SHIFT); +		base += PMD_SIZE;  	} while (base < CONSISTENT_END);  	return ret; @@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,  	if (addr)  		*handle = pfn_to_dma(dev, page_to_pfn(page)); +	else +		__dma_free_buffer(page, size);  	return addr;  } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 3b5ea68acbb..aa33949fef6 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -20,6 +20,7 @@  #include <linux/highmem.h>  #include <linux/perf_event.h> +#include <asm/exception.h>  #include <asm/system.h>  #include <asm/pgtable.h>  #include <asm/tlbflush.h> diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 91bca355cd3..f8037ba338a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,  #ifdef CONFIG_HAVE_ARCH_PFN_VALID  int pfn_valid(unsigned long pfn)  { -	return memblock_is_memory(pfn << PAGE_SHIFT); +	return memblock_is_memory(__pfn_to_phys(pfn));  }  EXPORT_SYMBOL(pfn_valid);  #endif @@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)  		 */  		bank_start = min(bank_start,  				 ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#else +		/* +		 * Align down here since the VM subsystem insists that the +		 * memmap entries are valid from the bank start aligned to +		 * MAX_ORDER_NR_PAGES. +		 */ +		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);  #endif  		/*  		 * If we had a previous bank, and there is a space diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ab506272b2d..bdb248c4f55 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -289,6 +289,27 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)  }  EXPORT_SYMBOL(__arm_ioremap); +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space as memory. Needed when the kernel wants to execute + * code in external memory. This is needed for reprogramming source + * clocks that would affect normal memory for example. Please see + * CONFIG_GENERIC_ALLOCATOR for allocating external memory. 
+ */ +void __iomem * +__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) +{ +	unsigned int mtype; + +	if (cached) +		mtype = MT_MEMORY; +	else +		mtype = MT_MEMORY_NONCACHED; + +	return __arm_ioremap_caller(phys_addr, size, mtype, +			__builtin_return_address(0)); +} +  void __iounmap(volatile void __iomem *io_addr)  {  	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 010566799c8..ad7cce3bc43 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)  struct mem_type {  	pteval_t prot_pte; -	unsigned int prot_l1; -	unsigned int prot_sect; +	pmdval_t prot_l1; +	pmdval_t prot_sect;  	unsigned int domain;  }; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 594d677b92c..226f1804be1 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);  struct cachepolicy {  	const char	policy[16];  	unsigned int	cr_mask; -	unsigned int	pmd; +	pmdval_t	pmd;  	pteval_t	pte;  }; @@ -288,7 +288,7 @@ static void __init build_mem_type_table(void)  {  	struct cachepolicy *cp;  	unsigned int cr = get_cr(); -	unsigned int user_pgprot, kern_pgprot, vecs_pgprot; +	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;  	int cpu_arch = cpu_architecture();  	int i; @@ -863,14 +863,14 @@ static inline void prepare_page_table(void)  	/*  	 * Clear out all the mappings below the kernel image.  	 */ -	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) +	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)  		pmd_clear(pmd_off_k(addr));  #ifdef CONFIG_XIP_KERNEL  	/* The XIP kernel is mapped in the module area -- skip over it */ -	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; +	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;  #endif -	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) +	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)  		pmd_clear(pmd_off_k(addr));  	/* @@ -885,10 +885,12 @@ static inline void prepare_page_table(void)  	 * memory bank, up to the end of the vmalloc region.  	 */  	for (addr = __phys_to_virt(end); -	     addr < VMALLOC_END; addr += PGDIR_SIZE) +	     addr < VMALLOC_END; addr += PMD_SIZE)  		pmd_clear(pmd_off_k(addr));  } +#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t)) +  /*   * Reserve the special regions of memory   */ @@ -898,7 +900,7 @@ void __init arm_mm_memblock_reserve(void)  	 * Reserve the page tables.  These are already in use,  	 * and can only be in node 0.  	 
*/ -	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); +	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);  #ifdef CONFIG_SA1111  	/* @@ -926,7 +928,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)  	 */  	vectors_page = early_alloc(PAGE_SIZE); -	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) +	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)  		pmd_clear(pmd_off_k(addr));  	/* diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 92bd102e398..2e6849b41f6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)  /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */  .globl	cpu_arm920_suspend_size -.equ	cpu_arm920_suspend_size, 4 * 3 +.equ	cpu_arm920_suspend_size, 4 * 4  #ifdef CONFIG_PM_SLEEP  ENTRY(cpu_arm920_do_suspend)  	stmfd	sp!, {r4 - r7, lr} diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 2bbcf053dff..cd8f79c3a28 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)  /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */  .globl	cpu_arm926_suspend_size -.equ	cpu_arm926_suspend_size, 4 * 3 +.equ	cpu_arm926_suspend_size, 4 * 4  #ifdef CONFIG_PM_SLEEP  ENTRY(cpu_arm926_do_suspend)  	stmfd	sp!, {r4 - r7, lr} diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 07219c2ae11..69e7f2ef738 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)  ENTRY(cpu_sa1100_do_resume)  	ldmia	r0, {r4 - r7}			@ load cp regs -	mov	r1, #0 -	mcr	p15, 0, r1, c8, c7, 0		@ flush I+D TLBs -	mcr	p15, 0, r1, c7, c7, 0		@ flush I&D cache -	mcr	p15, 0, r1, c9, c0, 0		@ invalidate RB -	mcr	p15, 0, r1, c9, c0, 5		@ allow user space to use RB +	mov	ip, #0 +	mcr	p15, 0, ip, c8, c7, 0		@ flush I+D TLBs +	mcr	p15, 0, ip, c7, c7, 0		@ flush I&D cache +	mcr	p15, 0, ip, c9, c0, 0		@ invalidate RB +	mcr	p15, 0, ip, c9, c0, 5		@ allow user space to use RB  	mcr	p15, 0, r4, c3, c0, 0		@ domain ID  	mcr	p15, 0, r5, c2, c0, 0		@ translation table base addr diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 219138d2f15..a923aa0fd00 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -223,6 +223,22 @@ __v6_setup:  	mrc	p15, 0, r0, c1, c0, 0		@ read control register  	bic	r0, r0, r5			@ clear bits them  	orr	r0, r0, r6			@ set them +#ifdef CONFIG_ARM_ERRATA_364296 +	/* +	 * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data +	 * corruption with hit-under-miss enabled). The conditional code below +	 * (setting the undocumented bit 31 in the auxiliary control register +	 * and the FI bit in the control register) disables hit-under-miss +	 * without putting the processor into full low interrupt latency mode. 
+	 */ +	ldr	r6, =0x4107b362			@ id for ARM1136 r0p2 +	mrc	p15, 0, r5, c0, c0, 0		@ get processor id +	teq	r5, r6				@ check for the faulty core +	mrceq	p15, 0, r5, c1, c0, 1		@ load aux control reg +	orreq	r5, r5, #(1 << 31)		@ set the undocumented bit 31 +	mcreq	p15, 0, r5, c1, c0, 1		@ write aux control reg +	orreq	r0, r0, #(1 << 21)		@ low interrupt latency configuration +#endif  	mov	pc, lr				@ return to head.S:__ret  	/* diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index a30e78542cc..9591c8e9fb8 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)  ENTRY(cpu_v7_reset)  	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register  	bic	r1, r1, #0x1			@ ...............m + THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)  	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU  	isb  	mov	pc, r0 @@ -217,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)  /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */  .globl	cpu_v7_suspend_size  .equ	cpu_v7_suspend_size, 4 * 9 -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_ARM_CPU_SUSPEND  ENTRY(cpu_v7_do_suspend)  	stmfd	sp!, {r4 - r11, lr}  	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID @@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)  	mcr	p15, 0, r7, c2, c0, 0	@ TTB 0  	mcr	p15, 0, r8, c2, c0, 1	@ TTB 1  	mcr	p15, 0, ip, c2, c0, 2	@ TTB control register -	mcr	p15, 0, r10, c1, c0, 1	@ Auxiliary control register +	mrc	p15, 0, r4, c1, c0, 1	@ Read Auxiliary control register +	teq	r4, r10			@ Is it already set? +	mcrne	p15, 0, r10, c1, c0, 1	@ No, so write it  	mcr	p15, 0, r11, c1, c0, 2	@ Co-processor access control  	ldr	r4, =PRRR		@ PRRR  	ldr	r5, =NMRR		@ NMRR  	mcr	p15, 0, r4, c10, c2, 0	@ write PRRR  	mcr	p15, 0, r5, c10, c2, 1	@ write NMRR  	isb +	dsb  	mov	r0, r9			@ control register  	mov	r2, r7, lsr #14		@ get TTB0 base  	mov	r2, r2, lsl #14 diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 28c72a2006a..755e1bf2268 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)  	.align  .globl	cpu_xsc3_suspend_size -.equ	cpu_xsc3_suspend_size, 4 * 8 +.equ	cpu_xsc3_suspend_size, 4 * 7  #ifdef CONFIG_PM_SLEEP  ENTRY(cpu_xsc3_do_suspend)  	stmfd	sp!, {r4 - r10, lr} @@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)  	mrc	p15, 0, r9, c1, c0, 1	@ auxiliary control reg  	mrc 	p15, 0, r10, c1, c0, 0	@ control reg  	bic	r4, r4, #2		@ clear frequency change bit -	stmia	r0, {r1, r4 - r10}	@ store v:p offset + cp regs +	stmia	r0, {r4 - r10}		@ store cp regs  	ldmia	sp!, {r4 - r10, pc}  ENDPROC(cpu_xsc3_do_suspend)  ENTRY(cpu_xsc3_do_resume) -	ldmia	r0, {r1, r4 - r10}	@ load v:p offset + cp regs +	ldmia	r0, {r4 - r10}		@ load cp regs  	mov	ip, #0  	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB  	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
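
The l2x0_of_init() entry point added in cache-l2x0.c above is the device-tree counterpart of calling l2x0_init() directly: it matches the first "arm,pl310-cache", "arm,l220-cache" or "arm,l210-cache" node, ioremaps its reg window, applies the optional latency/filter properties only while the controller is still disabled, saves the PL310 context where applicable, and hooks the matching resume handler into outer_cache. A minimal caller could look like the sketch below; the board callback name is hypothetical, and passing aux_val = 0 with aux_mask = ~0UL keeps the AUX_CTRL value already programmed by hardware or the boot loader, letting the DT properties adjust only the fields they describe.

#include <linux/init.h>
#include <asm/hardware/cache-l2x0.h>

/* Hypothetical DT board hook -- a sketch, not an in-tree machine. */
static void __init myboard_init_machine(void)
{
#ifdef CONFIG_CACHE_L2X0
	/* aux_val 0 / aux_mask ~0UL: preserve AUX_CTRL, let DT tune fields */
	l2x0_of_init(0, ~0UL);
#endif
	/* ... rest of the machine init ... */
}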
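
The one-line pfn_valid() change in init.c is a truncation fix rather than a cleanup: pfn << PAGE_SHIFT is evaluated in unsigned long, so on a 32-bit kernel whose physical addresses exceed 32 bits (phys_addr_t configured as 64-bit) the high bits are discarded before memblock_is_memory() ever sees the address, whereas __pfn_to_phys() widens to phys_addr_t before shifting. A standalone sketch of the arithmetic, with fixed-width types standing in for that configuration (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x100000;	/* page sitting at physical 4 GiB */

	/* old behaviour: shift performed in 32 bits, wraps to 0 */
	uint32_t broken = pfn << PAGE_SHIFT;
	/* new behaviour: widen first, as __pfn_to_phys() does */
	uint64_t fixed = (uint64_t)pfn << PAGE_SHIFT;

	printf("old: %#x, new: %#llx\n", broken, (unsigned long long)fixed);
	return 0;	/* prints old: 0, new: 0x100000000 */
}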