Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig        |  13
 arch/arm/mm/alignment.c    |  24
 arch/arm/mm/cache-l2x0.c   |  10
 arch/arm/mm/copypage-v6.c  |   9
 arch/arm/mm/dma-mapping.c  |   7
 arch/arm/mm/fault-armv.c   |   1
 arch/arm/mm/flush.c        |  25
 arch/arm/mm/highmem.c      |  87
 arch/arm/mm/init.c         |  15
 arch/arm/mm/mmu.c          |  14
 arch/arm/mm/pgd.c          |   1
 arch/arm/mm/proc-sa1100.S  |   2
 12 files changed, 161 insertions(+), 47 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f64..5bd7c89a604 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has a outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@ config CACHE_L2X0
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
@@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac..a2ab51fa73e 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -166,15 +166,15 @@ union offset_union {
  THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
  THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
-	"	.section .fixup,\"ax\"\n"		\
+	"	.pushsection .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
 	"3:	mov	%0, #1\n"			\
 	"	b	2b\n"				\
-	"	.previous\n"				\
-	"	.section __ex_table,\"a\"\n"		\
+	"	.popsection\n"				\
+	"	.pushsection __ex_table,\"a\"\n"	\
 	"	.align	3\n"				\
 	"	.long	1b, 3b\n"			\
-	"	.previous\n"				\
+	"	.popsection\n"				\
 	: "=r" (err), "=&r" (val), "=r" (addr)		\
 	: "0" (err), "2" (addr))
@@ -226,16 +226,16 @@ union offset_union {
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
 		"2:	"ins"	%1, [%2]\n"			\
 		"3:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
+		"	.pushsection .fixup,\"ax\"\n"		\
 		"	.align	2\n"				\
 		"4:	mov	%0, #1\n"			\
 		"	b	3b\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
+		"	.popsection\n"				\
+		"	.pushsection __ex_table,\"a\"\n"	\
 		"	.align	3\n"				\
 		"	.long	1b, 4b\n"			\
 		"	.long	2b, 4b\n"			\
-		"	.previous\n"				\
+		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
 		if (err)					\
@@ -266,18 +266,18 @@ union offset_union {
 		"	mov	%1, %1, "NEXT_BYTE"\n"		\
 		"4:	"ins"	%1, [%2]\n"			\
 		"5:\n"						\
-		"	.section .fixup,\"ax\"\n"		\
+		"	.pushsection .fixup,\"ax\"\n"		\
 		"	.align	2\n"				\
 		"6:	mov	%0, #1\n"			\
 		"	b	5b\n"				\
-		"	.previous\n"				\
-		"	.section __ex_table,\"a\"\n"		\
+		"	.popsection\n"				\
+		"	.pushsection __ex_table,\"a\"\n"	\
 		"	.align	3\n"				\
 		"	.long	1b, 6b\n"			\
 		"	.long	2b, 6b\n"			\
 		"	.long	3b, 6b\n"			\
 		"	.long	4b, 6b\n"			\
-		"	.previous\n"				\
+		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
 		if (err)					\
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e..21ad68ba22b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
 	unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
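
The outer_cache.sync hook registered above gives mandatory barriers a way to drain the L2x0 write buffer, which is what the new OUTER_CACHE_SYNC option advertises. A minimal sketch of how such a hook is consumed, assuming a wrapper along the lines of the asm/outer_cache.h helper; the outer_sync() name and body are an illustration, not part of this diff:

/*
 * Sketch only: mirrors the outer_cache_fns usage visible in this diff.
 * The outer_sync() wrapper is assumed, not taken from these hunks; with
 * CONFIG_OUTER_CACHE_SYNC enabled, a mandatory barrier like mb() can
 * call it after the CPU-side barrier so the outer cache drains too.
 */
struct outer_cache_fns {
	void (*inv_range)(unsigned long start, unsigned long end);
	void (*clean_range)(unsigned long start, unsigned long end);
	void (*flush_range)(unsigned long start, unsigned long end);
	void (*sync)(void);		/* drain the outer cache write buffer */
};

extern struct outer_cache_fns outer_cache;

static inline void outer_sync(void)
{
	if (outer_cache.sync)		/* set by l2x0_init() above */
		outer_cache.sync();
}

Taking l2x0_lock around cache_sync() in l2x0_cache_sync() keeps the drain ordered against any range operation in flight on another CPU.
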
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6df..f55fa1044f7 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0da7eccf774..13fa536d82e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -11,7 +11,7 @@
  */
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/init.h>
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				vaddr += offset;
 				op(vaddr, len, dir);
 				kunmap_high(page);
+			} else if (cache_is_vipt()) {
+				pte_t saved_pte;
+				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				op(vaddr + offset, len, dir);
+				kunmap_high_l1_vipt(page, saved_pte);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c9b97e9836a..0d414c28eb2 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e209..c6844cb9b50 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
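
The dma-mapping.c and flush.c hunks above apply the same three-way lookup whenever a physical page has to be reached through a virtual address: lowmem pages always have a permanent kernel mapping; a highmem page may have a pinnable kmap (kmap_high_get() returns it or NULL); failing that, and only for VIPT caches, a temporary per-CPU fixmap is created with kmap_high_l1_vipt(). The pattern, condensed into one hypothetical helper for illustration:

#include <linux/highmem.h>	/* kmap_high_get(), kunmap_high() */
#include <asm/cacheflush.h>	/* __cpuc_flush_dcache_area() */
#include <asm/cachetype.h>	/* cache_is_vipt() */
#include <asm/highmem.h>	/* kmap_high_l1_vipt(), added by this series */

/* Hypothetical helper name; the body restates the logic of the hunks above. */
static void flush_one_page(struct page *page)
{
	if (!PageHighMem(page)) {
		/* lowmem: the permanent kernel mapping always exists */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);	/* pin existing kmap, or NULL */
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* no mapping: borrow the per-CPU KM_L1_CACHE fixmap slot */
			pte_t saved_pte;
			addr = kmap_high_l1_vipt(page, &saved_pte);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
		/* VIVT: an unmapped highmem page has no cache lines to maintain */
	}
}
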
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b4..77b030f5ec0 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately.  So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupt disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed.  If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif  /* CONFIG_CPU_CACHE_VIPT */
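
What the per-CPU depth counter in highmem.c buys is reentrancy on a single fixmap slot: DMA cache maintenance may run in interrupt context and preempt task-level maintenance on the same CPU, so an interrupt-level mapping saves the pte it displaces and restores it on unmap, while the outermost unmap deliberately leaves its pte in place so the next mapping of the same page can skip the TLB flush. A sketch of that nesting; the function and page names are hypothetical:

/*
 * Illustration of the reentrancy the depth counter allows. Both
 * functions below are hypothetical callers of the interfaces added
 * in the hunk above.
 */
static void task_level_maintenance(struct page *task_page)
{
	pte_t saved_pte;
	void *va = kmap_high_l1_vipt(task_page, &saved_pte);	/* depth 0 -> 1 */

	__cpuc_flush_dcache_area(va, PAGE_SIZE);
	/* an IRQ here may run irq_level_maintenance() on this CPU */

	kunmap_high_l1_vipt(task_page, saved_pte);	/* depth 1 -> 0: pte left for reuse */
}

static void irq_level_maintenance(struct page *irq_page)
{
	pte_t saved_pte;
	void *va = kmap_high_l1_vipt(irq_page, &saved_pte);	/* depth 1 -> 2, old pte saved */

	__cpuc_flush_dcache_area(va, PAGE_SIZE);
	kunmap_high_l1_vipt(irq_page, saved_pte);	/* depth 2 -> 1: old pte restored */
}
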
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7829cb5425f..0ed29bfeba1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/sort.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -85,9 +86,6 @@ void show_mem(void)
 	printk("Mem-info:\n");
 	show_free_areas();
 	for_each_online_node(node) {
-		pg_data_t *n = NODE_DATA(node);
-		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
 		for_each_nodebank (i,mi,node) {
 			struct membank *bank = &mi->bank[i];
 			unsigned int pfn1, pfn2;
@@ -96,8 +94,8 @@ void show_mem(void)
 			pfn1 = bank_pfn_start(bank);
 			pfn2 = bank_pfn_end(bank);
 
-			page = map + pfn1;
-			end  = map + pfn2;
+			page = pfn_to_page(pfn1);
+			end  = pfn_to_page(pfn2 - 1) + 1;
 
 			do {
 				total++;
@@ -602,9 +600,6 @@ void __init mem_init(void)
 	reserved_pages = free_pages = 0;
 
 	for_each_online_node(node) {
-		pg_data_t *n = NODE_DATA(node);
-		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
 		for_each_nodebank(i, &meminfo, node) {
 			struct membank *bank = &meminfo.bank[i];
 			unsigned int pfn1, pfn2;
@@ -613,8 +608,8 @@ void __init mem_init(void)
 			pfn1 = bank_pfn_start(bank);
 			pfn2 = bank_pfn_end(bank);
 
-			page = map + pfn1;
-			end  = map + pfn2;
+			page = pfn_to_page(pfn1);
+			end  = pfn_to_page(pfn2 - 1) + 1;
 
 			do {
 				if (PageReserved(page))
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28e..241c24a1c18 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	int i;
 
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
+	/*
+	 * We need to access to user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings so we use the context that we
+	 * "borrowed".
+	 */
+	pgd = current->active_mm->pgd;
 
 	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
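
In the init.c hunks, the replaced `map + pfn` arithmetic assumed that a node's mem_map is one virtually contiguous array indexed by pfn; pfn_to_page() instead goes through the memory model's lookup, which also holds up when the map is split into sections (this rationale is inferred, not stated in the diff). Note how the one-past-the-end pointer is derived from the last valid pfn, `pfn_to_page(pfn2 - 1) + 1`, so the lookup itself never touches an out-of-range pfn. The same walk, reduced to a sketch:

#include <linux/mm.h>		/* pfn_to_page(), PageReserved() */

/* Illustrative bank walk in the style of show_mem()/mem_init() above. */
static unsigned int count_reserved(unsigned long pfn1, unsigned long pfn2)
{
	struct page *page = pfn_to_page(pfn1);
	/* end is computed from the last valid pfn, then bumped past it */
	struct page *end  = pfn_to_page(pfn2 - 1) + 1;
	unsigned int reserved = 0;

	do {
		if (PageReserved(page))
			reserved++;
		page++;
	} while (page < end);

	return reserved;
}
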
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 2690146161b..be5f58e153b 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/highmem.h>
 
 #include <asm/pgalloc.h>
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c1..5c47760c206 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
 	mcr	p15, 0, r0, c9, c0, 5		@ Allow read-buffer operations from userland
 	mov	pc, lr
 
-	.previous
+	.section .text
 
 /*
 * cpu_sa1100_proc_fin()
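
The alignment.c macros and proc-sa1100.S both retire `.previous`, which merely toggles back to the previously selected section and so restores the right place only if the surrounding code really was emitted where the macro assumed; `.pushsection`/`.popsection` keep a stack and return to whatever section was actually active, and the explicit `.section .text` in proc-sa1100.S names the intended section outright. A minimal sketch of the resulting exception-table pattern in the style of the alignment.c macros; the macro name and operands are illustrative:

/*
 * Hypothetical macro showing the pushsection/popsection pattern from
 * the alignment.c hunks above; 'err', 'val' and 'addr' are illustrative.
 */
#define get_user_byte(err, val, addr)				\
	__asm__ __volatile__(					\
	"1:	ldrb	%1, [%2]\n"				\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, #1\n"	/* record the fault */	\
	"	b	2b\n"					\
	"	.popsection\n"		/* back to the pushed section */ \
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"	/* fault at 1b -> fixup at 3b */ \
	"	.popsection\n"					\
	: "=r" (err), "=&r" (val)				\
	: "r" (addr), "0" (err))
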