Diffstat (limited to 'arch/arm/mm/mmu.c')
 -rw-r--r--   arch/arm/mm/mmu.c | 150
 1 file changed, 105 insertions(+), 45 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c9..e0d8565671a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
 #include <asm/mach/pci.h>
 
 #include "mm.h"
+#include "tcm.h"
 
 /*
  * empty_zero_page is a special page that is used for
@@ -57,6 +58,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,37 +70,50 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
+	pteval_t	pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -195,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+	pr_warning("noalign kernel parameter not supported without cp15\n");
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
@@ -310,6 +343,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -421,6 +455,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +480,7 @@ static void __init build_mem_type_table(void)
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
+			s2_pgprot |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +535,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device  = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -576,39 +616,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
-				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+			unsigned long end, phys_addr_t phys,
+			const struct mem_type *type)
 {
-	pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
 	/*
-	 * Try a section mapping - end, addr and phys must all be aligned
-	 * to a section boundary.  Note that PMDs refer to the individual
-	 * L1 entries, whereas PGDs refer to a group of L1 entries making
-	 * up one logical pointer to an L2 table.
+	 * In classic MMU format, puds and pmds are folded in to
+	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+	 * group of L1 entries making up one logical pointer to
+	 * an L2 table (2MB), where as PMDs refer to the individual
+	 * L1 entries (1MB). Hence increment to get the correct
+	 * offset for odd 1MB sections.
+	 * (See arch/arm/include/asm/pgtable-2level.h)
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
-		pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
-		if (addr & SECTION_SIZE)
-			pmd++;
+	if (addr & SECTION_SIZE)
+		pmd++;
 #endif
+	do {
+		*pmd = __pmd(phys | type->prot_sect);
+		phys += SECTION_SIZE;
+	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		do {
-			*pmd = __pmd(phys | type->prot_sect);
-			phys += SECTION_SIZE;
-		} while (pmd++, addr += SECTION_SIZE, addr != end);
+	flush_pmd_entry(pmd);
+}
 
-		flush_pmd_entry(p);
-	} else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+				      unsigned long end, phys_addr_t phys,
+				      const struct mem_type *type)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
 		/*
-		 * No need to loop; pte's aren't interested in the
-		 * individual L1 entries.
+		 * With LPAE, we must loop over to map
+		 * all the pmds for the given range.
 		 */
-		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
-	}
+		next = pmd_addr_end(addr, end);
+
+		/*
+		 * Try a section mapping - addr, next and phys must all be
+		 * aligned to a section boundary.
+		 */
+		if (type->prot_sect &&
+				((addr | next | phys) & ~SECTION_MASK) == 0) {
+			map_init_section(pmd, addr, next, phys, type);
+		} else {
+			alloc_init_pte(pmd, addr, next,
+						__phys_to_pfn(phys), type);
+		}
+
+		phys += next - addr;
+
+	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -619,7 +680,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type);
+		alloc_init_pmd(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -757,21 +818,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +843,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +877,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +923,12 @@
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
@@ -1236,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
+	tcm_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 