Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 58 +++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7f5c9..e95a996ab78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
+	pteval_t	pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
+			s2_pgprot |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device  = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
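The first half of the diff only exports the new stage-2/HYP protections; it does not show their consumers. They are intended to back KVM/ARM's HYP-mode and stage-2 page-table setup via page definitions in arch/arm/include/asm/pgtable.h. A minimal sketch of what those companion definitions plausibly look like, assuming a _MOD_PROT helper that ORs extra bits into a base pgprot (the exact flag mix is an assumption drawn from the KVM/ARM patch series, not part of this diff):

/*
 * Sketch of assumed consumers in arch/arm/include/asm/pgtable.h;
 * not part of this diff. L_PTE_HYP, L_PTE_S2_RDONLY, and L_PTE_USER
 * are existing/added pte bits in the same series.
 */
#define _MOD_PROT(p, b)		__pgprot(pgprot_val(p) | (b))

#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)

The s2_policy() macro in the hunk above explains why this is safe on non-LPAE builds: without CONFIG_ARM_LPAE there is no stage-2 translation, so the .pte_s2 fields collapse to 0 and pgprot_s2 carries no memory-type bits.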
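The second half of the diff converts iotable_init(), vm_reserve_area_early(), fill_pmd_gaps(), and pci_reserve_io() from walking the global vmlist to a dedicated static_vmlist. The struct static_vm type and its helpers are defined elsewhere in this series (arch/arm/mm/mm.h and arch/arm/mm/ioremap.c); the following is a best-effort sketch of the interface this diff relies on, assuming the list is kept sorted by virtual address so that lookups and the fill_pmd_gaps() walk can terminate early:

/* Sketch of the assumed static_vm interface; the real definitions live
 * in arch/arm/mm/mm.h and arch/arm/mm/ioremap.c and may differ in detail.
 */
#include <linux/init.h>		/* __init */
#include <linux/list.h>		/* list_head, list_for_each_entry */
#include <linux/vmalloc.h>	/* struct vm_struct, vm_area_add_early() */

struct static_vm {
	struct vm_struct vm;	/* embedded vm_struct, still registered early */
	struct list_head list;	/* links into static_vmlist, sorted by vm.addr */
};

extern struct list_head static_vmlist;

/*
 * Register a static mapping: keep the generic early vm_struct
 * registration, then insert svm into static_vmlist in address order.
 * Runs single-threaded at boot, so no locking is needed.
 */
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm = &svm->vm;
	void *vaddr;

	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;
		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

/* Look up the static mapping covering vaddr, or NULL if none does. */
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (vm->addr > vaddr)
			break;	/* sorted list: no later entry can match */
		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}
	return NULL;
}

With this in place, the flag tests dropped from fill_pmd_gaps() and pci_reserve_io() become unnecessary: static_vmlist contains only the static mappings by construction, so pci_reserve_io() reduces to a single find_static_vm_vaddr() lookup on PCI_IO_VIRT_BASE.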