Diffstat (limited to 'arch/arm/mm/idmap.c')
-rw-r--r--   arch/arm/mm/idmap.c   91
1 file changed, 57 insertions, 34 deletions
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 296ad2eaddb..feacf4c7671 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -1,9 +1,38 @@
 #include <linux/kernel.h>
 
 #include <asm/cputype.h>
+#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 
+pgd_t *idmap_pgd;
+
+#ifdef CONFIG_ARM_LPAE
+static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+	unsigned long prot)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
+		pmd = pmd_alloc_one(&init_mm, addr);
+		if (!pmd) {
+			pr_warning("Failed to allocate identity pmd.\n");
+			return;
+		}
+		pud_populate(&init_mm, pud, pmd);
+		pmd += pmd_index(addr);
+	} else
+		pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		*pmd = __pmd((addr & PMD_MASK) | prot);
+		flush_pmd_entry(pmd);
+	} while (pmd++, addr = next, addr != end);
+}
+#else	/* !CONFIG_ARM_LPAE */
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	unsigned long prot)
 {
@@ -15,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	pmd[1] = __pmd(addr);
 	flush_pmd_entry(pmd);
 }
+#endif	/* CONFIG_ARM_LPAE */
 
 static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long prot)
@@ -28,11 +58,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }
 
-void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 {
 	unsigned long prot, next;
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
 		prot |= PMD_BIT4;
 
@@ -43,48 +73,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_SMP
-static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-	pmd_t *pmd = pmd_offset(pud, addr);
-	pmd_clear(pmd);
-}
+extern char  __idmap_text_start[], __idmap_text_end[];
 
-static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
+static int __init init_static_idmap(void)
 {
-	pud_t *pud = pud_offset(pgd, addr);
-	unsigned long next;
+	phys_addr_t idmap_start, idmap_end;
 
-	do {
-		next = pud_addr_end(addr, end);
-		idmap_del_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
+	idmap_pgd = pgd_alloc(&init_mm);
+	if (!idmap_pgd)
+		return -ENOMEM;
 
-void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
+	/* Add an identity mapping for the physical address of the section. */
+	idmap_start = virt_to_phys((void *)__idmap_text_start);
+	idmap_end = virt_to_phys((void *)__idmap_text_end);
 
-	pgd += pgd_index(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		idmap_del_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
+	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
+		(long long)idmap_start, (long long)idmap_end);
+	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+
+	return 0;
 }
-#endif
+early_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
 void setup_mm_for_reboot(void)
 {
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	/* Clean and invalidate L1. */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	cpu_switch_mm(idmap_pgd, &init_mm);
+
+	/* Flush the TLB. */
 	local_flush_tlb_all();
 }
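
Note on the change above: instead of rewriting the current task's page tables around every soft reboot, the kernel now builds one static identity map (virtual address equal to physical address) over the .idmap.text section at early_initcall time, and setup_mm_for_reboot() simply switches to that idmap_pgd before the MMU is turned off. A minimal stand-alone sketch of the section-granular walk performed by identity_mapping_add()/idmap_add_pmd() follows; the section size, protection value, and address range are illustrative assumptions rather than the kernel's definitions, and the real code writes pmd entries reached through idmap_pgd instead of printing them.

/* Stand-alone sketch of a section-granular 1:1 mapping loop (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT	20			/* assume 1 MiB sections, as on classic ARM */
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

/* Analogue of pmd_addr_end(): next section boundary, capped at end. */
static uintptr_t section_addr_end(uintptr_t addr, uintptr_t end)
{
	uintptr_t next = (addr + SECTION_SIZE) & SECTION_MASK;

	return (next - 1 < end - 1) ? next : end;
}

int main(void)
{
	/* Hypothetical physical extent of the .idmap.text section. */
	uintptr_t start = 0x60123450, end = 0x60123800;
	uintptr_t prot = 0xc02;			/* placeholder "section, writeable" bits */
	uintptr_t addr = start, next;

	do {
		next = section_addr_end(addr, end);
		/* Mirrors: *pmd = __pmd((addr & PMD_MASK) | prot); */
		printf("map VA 0x%08lx -> PA 0x%08lx, entry 0x%08lx\n",
		       (unsigned long)(addr & SECTION_MASK),
		       (unsigned long)(addr & SECTION_MASK),
		       (unsigned long)((addr & SECTION_MASK) | prot));
	} while (addr = next, addr != end);

	return 0;
}

Built with a plain C compiler this prints the single 1 MiB section entry covering the example range; the functions in the patch differ in that they allocate and walk the pgd/pud/pmd levels and flush each written entry with flush_pmd_entry().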