Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c        22
-rw-r--r--   arch/powerpc/mm/mmu_context_hash64.c   11
-rw-r--r--   arch/powerpc/mm/pgtable_64.c            2
-rw-r--r--   arch/powerpc/mm/slb_low.S              50
-rw-r--r--   arch/powerpc/mm/tlb_hash64.c            2
5 files changed, 45 insertions, 42 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719..f410c3e12c1 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
 		unsigned long tprot = prot;
 
+		/*
+		 * If we hit a bad address return error.
+		 */
+		if (!vsid)
+			return -1;
 		/* Make kernel text executable */
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
 	/* Initialize stab / SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
+	else
+		stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
 
-	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
-		DBG_LOW(" out of pgtable range !\n");
- 		return 1;
-	}
-
 	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+	/* Bad address. */
+	if (!vsid) {
+		DBG_LOW("Bad address!\n");
+		return 1;
+	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
+	if (!vsid)
+		return;
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+	/* Don't create HPTE entries for bad address */
+	if (!vsid)
+		return;
 	ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
 				 mode, HPTE_V_BOLTED,
 				 mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace5..d1d1b92c5b9 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT	((1UL << CONTEXT_BITS) - 1)
-
 int __init_new_context(void)
 {
 	int index;
@@ -56,7 +47,7 @@ again:
 	else if (err)
 		return err;
 
-	if (index > MAX_CONTEXT) {
+	if (index > MAX_USER_CONTEXT) {
 		spin_lock(&mmu_context_lock);
 		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a..654258f165a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca22775..17aa6dfceb3 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
-	/* r3 = faulting address */
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr. r9,r3,4,(63 - 46 - 4)
+	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 	_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
-	 * if the address is within the boundaries of the user region
-	 */
-	srdi.	r9,r10,USER_ESID_BITS
-	bne-	8f			/* invalid ea bits set */
-
-
+0:
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
 	 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	rldimi	r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
-
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
 
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
+	rldimi  r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28		/* get 1T ESID */
+	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
+	rldimi  r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3..023ec8a13f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
-		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
+	WARN_ON(vsid == 0);
 
 	vpn = hpt_vpn(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
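
The range check removed from hash_page() does not disappear: slb_allocate_realmode() now rejects the same addresses up front with "rldicr. r9,r3,4,(63 - 46 - 4)", which tests EA bits 59..46 before any SLB entry is built. A rough standalone C sketch of that test, assuming the top nibble of the effective address is the region id and each region spans at most 2^46 bytes (64TB); neither constant value is taken from this diff:

/*
 * Standalone sketch (not kernel code) of the bad-address test.  The
 * rldicr rotates the EA left by 4 and keeps the top 14 bits of the
 * result, i.e. EA bits 59..46; if any of them are set, the offset
 * within the region exceeds the supported page-table range.
 */
#include <stdbool.h>
#include <stdint.h>

#define REGION_SHIFT	60
#define REGION_MASK	(0xfULL << REGION_SHIFT)	/* assumed: top 4 bits */
#define PGTABLE_RANGE	(1ULL << 46)			/* assumed: 64TB */

static bool bad_ea(uint64_t ea)
{
	return (ea & ~REGION_MASK) >= PGTABLE_RANGE;
}

Under those assumptions 0xc000000010000000 passes while 0xc000400000000000 is rejected, which is why kernel and user faults can now share the single "bne- 8f" bail-out to the invalid-EA label.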
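With the context carried separately in r9, slb_finish_load and slb_finish_load_1T now build the proto-VSID themselves via "rldimi r10,r9,ESID_BITS,0" (resp. ESID_BITS_1T), and kernel regions 0xc..0xf borrow the context ids just above MAX_USER_CONTEXT, as the addis/addi pair computes. A C sketch of that arithmetic; ESID_BITS, ESID_BITS_1T and MAX_USER_CONTEXT are defined in headers outside this diff, so the values below are illustrative assumptions only:

/*
 * Sketch of the proto-VSID construction performed by the new
 * rldimi/addis sequences.  Constant values are assumptions chosen to
 * match a 2^46-byte (64TB) range per context; the real definitions are
 * not part of this diff.
 */
#include <stdint.h>

#define SID_SHIFT		28			/* 256MB segments */
#define SID_SHIFT_1T		40			/* 1TB segments */
#define ESID_BITS		(46 - SID_SHIFT)	/* assumed: 18 */
#define ESID_BITS_1T		(46 - SID_SHIFT_1T)	/* assumed: 6 */
#define MAX_USER_CONTEXT	((1ULL << 19) - 5)	/* assumed: leaves ids for 0xc..0xf */

/* user address: the context comes from mm->context.id */
static uint64_t proto_vsid_user(uint64_t context, uint64_t ea)
{
	uint64_t esid = (ea >> SID_SHIFT) & ((1ULL << ESID_BITS) - 1);

	/* what "rldimi r10,r9,ESID_BITS,0" leaves in r10 before the scramble */
	return (context << ESID_BITS) | esid;
}

/* kernel address: regions 0xc..0xf map to MAX_USER_CONTEXT + 1 .. + 4 */
static uint64_t proto_vsid_kernel(uint64_t ea)
{
	uint64_t context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;

	return proto_vsid_user(context, ea);
}

/* 1T segments: slb_finish_load_1T shifts the 256MB ESID down first */
static uint64_t proto_vsid_1T(uint64_t context, uint64_t ea)
{
	uint64_t esid_1t = (ea >> SID_SHIFT_1T) & ((1ULL << ESID_BITS_1T) - 1);

	return (context << ESID_BITS_1T) | esid_1t;
}

ASM_VSID_SCRAMBLE() then turns the proto-VSID into the final VSID; that step is untouched by this diff and omitted from the sketch.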
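Every "if (!vsid)" added above, and the WARN_ON(vsid == 0) moved to cover both branches of hpte_need_flush(), relies on get_vsid()/get_kernel_vsid() returning 0 for an address outside the supported range; that part of the change lives in the headers, not in the arch/powerpc/mm files shown here. Reusing the helpers from the two sketches above, the assumed contract is roughly:

/*
 * Assumed contract behind the new vsid == 0 checks: 0 means "bad
 * address", so callers bail out before building a VPN or inserting an
 * HPTE.  This reuses bad_ea(), proto_vsid_user() and MAX_USER_CONTEXT
 * from the sketches above and leaves out the scramble step; it is an
 * illustration, not the kernel's get_vsid().
 */
static uint64_t vsid_or_zero(uint64_t context, uint64_t ea)
{
	if (bad_ea(ea))				/* EA beyond the 64TB range */
		return 0;
	if (context > MAX_USER_CONTEXT + 4)	/* assumed context bound */
		return 0;

	return proto_vsid_user(context, ea);
}

In the hunks above, hash_page() maps a zero VSID to a return value of 1 (fault not handled), hash_preload() and kernel_map_linear_page() simply skip the HPTE insert, and htab_bolt_mapping() fails the bolt with -1.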