Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/cmm.c	8
-rw-r--r--	arch/s390/mm/fault.c	9
-rw-r--r--	arch/s390/mm/hugetlbpage.c	2
-rw-r--r--	arch/s390/mm/init.c	45
-rw-r--r--	arch/s390/mm/pageattr.c	24
-rw-r--r--	arch/s390/mm/pgtable.c	233
-rw-r--r--	arch/s390/mm/vmem.c	15
7 files changed, 200 insertions, 136 deletions
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e9428291..9d84a1feefe 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
 	if (rc)
 		goto out_pm;
 	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
-	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
-	if (rc)
-		goto out_kthread;
-	return 0;
+	if (!IS_ERR(cmm_thread_ptr))
+		return 0;
 
-out_kthread:
+	rc = PTR_ERR(cmm_thread_ptr);
 	unregister_pm_notifier(&cmm_power_notifier);
 out_pm:
 	unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc..047c3e4c59a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
 	int fault;
 
 	trans_exc_code = regs->int_parm_long;
-	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+	/*
+	 * Protection exceptions are suppressing, decrement psw address.
+	 * The exception to this rule are aborted transactions, for these
+	 * the PSW already points to the correct location.
+	 */
+	if (!(regs->int_code & 0x200))
+		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
 	/*
 	 * Check for low-address protection.  This needs to be treated
 	 * as a special case because the translation exception code
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c..121089d5780 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
 	if (!ptep)
 		return -ENOMEM;
 
-	pte = mk_pte(page, PAGE_RW);
+	pte_val(pte) = addr;
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
 		pte_val(pte) += PAGE_SIZE;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c64..0b09b234230 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
-static unsigned long __init setup_zero_pages(void)
+static void __init setup_zero_pages(void)
 {
 	struct cpuid cpu_id;
 	unsigned int order;
-	unsigned long size;
 	struct page *page;
 	int i;
 
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void)
 		break;
 	case 0x2097:	/* z10 */
 	case 0x2098:	/* z10 */
-	default:
+	case 0x2817:	/* z196 */
+	case 0x2818:	/* z196 */
 		order = 2;
 		break;
+	case 0x2827:	/* zEC12 */
+	default:
+		order = 5;
+		break;
 	}
+	/* Limit number of empty zero pages for small memory sizes */
+	if (order > 2 && totalram_pages <= 16384)
+		order = 2;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void)
 	page = virt_to_page((void *) empty_zero_page);
 	split_page(page, order);
 	for (i = 1 << order; i > 0; i--) {
-		SetPageReserved(page);
+		mark_page_reserved(page);
 		page++;
 	}
 
-	size = PAGE_SIZE << order;
-	zero_page_mask = (size - 1) & PAGE_MASK;
-
-	return 1UL << order;
+	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
 
 /*
@@ -139,7 +143,7 @@ void __init mem_init(void)
 
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
-	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
+	setup_zero_pages();	/* Setup zeroed pages. */
 
 	reservedpages = 0;
 
@@ -158,34 +162,15 @@ void __init mem_init(void)
 	       PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-	unsigned long addr = begin;
-
-	if (begin >= end)
-		return;
-	for (; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
-		       PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
 void free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long)&__init_begin,
-			(unsigned long)&__init_end);
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
 }
 #endif
 
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e5..80adfbf7506 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+		     : [addr] "+a" (addr) : [skey] "d" (skey));
+	return addr;
+}
+
 void storage_key_init_range(unsigned long start, unsigned long end)
 {
-	unsigned long boundary, function, size;
+	unsigned long boundary, size;
 
 	while (start < end) {
-		if (MACHINE_HAS_EDAT2) {
-			/* set storage keys for a 2GB frame */
-			function = 0x22000 | PAGE_DEFAULT_KEY;
-			size = 1UL << 31;
-			boundary = (start + size) & ~(size - 1);
-			if (boundary <= end) {
-				do {
-					start = pfmf(function, start);
-				} while (start < boundary);
-				continue;
-			}
-		}
 		if (MACHINE_HAS_EDAT1) {
 			/* set storage keys for a 1MB frame */
-			function = 0x21000 | PAGE_DEFAULT_KEY;
 			size = 1UL << 20;
 			boundary = (start + size) & ~(size - 1);
 			if (boundary <= end) {
 				do {
-					start = pfmf(function, start);
+					start = sske_frame(start, PAGE_DEFAULT_KEY);
 				} while (start < boundary);
 				continue;
 			}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a3431..bd954e96f51 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
 }
 EXPORT_SYMBOL_GPL(gmap_map_segment);
 
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
 {
-	unsigned long *table, vmaddr, segment;
-	struct mm_struct *mm;
-	struct gmap_pgtable *mp;
-	struct gmap_rmap *rmap;
-	struct vm_area_struct *vma;
-	struct page *page;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	unsigned long *table;
 
-	current->thread.gmap_addr = address;
-	mm = gmap->mm;
-	/* Walk the gmap address space page table */
 	table = gmap->table + ((address >> 53) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 42) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 31) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 20) & 0x7ff);
+	return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *segment_ptr, vmaddr, segment;
+	struct gmap_pgtable *mp;
+	struct page *page;
+
+	current->thread.gmap_addr = address;
+	segment_ptr = gmap_table_walk(address, gmap);
+	if (IS_ERR(segment_ptr))
+		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
-	segment = *table;
-	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+	segment = *segment_ptr;
+	if (!(segment & _SEGMENT_ENTRY_INV)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		return mp->vmaddr | (address & ~PMD_MASK);
 	} else if (segment & _SEGMENT_ENTRY_RO) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-		vma = find_vma(mm, vmaddr);
-		if (!vma || vma->vm_start > vmaddr)
-			return -EFAULT;
+		return vmaddr | (address & ~PMD_MASK);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
 
-		/* Walk the parent mm page table */
-		pgd = pgd_offset(mm, vmaddr);
-		pud = pud_alloc(mm, pgd, vmaddr);
-		if (!pud)
-			return -ENOMEM;
-		pmd = pmd_alloc(mm, pud, vmaddr);
-		if (!pmd)
-			return -ENOMEM;
-		if (!pmd_present(*pmd) &&
-		    __pte_alloc(mm, vma, pmd, vmaddr))
-			return -ENOMEM;
-		/* pmd now points to a valid segment table entry. */
-		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
-		if (!rmap)
-			return -ENOMEM;
-		/* Link gmap segment table entry location to page table. */
-		page = pmd_page(*pmd);
-		mp = (struct gmap_pgtable *) page->index;
-		rmap->entry = table;
-		spin_lock(&mm->page_table_lock);
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+	unsigned long rc;
+
+	down_read(&gmap->mm->mmap_sem);
+	rc = __gmap_translate(address, gmap);
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+				unsigned long *segment_ptr,
+				struct gmap *gmap)
+{
+	unsigned long vmaddr;
+	struct vm_area_struct *vma;
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct mm_struct *mm;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	mm = gmap->mm;
+	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+	vma = find_vma(mm, vmaddr);
+	if (!vma || vma->vm_start > vmaddr)
+		return -EFAULT;
+	/* Walk the parent mm page table */
+	pgd = pgd_offset(mm, vmaddr);
+	pud = pud_alloc(mm, pgd, vmaddr);
+	if (!pud)
+		return -ENOMEM;
+	pmd = pmd_alloc(mm, pud, vmaddr);
+	if (!pmd)
+		return -ENOMEM;
+	if (!pmd_present(*pmd) &&
+	    __pte_alloc(mm, vma, pmd, vmaddr))
+		return -ENOMEM;
+	/* pmd now points to a valid segment table entry. */
+	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+	if (!rmap)
+		return -ENOMEM;
+	/* Link gmap segment table entry location to page table. */
+	page = pmd_page(*pmd);
+	mp = (struct gmap_pgtable *) page->index;
+	rmap->entry = segment_ptr;
+	spin_lock(&mm->page_table_lock);
+	if (*segment_ptr == segment) {
 		list_add(&rmap->list, &mp->mapper);
-		spin_unlock(&mm->page_table_lock);
 		/* Set gmap segment table entry to page table. */
-		*table = pmd_val(*pmd) & PAGE_MASK;
-		return vmaddr | (address & ~PMD_MASK);
+		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+		rmap = NULL;
+	}
+	spin_unlock(&mm->page_table_lock);
+	kfree(rmap);
+	return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+	struct gmap_rmap *rmap, *next;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int flush;
+
+	flush = 0;
+	spin_lock(&mm->page_table_lock);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+		*rmap->entry =
+			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		list_del(&rmap->list);
+		kfree(rmap);
+		flush = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (flush)
+		__tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *segment_ptr, segment;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int rc;
+
+	current->thread.gmap_addr = address;
+	segment_ptr = gmap_table_walk(address, gmap);
+	if (IS_ERR(segment_ptr))
+		return -EFAULT;
+	/* Convert the gmap address to an mm address. */
+	while (1) {
+		segment = *segment_ptr;
+		if (!(segment & _SEGMENT_ENTRY_INV)) {
+			/* Page table is present */
+			page = pfn_to_page(segment >> PAGE_SHIFT);
+			mp = (struct gmap_pgtable *) page->index;
+			return mp->vmaddr | (address & ~PMD_MASK);
+		}
+		if (!(segment & _SEGMENT_ENTRY_RO))
+			/* Nothing mapped in the gmap address space. */
+			break;
+		rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+		if (rc)
+			return rc;
 	}
 	return -EFAULT;
 }
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
-	struct gmap_rmap *rmap, *next;
-	struct gmap_pgtable *mp;
-	struct page *page;
-	int flush;
-
-	flush = 0;
-	spin_lock(&mm->page_table_lock);
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
-		list_del(&rmap->list);
-		kfree(rmap);
-		flush = 1;
-	}
-	spin_unlock(&mm->page_table_lock);
-	if (flush)
-		__tlb_flush_global();
-}
-
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 						    unsigned long vmaddr)
 {
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
 {
 }
 
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
-					  unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+					   unsigned long *table)
 {
 }
 
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	unsigned int bit, mask;
 
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		return page_table_free_pgste(table);
 	}
 	/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 	mm = tlb->mm;
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db690..35837054f73 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	unsigned long address, start_addr, end_addr;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
 	int ret = -ENOMEM;
 
-	start_addr = (unsigned long) start;
-	end_addr = (unsigned long) (start + nr);
-
-	for (address = start_addr; address < end_addr;) {
+	for (address = start; address < end;) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		}
 		address += PAGE_SIZE;
 	}
-	memset(start, 0, nr * sizeof(struct page));
+	memset((void *)start, 0, end - start);
 	ret = 0;
 out:
-	flush_tlb_kernel_range(start_addr, end_addr);
+	flush_tlb_kernel_range(start, end);
 	return ret;
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
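Note: the gmap_table_walk() helper introduced in the pgtable.c hunk above reports failure through the kernel's ERR_PTR mechanism (include/linux/err.h), which encodes a negative errno inside the returned pointer so one pointer-returning function can yield either a valid segment-table entry or an error. A minimal sketch of that idiom, outside this patch — find_entry() and use_entry() are hypothetical names, not part of the diff:

	#include <linux/err.h>	/* ERR_PTR, IS_ERR, PTR_ERR */

	static unsigned long *find_entry(unsigned long *table, unsigned long idx,
					 unsigned long invalid_bit)
	{
		unsigned long *entry = table + idx;

		/* Encode -EFAULT in the returned pointer instead of NULL or a
		 * sentinel value, mirroring gmap_table_walk() above. */
		if (*entry & invalid_bit)
			return ERR_PTR(-EFAULT);
		return entry;	/* valid kernel pointer */
	}

	static long use_entry(unsigned long *table, unsigned long idx,
			      unsigned long invalid_bit)
	{
		unsigned long *entry = find_entry(table, idx, invalid_bit);

		/* IS_ERR() detects the errno-encoding range,
		 * PTR_ERR() recovers the negative errno. */
		if (IS_ERR(entry))
			return PTR_ERR(entry);
		return (long) *entry;
	}

As the hunks show, the two callers handle the encoded error differently by design: __gmap_translate() forwards it with PTR_ERR(), while __gmap_fault() collapses any walk failure to -EFAULT.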