| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:23:15 +0900 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:23:15 +0900 |
| commit | 9e2d8656f5e8aa214e66b462680cf86b210b74a8 | |
| tree | f67d62e896cedf75599ea45f9ecf9999c6ad24cd /arch/s390/include/asm/pgtable.h | |
| parent | 1ea4f4f8405cc1ceec23f2d261bc3775785e6712 | |
| parent | 9e695d2ecc8451cc2c1603d60b5c8e7f5581923a | |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
 "A few misc things and very nearly all of the MM tree.  A tremendous
  amount of stuff (again), including a significant rbtree library
  rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
  sparc64: Support transparent huge pages.
  mm: thp: Use more portable PMD clearing sequence in zap_huge_pmd().
  mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
  sparc64: Document PGD and PMD layout.
  sparc64: Eliminate PTE table memory wastage.
  sparc64: Halve the size of PTE tables
  sparc64: Only support 4MB huge pages and 8KB base pages.
  memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
  mm: memcg: clean up mm_match_cgroup() signature
  mm: document PageHuge somewhat
  mm: use %pK for /proc/vmallocinfo
  mm, thp: fix mlock statistics
  mm, thp: fix mapped pages avoiding unevictable list on mlock
  memory-hotplug: update memory block's state and notify userspace
  memory-hotplug: preparation to notify memory block's state at memory hot remove
  mm: avoid section mismatch warning for memblock_type_name
  make GFP_NOTRACK definition unconditional
  cma: decrease cc.nr_migratepages after reclaiming pagelist
  CMA: migrate mlocked pages
  kpageflags: fix wrong KPF_THP on non-huge compound pages
  ...
Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
| -rw-r--r-- | arch/s390/include/asm/pgtable.h | 210 |
|---|---|---|
1 file changed, 210 insertions, 0 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6bd7d748301..979fe3dc078 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -42,6 +42,7 @@ extern void fault_init(void);
  * tables contain all the necessary information.
  */
 #define update_mmu_cache(vma, address, ptep)     do { } while (0)
+#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero; used
@@ -347,6 +348,12 @@ extern struct page *vmemmap;
 
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override   */
+#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
+#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
+
+/* Set of bits not changed in pmd_modify */
+#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
+				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
 
 /* Page status table bits for virtualization */
 #define RCP_ACC_BITS	0xf000000000000000UL
@@ -506,6 +513,30 @@ static inline int pmd_bad(pmd_t pmd)
 	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+	return 0;
+}
+
 static inline int pte_none(pte_t pte)
 {
 	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -1159,6 +1190,185 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
 
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+	unsigned long sto = (unsigned long) pmdp -
+			    pmd_index(address) * sizeof(pmd_t);
+
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
+			: "=m" (*pmdp)
+			: "m" (*pmdp), "a" (sto),
+			  "a" ((address & HPAGE_MASK))
+			: "cc"
+		);
+	}
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t entry)
+{
+	*pmdp = entry;
+}
+
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+	unsigned long pgprot_pmd = 0;
+
+	if (pgprot_val(pgprot) & _PAGE_INVALID) {
+		if (pgprot_val(pgprot) & _PAGE_SWT)
+			pgprot_pmd |= _HPAGE_TYPE_NONE;
+		pgprot_pmd |= _SEGMENT_ENTRY_INV;
+	}
+	if (pgprot_val(pgprot) & _PAGE_RO)
+		pgprot_pmd |= _SEGMENT_ENTRY_RO;
+	return pgprot_pmd;
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
+	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	return pmd;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	/* No dirty bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	/* No referenced bit in the segment table entry. */
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address, pmd_t *pmdp)
+{
+	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
+	long tmp, rc;
+	int counter;
+
+	rc = 0;
+	if (MACHINE_HAS_RRBM) {
+		counter = PTRS_PER_PTE >> 6;
+		asm volatile(
+			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
+			"	ogr	%1,%0\n"
+			"	la	%3,0(%4,%3)\n"
+			"	brct	%2,0b\n"
+			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
+			  "+a" (pmd_addr)
+			: "a" (64 * 4096UL) : "cc");
+		rc = !!rc;
+	} else {
+		counter = PTRS_PER_PTE;
+		asm volatile(
+			"0:	rrbe	0,%2\n"
+			"	la	%2,0(%3,%2)\n"
+			"	brc	12,1f\n"
+			"	lhi	%0,1\n"
+			"1:	brct	%1,0b\n"
+			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
+			: "a" (4096UL) : "cc");
+	}
+	return rc;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	__pmd_idte(address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
+static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+				     unsigned long address, pmd_t *pmdp)
+{
+	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+static inline void pmdp_invalidate(struct vm_area_struct *vma,
+				   unsigned long address, pmd_t *pmdp)
+{
+	__pmd_idte(address, pmdp);
+}
+
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
+{
+	pmd_t __pmd;
+	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+	return __pmd;
+}
+
+#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
+#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return MACHINE_HAS_HPAGE ? 1 : 0;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	if (pmd_trans_huge(pmd))
+		return pmd_val(pmd) >> HPAGE_SHIFT;
+	else
+		return pmd_val(pmd) >> PAGE_SHIFT;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * 31 bit swap entry format:
  * A page-table entry has some bits we have to treat in a special way.
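The core of the new pmd_modify()/massage_pgprot_pmd() pair above is plain bit arithmetic: _SEGMENT_CHG_MASK names the bits that survive a protection change (segment origin, large-page flag, THP split flag, change-recording override), while the protection bits are recomputed from the new pgprot. The standalone sketch below models only that masking step outside the kernel; _SEGMENT_ENTRY_ORIGIN, _SEGMENT_ENTRY_RO and the sample address are illustrative placeholders (their real values are defined elsewhere in pgtable.h, not in this patch), and the simplified pmd_modify() takes precomputed protection bits instead of a pgprot_t.

```c
/* Standalone model of the pmd_modify()/_SEGMENT_CHG_MASK logic above.
 * Values marked "illustrative" are placeholders for definitions that
 * live elsewhere in pgtable.h and are not part of this patch.
 */
#include <stdio.h>

typedef unsigned long pmdval_t;

#define _SEGMENT_ENTRY_ORIGIN	(~0x7ffUL)	/* illustrative: segment origin mask */
#define _SEGMENT_ENTRY_RO	0x200UL		/* illustrative: protection bit */
#define _SEGMENT_ENTRY_LARGE	0x400UL		/* from the patch context */
#define _SEGMENT_ENTRY_CO	0x100UL		/* from the patch context */
#define _SEGMENT_ENTRY_SPLIT	(1UL << 0)	/* from the patch: THP splitting bit */

/* Set of bits not changed in pmd_modify (mirrors the patch) */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Simplified pmd_modify: keep origin/large/split/co, replace protection bits */
static pmdval_t pmd_modify(pmdval_t pmd, pmdval_t newprot_bits)
{
	pmd &= _SEGMENT_CHG_MASK;
	pmd |= newprot_bits;
	return pmd;
}

int main(void)
{
	/* a huge, read-only segment entry pointing at 0x80000000 (illustrative) */
	pmdval_t pmd = 0x80000000UL | _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_RO;

	/* make it writable: the RO bit is dropped, origin and LARGE survive */
	pmd = pmd_modify(pmd, 0);
	printf("writable huge pmd:  %#lx\n", pmd);

	/* write-protect again: only the RO bit comes back */
	pmd = pmd_modify(pmd, _SEGMENT_ENTRY_RO);
	printf("read-only huge pmd: %#lx\n", pmd);
	return 0;
}
```

Running the sketch shows the origin and LARGE bits surviving both transitions while only the RO bit toggles, which is exactly the invariant the real pmd_modify() relies on when THP changes the protection of a huge mapping.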