diff options
| author | Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> | 2010-05-28 09:29:21 +0900 | 
|---|---|---|
| committer | Andi Kleen <ak@linux.intel.com> | 2010-08-11 09:23:01 +0200 | 
| commit | fd6a03edd271cf2d69a61aa8df98dd05fa6b9afd (patch) | |
| tree | 32d3e89f1f66a3d9b68bccc88fec548acc361bf5 /mm/hugetlb.c | |
| parent | 93f70f900da36fbc19c13c2aa04b2e468c8d00fb (diff) | |
| download | olio-linux-3.10-fd6a03edd271cf2d69a61aa8df98dd05fa6b9afd.tar.xz olio-linux-3.10-fd6a03edd271cf2d69a61aa8df98dd05fa6b9afd.zip  | |
HWPOISON, hugetlb: detect hwpoison in hugetlb code
This patch enables blocking access to a hwpoisoned hugepage and
also enables blocking its unmapping.
Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Diffstat (limited to 'mm/hugetlb.c')
| -rw-r--r-- | mm/hugetlb.c | 40 | 
1 files changed, 40 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8c163f64cf1..4c2efc0f391 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -19,6 +19,8 @@  #include <linux/sysfs.h>  #include <linux/slab.h>  #include <linux/rmap.h> +#include <linux/swap.h> +#include <linux/swapops.h>  #include <asm/page.h>  #include <asm/pgtable.h> @@ -2149,6 +2151,19 @@ nomem:  	return -ENOMEM;  } +static int is_hugetlb_entry_hwpoisoned(pte_t pte) +{ +	swp_entry_t swp; + +	if (huge_pte_none(pte) || pte_present(pte)) +		return 0; +	swp = pte_to_swp_entry(pte); +	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) { +		return 1; +	} else +		return 0; +} +  void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,  			    unsigned long end, struct page *ref_page)  { @@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,  		if (huge_pte_none(pte))  			continue; +		/* +		 * HWPoisoned hugepage is already unmapped and dropped reference +		 */ +		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) +			continue; +  		page = pte_page(pte);  		if (pte_dirty(pte))  			set_page_dirty(page); @@ -2491,6 +2512,18 @@ retry:  	}  	/* +	 * Since memory error handler replaces pte into hwpoison swap entry +	 * at the time of error handling, a process which reserved but not have +	 * the mapping to the error hugepage does not have hwpoison swap entry. +	 * So we need to block accesses from such a process by checking +	 * PG_hwpoison bit here. +	 */ +	if (unlikely(PageHWPoison(page))) { +		ret = VM_FAULT_HWPOISON; +		goto backout_unlocked; +	} + +	/*  	 * If we are going to COW a private mapping later, we examine the  	 * pending reservations for this page now. 
This will ensure that  	 * any allocations necessary to record that reservation occur outside @@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,  	static DEFINE_MUTEX(hugetlb_instantiation_mutex);  	struct hstate *h = hstate_vma(vma); +	ptep = huge_pte_offset(mm, address); +	if (ptep) { +		entry = huge_ptep_get(ptep); +		if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) +			return VM_FAULT_HWPOISON; +	} +  	ptep = huge_pte_alloc(mm, address, huge_page_size(h));  	if (!ptep)  		return VM_FAULT_OOM;  |