diff options
Diffstat (limited to 'include/linux/mmu_notifier.h')
| -rw-r--r-- | include/linux/mmu_notifier.h | 60 | 
1 files changed, 12 insertions, 48 deletions
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 1d1b1e13f79..bc823c4c028 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -4,6 +4,7 @@  #include <linux/list.h>  #include <linux/spinlock.h>  #include <linux/mm_types.h> +#include <linux/srcu.h>  struct mmu_notifier;  struct mmu_notifier_ops; @@ -245,50 +246,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)  		__mmu_notifier_mm_destroy(mm);  } -/* - * These two macros will sometime replace ptep_clear_flush. - * ptep_clear_flush is implemented as macro itself, so this also is - * implemented as a macro until ptep_clear_flush will converted to an - * inline function, to diminish the risk of compilation failure. The - * invalidate_page method over time can be moved outside the PT lock - * and these two macros can be later removed. - */ -#define ptep_clear_flush_notify(__vma, __address, __ptep)		\ -({									\ -	pte_t __pte;							\ -	struct vm_area_struct *___vma = __vma;				\ -	unsigned long ___address = __address;				\ -	__pte = ptep_clear_flush(___vma, ___address, __ptep);		\ -	mmu_notifier_invalidate_page(___vma->vm_mm, ___address);	\ -	__pte;								\ -}) - -#define pmdp_clear_flush_notify(__vma, __address, __pmdp)		\ -({									\ -	pmd_t __pmd;							\ -	struct vm_area_struct *___vma = __vma;				\ -	unsigned long ___address = __address;				\ -	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\ -	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\ -					    (__address)+HPAGE_PMD_SIZE);\ -	__pmd = pmdp_clear_flush(___vma, ___address, __pmdp);		\ -	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\ -					  (__address)+HPAGE_PMD_SIZE);	\ -	__pmd;								\ -}) - -#define pmdp_splitting_flush_notify(__vma, __address, __pmdp)		\ -({									\ -	struct vm_area_struct *___vma = __vma;				\ -	unsigned long ___address = __address;				\ -	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\ -	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\ -					    (__address)+HPAGE_PMD_SIZE);\ -	pmdp_splitting_flush(___vma, ___address, __pmdp);		\ -	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\ -					  (__address)+HPAGE_PMD_SIZE);	\ -}) -  #define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\  ({									\  	int __young;							\ @@ -311,14 +268,24 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)  	__young;							\  }) +/* + * set_pte_at_notify() sets the pte _after_ running the notifier. + * This is safe to start by updating the secondary MMUs, because the primary MMU + * pte invalidate must have already happened with a ptep_clear_flush() before + * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is + * required when we change both the protection of the mapping from read-only to + * read-write and the pfn (like during copy on write page faults). Otherwise the + * old page would remain mapped readonly in the secondary MMUs after the new + * page is already writable by some CPU through the primary MMU. + */  #define set_pte_at_notify(__mm, __address, __ptep, __pte)		\  ({									\  	struct mm_struct *___mm = __mm;					\  	unsigned long ___address = __address;				\  	pte_t ___pte = __pte;						\  									\ -	set_pte_at(___mm, ___address, __ptep, ___pte);			\  	mmu_notifier_change_pte(___mm, ___address, ___pte);		\ +	set_pte_at(___mm, ___address, __ptep, ___pte);			\  })  #else /* CONFIG_MMU_NOTIFIER */ @@ -369,9 +336,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)  #define ptep_clear_flush_young_notify ptep_clear_flush_young  #define pmdp_clear_flush_young_notify pmdp_clear_flush_young -#define ptep_clear_flush_notify ptep_clear_flush -#define pmdp_clear_flush_notify pmdp_clear_flush -#define pmdp_splitting_flush_notify pmdp_splitting_flush  #define set_pte_at_notify set_pte_at  #endif /* CONFIG_MMU_NOTIFIER */  |