Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
 -rw-r--r--   arch/x86/include/asm/tlbflush.h   49
 1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 36a1a2ab87d..74a44333545 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
  */
 
 #ifndef CONFIG_SMP
@@ -109,9 +105,17 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+	   unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
-					   unsigned long va)
+					   unsigned long start,
+					   unsigned long end)
 {
 }
 
@@ -119,27 +123,35 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else  /* SMP */
 
 #include <asm/smp.h>
 
 #define local_flush_tlb() __flush_tlb()
 
+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)	\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(vma->vm_mm);
-}
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va);
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -159,13 +171,8 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end)	\
+	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */
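
For context, a minimal caller-side sketch of the reworked interface. It is not part of the patch: the helper name and scenario are hypothetical, and it only assumes the SMP definitions introduced above, where flush_tlb_range() expands to flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) and flush_tlb_mm() becomes flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL).

	#include <linux/mm.h>
	#include <asm/tlbflush.h>

	/*
	 * Hypothetical example: PTEs covering [start, end) in vma were just
	 * modified, so stale translations for that range must be invalidated.
	 */
	static void example_flush_after_pte_change(struct vm_area_struct *vma,
						   unsigned long start,
						   unsigned long end)
	{
		/*
		 * With this patch the range bounds are forwarded to
		 * flush_tlb_mm_range(), so the implementation can restrict the
		 * flush to the touched pages instead of always invalidating
		 * the whole address space.
		 */
		flush_tlb_range(vma, start, end);
	}

	/*
	 * A full flush of an address space is still spelled flush_tlb_mm(mm),
	 * now shorthand for flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL).
	 */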