Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	30
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a85c8..cfa5c90c01d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
@@ -386,18 +387,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
-	bool preload_fpu;
+	fpu_switch_t fpu;
 
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
+	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -427,13 +419,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
-	/* Must be after DS reload */
-	__unlazy_fpu(prev_p);
-
-	/* Make sure cpu is ready for new context */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -474,6 +459,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
+	switch_fpu_finish(next_p, fpu);
+
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -492,13 +479,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/*
-	 * Preload the FPU context, now that we've determined that the
-	 * task is likely to be using it.
-	 */
-	if (preload_fpu)
-		__math_state_restore();
-
 	return prev_p;
 }
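For readers following the patch: the helpers it introduces (fpu_switch_t, switch_fpu_prepare(), switch_fpu_finish()) replace the open-coded FPU preload logic deleted above. The sketch below is not the kernel's actual implementation, only a minimal illustration of the contract the diff relies on, assembled from the pieces visible in the removed lines (tsk_used_math(), fpu_counter, __unlazy_fpu(), clts(), prefetch(), __math_state_restore()); the real helpers live in the x86 FPU headers and also take the cpu argument, presumably to track per-CPU lazy-restore ownership, which this sketch ignores.

/*
 * Illustrative sketch only -- not the real header definitions.
 * It re-expresses the removed open-coded logic as the two-step
 * interface this patch switches __switch_to() over to.
 */
typedef struct { int preload; } fpu_switch_t;	/* assumed shape */

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *prev_p,
					      struct task_struct *next_p, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * Same heuristic as the removed block: preload if the task
	 * used the FPU during its last 5 timeslices.
	 */
	fpu.preload = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	__unlazy_fpu(prev_p);		/* save the outgoing task's state */
	if (fpu.preload)
		prefetch(next_p->thread.fpu.state);

	/* cpu is unused here; the real helper uses it for lazy restore */
	return fpu;
}

static inline void switch_fpu_finish(struct task_struct *next_p, fpu_switch_t fpu)
{
	if (fpu.preload) {
		clts();			/* clear CR0.TS so the restore won't trap */
		__math_state_restore();	/* preload the math state now */
	}
}

The point of the split is ordering: the preload decision and the save of the outgoing task's state happen once at the top of __switch_to(), while the actual restore for the incoming task is deferred by the fpu_switch_t cookie until its segment state has been switched, roughly where the old preload_fpu code used to run.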