@@ -386,18 +386,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
-	bool preload_fpu;
-
-	/*
-	 * If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 */
-	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
-
-	/* we're going to use this soon, after a few expensive things */
-	if (preload_fpu)
-		prefetch(next->fpu.state);
 
 	/*
 	 * Reload esp0, LDT and the page table pointer:
@@ -430,10 +418,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Must be after DS reload */
 	__unlazy_fpu(prev_p);
 
-	/* Make sure cpu is ready for new context */
-	if (preload_fpu)
-		clts();
-
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -492,13 +476,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	    task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/*
-	 * Preload the FPU context, now that we've determined that the
-	 * task is likely to be using it.
-	 */
-	if (preload_fpu)
-		__math_state_restore();
-
 	return prev_p;
 }
 
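For context, the removed lines implemented a simple eager-restore heuristic: if the incoming task had used the FPU for more than five consecutive timeslices, the switch path restored its math state immediately rather than waiting for the device-not-available (#NM) trap. The following is a minimal, self-contained user-space sketch of just that decision logic; it is illustrative only (struct task, should_preload_fpu() and the sample tasks are made up for this example, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the few task_struct fields the heuristic looked at. */
struct task {
	const char *name;
	bool used_math;			/* task has touched the FPU before */
	unsigned int fpu_counter;	/* consecutive FPU-using timeslices */
};

/* Mirrors the preload_fpu decision this patch removes from __switch_to(). */
static bool should_preload_fpu(const struct task *next)
{
	return next->used_math && next->fpu_counter > 5;
}

int main(void)
{
	const struct task tasks[] = {
		{ "number-cruncher", true,  9 },	/* hot FPU user */
		{ "occasional-fpu",  true,  2 },	/* cold FPU user */
		{ "integer-only",    false, 0 },	/* never used math */
	};

	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		printf("%-15s -> %s\n", tasks[i].name,
		       should_preload_fpu(&tasks[i]) ?
		       "preload FPU state eagerly" :
		       "restore lazily via #NM trap");
	return 0;
}

With the patch applied, that heuristic is gone entirely: __switch_to() no longer calls clts() or __math_state_restore(), so the first FPU instruction a task executes after being switched in takes the #NM trap and its math state is restored lazily at that point.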