@@ -300,13 +300,11 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
 
-#if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
 		prefetch(&next_t->fpu.hard);
-#endif
 
 #ifdef CONFIG_MMU
 	/*
@@ -337,15 +335,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
-#if defined(CONFIG_SH_FPU)
-	/* If the task has used fpu the last 5 timeslices, just do a full
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5) {
+	if (next->fpu_counter > 5)
 		fpu_state_restore(task_pt_regs(next));
-	}
-#endif
 
 	return prev;
 }
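
For reference, below is a minimal stand-alone user-space sketch of the fpu_counter
heuristic the hunks above keep: only the fpu_counter field, the "> 5" threshold and
the fpu_state_restore() name come from the patch; the task struct, the stub bodies,
switch_in() and the printed messages are invented for illustration and are not the
kernel code.

#include <stdio.h>

struct task {
	const char *name;
	unsigned char fpu_counter;	/* consecutive recent timeslices that used the FPU */
};

/* Stub standing in for the real fpu_state_restore(task_pt_regs(next)). */
static void fpu_state_restore_stub(struct task *t)
{
	printf("  eager restore of FPU state for %s\n", t->name);
}

/*
 * Context-switch policy from the patch: a task that used the FPU in more
 * than five recent timeslices will almost certainly use it again, so
 * restoring its FPU state now is cheaper than taking the FPU-disabled
 * trap shortly after the switch; otherwise stay lazy.
 */
static void switch_in(struct task *next)
{
	if (next->fpu_counter > 5)
		fpu_state_restore_stub(next);
	else
		printf("  lazy: %s faults in FPU state on first use\n", next->name);
}

int main(void)
{
	struct task number_cruncher = { "number_cruncher", 9 };
	struct task shell = { "shell", 0 };

	switch_in(&number_cruncher);	/* takes the eager-restore path */
	switch_in(&shell);		/* stays on the lazy path */
	return 0;
}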