@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
|
|
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
|
|
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;