@@ -669,14 +669,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 */
 	load_TLS(next, cpu);
 
-	/*
-	 * Restore %gs if needed (which is common)
-	 */
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
-
-	write_pda(pcurrent, next_p);
-
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
@@ -686,6 +678,15 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
 	disable_tsc(prev_p, next_p);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
@@ -693,6 +694,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	if (next_p->fpu_counter > 5)
 		math_state_restore();
 
+	/*
+	 * Restore %gs if needed (which is common)
+	 */
+	if (prev->gs | next->gs)
+		loadsegment(gs, next->gs);
+
+	write_pda(pcurrent, next_p);
+
 	return prev_p;
 }
 