switch_to.h

#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);
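
/*
 * Editor's note, not part of the original header: __switch_to() does
 * the per-task state switch proper and returns the task that was
 * switched away from; __switch_to_xtra() is the slow path taken when
 * the two tasks differ in less common per-task state, such as debug
 * registers or the I/O permission bitmap kept in the TSS.
 */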
#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
        "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
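
/*
 * Editor's sketch of what the canary switch amounts to (a hedged
 * summary, not in the original file): gcc's -fstack-protector checks
 * the canary against a fixed per-CPU location, so the incoming task's
 * value must be copied into that slot before any stack-protected
 * function runs in the new task; in rough pseudo-C:
 *
 *      per-CPU stack_canary.canary = next->stack_canary;
 *
 * It has to live in the switch_to() asm itself, between the stack
 * switch and the jump into __switch_to().
 */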
/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /*                                                              \
         * Context-switching clobbers all registers, so we clobber     \
         * them explicitly, via unused output variables.                \
         * (EAX and EBP are not listed because EBP is saved/restored   \
         *  explicitly for wchan access and EAX is the return value of \
         *  __switch_to())                                              \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
        asm volatile("pushfl\n\t"                /* save    flags */    \
                     "pushl %%ebp\n\t"           /* save    EBP   */    \
                     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */    \
                     "movl %[next_sp],%%esp\n\t" /* restore ESP   */    \
                     "movl $1f,%[prev_ip]\n\t"   /* save    EIP   */    \
                     "pushl %[next_ip]\n\t"      /* restore EIP   */    \
                     __switch_canary                                    \
                     "jmp __switch_to\n"         /* regparm call  */    \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"            /* restore EBP   */    \
                     "popfl\n"                   /* restore flags */    \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
                       [prev_ip] "=m" (prev->thread.ip),                \
                       "=a" (last),                                     \
                                                                        \
                       /* clobbered output registers: */                \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                       /* input parameters: */                          \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
                                                                        \
                       /* regparm parameters for __switch_to(): */      \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                       "memory");                                       \
} while (0)
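
/*
 * Editor's walkthrough, a hedged summary that is not in the original
 * source: the asm saves prev's flags, EBP and ESP, records the label
 * "1:" as prev's resume EIP, then loads next's ESP and pushes next's
 * saved EIP before jumping to __switch_to(). The jmp plus the pushed
 * address behave like a call whose "return" lands in the next task.
 * The third argument exists because when prev eventually runs again it
 * may be switched to from some third task; __switch_to() returns that
 * task in EAX, which is wired to 'last'. The scheduler's call site is
 * conventionally
 *
 *      switch_to(prev, next, prev);
 *
 * so that 'prev' afterwards names the task actually switched away from.
 */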
#else /* CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER                                                 \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11",                \
          "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movq %P[task_canary](%%rsi),%%r8\n\t"                          \
        "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */
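
/*
 * Editor's note, not part of the original header: on 64-bit the canary
 * lives at a fixed offset inside the per-CPU irq_stack_union, reached
 * through %gs, so the same copy is done with 64-bit moves: %rsi (just
 * reloaded from current_task in the macro below, i.e. the incoming
 * task) supplies the task_struct canary, staged through %r8 into the
 * per-CPU slot.
 */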
/* Saving and restoring flags clears NT, handling its leak between tasks */
#define switch_to(prev, next, last)                                       \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
             "movq %%rax,%%rdi\n\t"                                       \
             "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"                  \
             "jnz ret_from_fork\n\t"                                      \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
               __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
               [current_task] "m" (current_task)                          \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_SWITCH_TO_H */