/* arch/x86/xen/xen-asm_32.S */
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

//#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"
  17. /*
  18. Force an event check by making a hypercall,
  19. but preserve regs before making the call.
  20. */
  21. check_events:
  22. push %eax
  23. push %ecx
  24. push %edx
  25. call xen_force_evtchn_callback
  26. pop %edx
  27. pop %ecx
  28. pop %eax
  29. ret
  30. /*
  31. We can't use sysexit directly, because we're not running in ring0.
  32. But we can easily fake it up using iret. Assuming xen_sysexit
  33. is jumped to with a standard stack frame, we can just strip it
  34. back to a standard iret frame and use iret.
  35. */
  36. ENTRY(xen_sysexit)
  37. movl PT_EAX(%esp), %eax /* Shouldn't be necessary? */
  38. orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
  39. lea PT_EIP(%esp), %esp
  40. jmp xen_iret
  41. ENDPROC(xen_sysexit)
/*
 * This is run where a normal iret would be run, with the same stack setup:
 *	8: eflags
 *	4: cs
 *	esp-> 0: eip
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, they will not be tested again before returning to usermode.
 * This means that a process can end up with pending work, which will
 * be unprocessed until the process enters and leaves the kernel
 * again, which could be an unbounded amount of time.  This means that
 * a pending signal or reschedule event could be indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */
ENTRY(xen_iret)
	/* test eflags for special cases */
	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
	jnz hyper_iret

	push %eax
	ESP_OFFSET=4	# bytes pushed onto stack

	/*
	 * Store vcpu_info pointer for easy access.  Do it this way to
	 * avoid having to reload %fs
	 */
#ifdef CONFIG_SMP
	GET_THREAD_INFO(%eax)
	movl TI_cpu(%eax),%eax
	movl __per_cpu_offset(,%eax,4),%eax
	mov per_cpu__xen_vcpu(%eax),%eax
#else
	movl per_cpu__xen_vcpu, %eax
#endif

	/*
	 * Check IF state we're restoring.  X86_EFLAGS_IF lives in the
	 * second byte of eflags, hence the >>8 and the +1 byte offset
	 * into the saved eflags word (8 bytes up the frame, plus the
	 * ESP_OFFSET for the %eax we pushed above).
	 */
	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

	/*
	 * Maybe enable events.  Once this happens we could get a
	 * recursive event, so the critical region starts immediately
	 * afterwards.  However, if that happens we don't end up
	 * resuming the code, so we don't have to be worried about
	 * being preempted to another CPU.
	 */
	setz XEN_vcpu_info_mask(%eax)	/* IF set -> unmask (mask = 0) */
xen_iret_start_crit:

	/* check for unmasked and pending */
	cmpw $0x0001, XEN_vcpu_info_pending(%eax)

	/*
	 * If there's something pending, mask events again so we can
	 * jump back into xen_hypervisor_callback
	 */
	sete XEN_vcpu_info_mask(%eax)

	popl %eax	/* pop does not touch flags; ZF from cmpw survives */

	/*
	 * From this point on the registers are restored and the stack
	 * updated, so we don't need to worry about it if we're
	 * preempted
	 */
iret_restore_end:

	/*
	 * Jump to hypervisor_callback after fixing up the stack.
	 * Events are masked, so jumping out of the critical region is
	 * OK.
	 */
	je xen_hypervisor_callback

1:	iret
xen_iret_end_crit:
/* iret can fault; let the exception table route that to iret_exc */
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

hyper_iret:
	/* put this out of line since its very rarely used */
	jmp hypercall_page + __HYPERVISOR_iret * 32

	/* exported so entry.S can detect interrupts in the critical window */
.globl xen_iret_start_crit, xen_iret_end_crit
/*
 * This is called by xen_hypervisor_callback in entry.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
 * %eax so we can do a more refined determination of what to do.
 *
 * The stack format at this point is:
 *	----------------
 *	 ss		: (ss/esp may be present if we came from usermode)
 *	 esp		:
 *	 eflags		}  outer exception info
 *	 cs		}
 *	 eip		}
 *	---------------- <- edi (copy dest)
 *	 eax		:  outer eax if it hasn't been restored
 *	----------------
 *	 eflags		}  nested exception info
 *	 cs		}   (no ss/esp because we're nested
 *	 eip		}    from the same ring)
 *	 orig_eax	}<- esi (copy src)
 *	 - - - - - - - -
 *	 fs		}
 *	 es		}
 *	 ds		}  SAVE_ALL state
 *	 eax		}
 *	  :		:
 *	 ebx		}<- esp
 *	----------------
 *
 * In order to deliver the nested exception properly, we need to shift
 * everything from the return addr up to the error code so it sits
 * just under the outer exception info.  This means that when we
 * handle the exception, we do it in the context of the outer
 * exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet
 * (ie, it's still on stack), we need to insert its value into the
 * SAVE_ALL state before going on, since it's usermode state which we
 * eventually need to restore.
 */
ENTRY(xen_iret_crit_fixup)
	/*
	 * Paranoia: Make sure we're really coming from kernel space.
	 * One could imagine a case where userspace jumps into the
	 * critical range address, but just before the CPU delivers a
	 * GP, it decides to deliver an interrupt instead.  Unlikely?
	 * Definitely.  Easy to avoid?  Yes.  The Intel documents
	 * explicitly say that the reported EIP for a bad jump is the
	 * jump instruction itself, not the destination, but some
	 * virtual environments get this wrong.
	 */
	movl PT_CS(%esp), %ecx
	andl $SEGMENT_RPL_MASK, %ecx
	cmpl $USER_RPL, %ecx
	je 2f	/* came from usermode: no fixup needed */

	lea PT_ORIG_EAX(%esp), %esi	/* copy source: top of nested frame */
	lea PT_EFLAGS(%esp), %edi	/* copy dest: bottom of outer frame */

	/*
	 * If eip is before iret_restore_end then stack
	 * hasn't been restored yet.
	 */
	cmp $iret_restore_end, %eax
	jae 1f

	movl 0+4(%edi),%eax		/* copy EAX (just above top of frame) */
	movl %eax, PT_EAX(%esp)

	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */

	/*
	 * Set up the copy.  std makes movsl run downwards, so the
	 * nested frame is slid upwards, word by word, until it sits
	 * directly under the outer exception info.
	 */
1:	std
	mov $PT_EIP / 4, %ecx		/* saved regs up to orig_eax */
	rep movsl
	cld

	lea 4(%edi),%esp		/* point esp to new frame */
2:	jmp xen_do_upcall