/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)     .globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)     .globl x##_end; x##_end=.
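/*
 * ENDPATCH marks the end of the sequence that may be copied inline by
 * the pv-ops patching machinery; RELOC records the offset of an
 * instruction operand that needs fixing up when the sequence is
 * relocated (0 means there is nothing to relocate).
 */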

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI  0x80000000
/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one and operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_irq_enable_direct)
        ret
        ENDPROC(xen_irq_enable_direct)
        RELOC(xen_irq_enable_direct, 2b+1)
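/*
 * The reloc for this sequence points at the call's displacement
 * (2b is the call instruction, +1 skips its one-byte opcode), so the
 * patcher can retarget check_events when the sequence is copied
 * inline.  The same applies to xen_restore_fl_direct below.
 */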
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
        RELOC(xen_irq_disable_direct, 0)
/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
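        /*
         * mask == 0 means events are enabled.  setz puts 1 in %ah in
         * that case, and adding %ah to itself turns it into 2, which
         * is X86_EFLAGS_IF >> 8, i.e. the IF bit in the high byte of
         * the returned flags.  Other bits of the result are undefined.
         */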
        testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
        setz %ah
        addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
        ret
        ENDPROC(xen_save_fl_direct)
        RELOC(xen_save_fl_direct, 0)
/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
        testb $X86_EFLAGS_IF>>8, %ah
        setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* check for unmasked and pending */
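        /*
         * The mask byte sits directly after the pending byte in the
         * vcpu_info structure, so this single word compare is true
         * only when an event is pending (low byte == 1) and events
         * are unmasked (high byte == 0).
         */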
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
        jnz 1f

2:      call check_events
1:
ENDPATCH(xen_restore_fl_direct)
        ret
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)
/*
 * This is run where a normal iret would be run, with the same stack setup:
 *      8: eflags
 *      4: cs
 *      esp-> 0: eip
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, they will not be tested again before returning to usermode.
 * This means that a process can end up with pending work, which will
 * be unprocessed until the process enters and leaves the kernel
 * again, which could be an unbounded amount of time.  This means that
 * a pending signal or reschedule event could be indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */
ENTRY(xen_iret)
        /* test eflags for special cases */
        testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
        jnz hyper_iret

        push %eax
        ESP_OFFSET=4    # bytes pushed onto stack

        /*
         * Store vcpu_info pointer for easy access.  Do it this way to
         * avoid having to reload %fs
         */
#ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
        movl TI_cpu(%eax), %eax
        movl __per_cpu_offset(,%eax,4), %eax
        mov per_cpu__xen_vcpu(%eax), %eax
#else
        movl per_cpu__xen_vcpu, %eax
#endif

        /* check IF state we're restoring */
        testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

        /*
         * Maybe enable events.  Once this happens we could get a
         * recursive event, so the critical region starts immediately
         * afterwards.  However, if that happens we don't end up
         * resuming the code, so we don't have to be worried about
         * being preempted to another CPU.
         */
        setz XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

        /* check for unmasked and pending */
        cmpw $0x0001, XEN_vcpu_info_pending(%eax)

        /*
         * If there's something pending, mask events again so we can
         * jump back into xen_hypervisor_callback
         */
        sete XEN_vcpu_info_mask(%eax)

        popl %eax

        /*
         * From this point on the registers are restored and the stack
         * updated, so we don't need to worry about it if we're
         * preempted
         */
iret_restore_end:

        /*
         * Jump to hypervisor_callback after fixing up the stack.
         * Events are masked, so jumping out of the critical region is
         * OK.
         */
        je xen_hypervisor_callback

1:      iret
xen_iret_end_crit:
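        /*
         * If the iret at 1: faults, the fixup entry below redirects
         * it to iret_exc in entry.S, the same path a faulting native
         * iret takes.
         */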
.section __ex_table, "a"
        .align 4
        .long 1b, iret_exc
.previous
hyper_iret:
        /* put this out of line since it's very rarely used */
        jmp hypercall_page + __HYPERVISOR_iret * 32

        .globl xen_iret_start_crit, xen_iret_end_crit
/*
 * This is called by xen_hypervisor_callback in entry.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
 * %eax so we can do a more refined determination of what to do.
 *
 * The stack format at this point is:
 *      ----------------
 *       ss             : (ss/esp may be present if we came from usermode)
 *       esp            :
 *       eflags         }  outer exception info
 *       cs             }
 *       eip            }
 *      ---------------- <- edi (copy dest)
 *       eax            :  outer eax if it hasn't been restored
 *      ----------------
 *       eflags         }  nested exception info
 *       cs             }   (no ss/esp because we're nested
 *       eip            }    from the same ring)
 *       orig_eax       }<- esi (copy src)
 *       - - - - - - - -
 *       fs             }
 *       es             }
 *       ds             }  SAVE_ALL state
 *       eax            }
 *        :             :
 *       ebx            }<- esp
 *      ----------------
 *
 * In order to deliver the nested exception properly, we need to shift
 * everything from the return addr up to the error code so it sits
 * just under the outer exception info.  This means that when we
 * handle the exception, we do it in the context of the outer
 * exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet
 * (ie, it's still on stack), we need to insert its value into the
 * SAVE_ALL state before going on, since it's usermode state which we
 * eventually need to restore.
 */
ENTRY(xen_iret_crit_fixup)
        /*
         * Paranoia: Make sure we're really coming from kernel space.
         * One could imagine a case where userspace jumps into the
         * critical range address, but just before the CPU delivers a
         * GP, it decides to deliver an interrupt instead.  Unlikely?
         * Definitely.  Easy to avoid?  Yes.  The Intel documents
         * explicitly say that the reported EIP for a bad jump is the
         * jump instruction itself, not the destination, but some
         * virtual environments get this wrong.
         */
        movl PT_CS(%esp), %ecx
        andl $SEGMENT_RPL_MASK, %ecx
        cmpl $USER_RPL, %ecx
        je 2f

        lea PT_ORIG_EAX(%esp), %esi
        lea PT_EFLAGS(%esp), %edi

        /*
         * If eip is before iret_restore_end then stack
         * hasn't been restored yet.
         */
        cmp $iret_restore_end, %eax
        jae 1f

        movl 0+4(%edi), %eax            /* copy EAX (just above top of frame) */
        movl %eax, PT_EAX(%esp)

        lea ESP_OFFSET(%edi), %edi      /* move dest up over saved regs */

        /* set up the copy */
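        /*
         * The destination overlaps the source and sits higher in
         * memory, so copy backwards (std makes esi/edi walk
         * downwards) starting from the top of the block, to avoid
         * overwriting words that haven't been copied yet.
         */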
1:      std
        mov $PT_EIP / 4, %ecx           /* saved regs up to orig_eax */
        rep movsl
        cld

        lea 4(%edi), %esp               /* point esp to new frame */
2:      jmp xen_do_upcall
ENTRY(xen_sysexit)
        /*
         * Store vcpu_info pointer for easy access.  Do it this way to
         * avoid having to reload %fs
         */
#ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
        movl TI_cpu(%eax), %eax
        movl __per_cpu_offset(,%eax,4), %eax
        mov per_cpu__xen_vcpu(%eax), %eax
#else
        movl per_cpu__xen_vcpu, %eax
#endif

        /*
         * We can't actually use sysexit in a pv guest, so fake it up
         * with iret
         */
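        /*
         * The PT_* offsets below are relative to the original frame,
         * so each reference adds 4 bytes for every word already
         * pushed here for the fake iret frame (two by the eflags
         * push, five by the eax reload).
         */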
        pushl $__USER_DS                /* user stack segment */
        pushl %ecx                      /* user esp */
        pushl PT_EFLAGS+2*4(%esp)       /* user eflags */
        pushl $__USER_CS                /* user code segment */
        pushl %edx                      /* user eip */

xen_sysexit_start_crit:
        /* Unmask events... */
        movb $0, XEN_vcpu_info_mask(%eax)
        /*
         * ...and test for pending.  There's a preempt window here,
         * but it doesn't matter because we're within the critical
         * section.
         */
        testb $0xff, XEN_vcpu_info_pending(%eax)

        /*
         * If there's something pending, mask events again so we can
         * directly inject it back into the kernel.
         */
        jnz 1f

        movl PT_EAX+5*4(%esp), %eax
2:      iret
1:      movb $1, XEN_vcpu_info_mask(%eax)
xen_sysexit_end_crit:

        addl $5*4, %esp                 /* remove iret frame */
        /* no need to re-save regs, but need to restore kernel %fs */
        mov $__KERNEL_PERCPU, %eax
        mov %eax, %fs
        jmp xen_do_upcall

.section __ex_table, "a"
        .align 4
        .long 2b, iret_exc
.previous

        .globl xen_sysexit_start_crit, xen_sysexit_end_crit
/*
 * sysexit fixup is easy, since the old frame is still sitting there
 * on the stack.  We just need to remove the new recursive interrupt
 * and return.
 */
ENTRY(xen_sysexit_crit_fixup)
        addl $PT_OLDESP+5*4, %esp       /* remove frame+iret */
        jmp xen_do_upcall
/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
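        /*
         * The direct sequences above call this (or are patched to
         * call it) without saving anything, so the registers a C
         * function may clobber (%eax, %ecx and %edx) must be
         * preserved around the call to force_evtchn_callback.
         */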
        push %eax
        push %ecx
        push %edx
        call force_evtchn_callback
        pop %edx
        pop %ecx
        pop %eax
        ret