xen-asm_64.S

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
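
/*
 * ENDPATCH marks how much of each routine the pv-op patcher may copy
 * when it inlines the routine at a call site; RELOC records the
 * offset of the one instruction operand (or 0 for none) that must be
 * fixed up after such a copy.
 */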

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 1
/*
 * FIXME: x86_64 can now support direct access to percpu variables
 * via a segment override.  Update Xen accordingly.
 */
#define BUG	ud2a
#endif
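
/*
 * Until that is done, each direct routine below starts with BUG
 * (ud2a), so any attempt to run this not-yet-usable code traps
 * immediately instead of touching bogus percpu data.
 */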

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status; if there are pending events, we enter the
 * hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	BUG

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check
	 * may end up being run on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
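
/*
 * The reloc value 2b+1 is the offset of the rel32 operand of the
 * 'call check_events' above (one byte past its opcode), which the
 * patcher must re-target when it copies this sequence elsewhere.
 */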

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	BUG

	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
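
/* A reloc of 0 means the copied sequence needs no fixup at all. */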

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	BUG

	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
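	/*
	 * The setz/addb pair builds the result without a branch: setz
	 * puts 1 in %ah (bit 8 of %eax) when the mask byte is zero,
	 * i.e. when events are enabled, and doubling %ah moves that 1
	 * up to bit 9, which is exactly X86_EFLAGS_IF (0x200).
	 */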
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	BUG

	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check
	 * may end up being run on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/* check for unmasked and pending */
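	/*
	 * evtchn_upcall_pending and evtchn_upcall_mask are adjacent
	 * bytes in vcpu_info, so a single 16-bit compare checks both:
	 * the word equals 0x0001 exactly when an event is pending (low
	 * byte 1) and events are unmasked (high byte 0).
	 */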
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f			/* no unmasked event pending */

2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
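/*
 * xen_force_evtchn_callback is a C function, so all the registers the
 * C ABI lets it clobber are saved and restored by hand: the patched
 * call sites that reach check_events don't expect the usual C
 * clobbers.
 */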
check_events:
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret

ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16
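
/*
 * Xen pushes %rcx and %r11 on top of the frame the CPU would build
 * for an exception; the two moves above reload them, and 'ret $16'
 * drops both words from the stack, leaving a native-looking frame.
 */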

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
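
/*
 * The hypercall page is an array of 32-byte entries, one per
 * hypercall number, so the assignment above yields the address of
 * the iret hypercall's entry, which can be jumped to directly.
 */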

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
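
/*
 * The pushq $0 supplies the 'flags' slot of the frame above, with no
 * VGCF flags set; 1b+1 is again the offset of the jmp's rel32
 * operand, for the patcher.
 */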

/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
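
/*
 * The pushes above hand-build the iret frame from the sysexit
 * register convention (user stack pointer in %rcx, return address in
 * %rdx): ss, rsp, rflags with just IF set, cs, rip, then a zero
 * flags word.
 */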

ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)
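
/*
 * Both sysret paths rebuild a full Xen iret frame from the sysret
 * register convention (return %rip in %rcx, rflags in %r11): the
 * user %rsp is stashed in pda_oldrsp while the frame is built on the
 * kernel stack.  VGCF_in_syscall in the flags slot tells the
 * hypervisor this is a return from a syscall, so it may use its
 * faster syscall-return path.
 */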

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 *  - kernel gs
 *  - kernel rsp
 *  - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm
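
/*
 * Slots 0 and 1 of the Xen frame hold the user rip and rflags, which
 * the native syscall entry expects in %rcx and %r11 (just as the
 * syscall instruction leaves them); slot 5 is the saved user rsp, so
 * the final mov switches straight back to the user stack.
 */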

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)
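
/*
 * We enter past the native entry's swapgs because Xen delivers the
 * callback with the kernel gs already in place.
 */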

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */
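
/*
 * Without IA32 emulation, the 32-bit entrypoints simply fail the
 * call: stripping the rcx/r11 words leaves a standard Xen iret frame
 * on the stack, so after setting %rax to -ENOSYS and pushing the
 * flags word we can return to the caller via the iret hypercall.
 */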