xen-asm_64.S 6.4 KB

/*
        Asm versions of Xen pv-ops, suitable for either direct use or
        inlining.  The inline versions are the same as the direct-use
        versions, with the pre- and post-amble chopped off.

        This code is encoded for size rather than absolute efficiency,
        with a view to being able to inline as much as possible.

        We only bother with direct forms (ie, vcpu in pda) of the
        operations here; the indirect forms are better handled in C,
        since they're generally too large to inline anyway.
 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
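/*
        ENDPATCH() marks the end of the patchable body of an op (everything
        before the trailing ret), and RELOC() records where, within that
        body, a call displacement needs relocating when the code is copied
        inline at a patch site; a value of 0 means there is nothing to
        relocate.  For the ops below that call check_events, "2b+1" is the
        address of the call's 32-bit displacement (label 2 plus one byte
        for the call opcode).

        A rough sketch, not part of this file and with approximate names,
        of how the patching side might consume these symbols for one op:

                extern const char xen_save_fl_direct[];
                extern const char xen_save_fl_direct_end[];
                extern const unsigned long xen_save_fl_direct_reloc;

                len = paravirt_patch_insns(insnbuf, maxlen,
                                           xen_save_fl_direct,
                                           xen_save_fl_direct_end);
                if (xen_save_fl_direct_reloc)
                        /* adjust the copied call displacement by the
                           distance the code was moved */ ;
 */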
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 1
/*
        x86-64 does not yet support direct access to percpu variables
        via a segment override, so we just need to make sure this code
        never gets used
 */
#define BUG			ud2a
#define PER_CPU_VAR(var, off)	0xdeadbeef
#endif
/*
        Enable events.  This clears the event mask and then tests the
        pending event status; if there are pending events, enter the
        hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        BUG

        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

        /* Preempt here doesn't matter because that will deal with
           any pending interrupts.  The pending check may end up being
           run on the wrong CPU, but that doesn't hurt. */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_irq_enable_direct)
        ret
        ENDPROC(xen_irq_enable_direct)
        RELOC(xen_irq_enable_direct, 2b+1)
/*
        Disabling events is simply a matter of making the event mask
        non-zero.
 */
ENTRY(xen_irq_disable_direct)
        BUG

        movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
        RELOC(xen_irq_disable_direct, 0)
/*
        (xen_)save_fl is used to get the current interrupt enable status.
        Callers expect the status to be in X86_EFLAGS_IF, and other bits
        may be set in the return value.  We take advantage of this by
        making sure that X86_EFLAGS_IF has the right value (and other
        bits in that byte are 0), but other bits in the return value are
        undefined.  We need to toggle the state of the bit, because Xen
        and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
        BUG

        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
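        /* mask == 0 means events are enabled: setz then puts 1 into %ah
           (bit 8 of %eax) and addb doubles it into bit 9, which is
           X86_EFLAGS_IF in the returned flags; a non-zero mask leaves
           both bits clear. */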
        setz %ah
        addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
        ret
        ENDPROC(xen_save_fl_direct)
        RELOC(xen_save_fl_direct, 0)
/*
        In principle the caller should be passing us a value returned
        from xen_save_fl_direct, but for robustness' sake we test only
        the X86_EFLAGS_IF flag rather than the whole byte.  After
        setting the interrupt mask state, it checks for unmasked
        pending events and enters the hypervisor to get them delivered
        if so.
 */
ENTRY(xen_restore_fl_direct)
        BUG

        testb $X86_EFLAGS_IF>>8, %ah
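        /* X86_EFLAGS_IF is bit 9, so its value lives in %ah.  If IF was
           clear, ZF is set and setz writes 1 (masked) to the mask byte;
           otherwise it writes 0 (unmasked). */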
        setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

        /* Preempt here doesn't matter because that will deal with
           any pending interrupts.  The pending check may end up being
           run on the wrong CPU, but that doesn't hurt. */

        /* check for unmasked and pending */
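        /* The 16-bit compare below relies on evtchn_upcall_mask directly
           following evtchn_upcall_pending in struct vcpu_info: the word
           equals 0x0001 exactly when pending == 1 and mask == 0. */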
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jnz 1f

2:      call check_events
1:
ENDPATCH(xen_restore_fl_direct)
        ret
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)
/*
        Force an event check by making a hypercall,
        but preserve regs before making the call.
 */
check_events:
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
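        /* xen_force_evtchn_callback is a C function; the pushes above
           cover the registers the SysV ABI lets it clobber, since the
           patched-in callers assume nothing at all is modified. */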
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        ret
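/*
        On exception entry Xen pushes %rcx and %r11 on top of the usual
        iret frame.  Restore them and drop the two extra words (ret $16)
        so the stack looks like a native hardware exception frame.
 */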
ENTRY(xen_adjust_exception_frame)
        mov 8+0(%rsp),%rcx
        mov 8+8(%rsp),%r11
        ret $16
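/* Each hypercall stub in the shared hypercall page is 32 bytes. */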
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
        Xen64 iret frame:

        ss
        rsp
        rflags
        cs
        rip             <-- standard iret frame

        flags

        rcx             }
        r11             }<-- pushed by hypercall page
rsp ->  rax             }
 */
ENTRY(xen_iret)
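        /* Push a zero flags word (no VGCF_* flags set) to complete the
           frame described above before entering the iret hypercall. */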
        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
/*
        sysexit is not used for 64-bit processes, so it's only ever
        used to return to 32-bit compat userspace.
 */
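/*
        Build the Xen iret frame by hand from the sysexit register
        convention: %rdx holds the user rip and %rcx the user rsp, the
        segments are the 32-bit compat user selectors, and rflags gets
        just X86_EFLAGS_IF.  The trailing zero is the frame's flags word.
 */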
ENTRY(xen_sysexit)
        pushq $__USER32_DS
        pushq %rcx
        pushq $X86_EFLAGS_IF
        pushq $__USER32_CS
        pushq %rdx
        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
ENTRY(xen_sysret64)
        /* We're already on the usermode stack at this point, but still
           with the kernel gs, so we can easily switch back */
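        /* Switch back to the kernel stack so the hypercall frame below
           isn't built on the user stack.  VGCF_in_syscall tells the iret
           hypercall this is a syscall return, so Xen can take its
           sysret-style exit (which need not preserve %rcx/%r11). */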
        movq %rsp, %gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp

        pushq $__USER_DS
        pushq %gs:pda_oldrsp
        pushq %r11
        pushq $__USER_CS
        pushq %rcx
        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
ENTRY(xen_sysret32)
        /* We're already on the usermode stack at this point, but still
           with the kernel gs, so we can easily switch back */
        movq %rsp, %gs:pda_oldrsp
        movq %gs:pda_kernelstack, %rsp

        pushq $__USER32_DS
        pushq %gs:pda_oldrsp
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx
        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)
/*
        Xen handles syscall callbacks much like ordinary exceptions,
        which means we have:
         - kernel gs
         - kernel rsp
         - an iret-like stack frame on the stack (including rcx and r11):
                ss
                rsp
                rflags
                cs
                rip
                r11
        rsp ->  rcx

        In all the entrypoints, we undo all that to make it look
        like a CPU-generated syscall/sysenter and jump to the normal
        entrypoint.
 */
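/*
        In the frame above %rcx and %r11 sit at the top of the stack and
        the saved user %rsp is five words up, hence the 0*8, 1*8 and 5*8
        offsets below.
 */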
.macro undo_xen_syscall
        mov 0*8(%rsp),%rcx
        mov 1*8(%rsp),%r11
        mov 5*8(%rsp),%rsp
.endm
/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
        undo_xen_syscall
        jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
        undo_xen_syscall
        jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
        undo_xen_syscall
        jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
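        /* Without IA32 emulation these entry points are still registered
           with Xen, so just fail the call with -ENOSYS and return to
           userspace via the iret hypercall. */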
        lea 16(%rsp), %rsp      /* strip %rcx,%r11 */
        mov $-ENOSYS, %rax
        pushq $VGCF_in_syscall
        jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif  /* CONFIG_IA32_EMULATION */