/* xen-asm_64.S */

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C,
 * since they're generally too large to inline anyway.
 */
#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
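/*
 * RELOC() publishes a <name>_reloc symbol marking the one location that
 * would need fixing up if the sequence is copied somewhere else (0 means
 * nothing to relocate), and ENDPATCH() publishes <name>_end marking where
 * the inlinable body stops.  These symbols are presumably consumed by the
 * pv-op patching code that inlines these snippets at their call sites.
 */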
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000
#if 0
#include <asm/percpu.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one "and"-type operation (testb).  If there are
 * pending events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_irq_enable_direct)
        ret
        ENDPROC(xen_irq_enable_direct)
        RELOC(xen_irq_enable_direct, 2b+1)
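/*
 * Note on the relocation above: 2b+1 is the byte just past the call
 * opcode, i.e. the 32-bit displacement of "call check_events", which is
 * the one thing that would need fixing up if this snippet were copied
 * inline somewhere else.  The ret sits after ENDPATCH() because it is
 * only needed for the direct-call form, not for an inlined copy.
 */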
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
        RELOC(xen_irq_disable_direct, 0)
/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
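/*
 * How the sequence below gets there without a branch: testb sets ZF when
 * the per-vcpu mask byte is 0 (events enabled), setz turns that into 0 or
 * 1 in %ah, and addb doubles it, so the second byte of the return value
 * ends up as 0x00 or 0x02 -- and 0x0200 is exactly X86_EFLAGS_IF.
 * Roughly, as a pseudo-C sketch ("vcpu" and the field name are the usual
 * Xen vcpu_info ones, not symbols from this file):
 *
 *	return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 */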
ENTRY(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
        setz %ah
        addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
        ret
        ENDPROC(xen_save_fl_direct)
        RELOC(xen_save_fl_direct, 0)
/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
        testb $X86_EFLAGS_IF>>8, %ah
        setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* check for unmasked and pending */
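        /*
         * The 16-bit compare reads two adjacent vcpu_info bytes at once:
         * assuming the usual Xen layout (evtchn_upcall_pending immediately
         * followed by evtchn_upcall_mask), the low byte is the pending flag
         * and the high byte is the mask, so a value of exactly 0x0001 means
         * "pending and unmasked".
         */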
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_restore_fl_direct)
        ret
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)
/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
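/*
 * Everything the C calling convention treats as caller-saved (rax, rcx,
 * rdx, rsi, rdi, r8-r11) is pushed here, presumably because this helper
 * is reached from the patched/inlined sequences above, where the
 * surrounding code has made no provision for a C call clobbering them.
 */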
check_events:
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        ret
#endif
ENTRY(xen_adjust_exception_frame)
        mov 8+0(%rsp),%rcx
        mov 8+8(%rsp),%r11
        ret $16
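/*
 * Xen delivers exceptions with %rcx and %r11 pushed on top of the usual
 * frame, so the two loads above pick them back up (the extra 8 skips our
 * own return address), and "ret $16" both returns and discards those two
 * words, leaving a normal-looking exception frame for the caller.
 */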
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
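/*
 * Each hypercall gets a 32-byte slot in the hypercall page, hence the
 * __HYPERVISOR_iret * 32 above.  For xen_iret below, the bare "pushq $0"
 * supplies the "flags" word of the frame shown above (no VGCF_* flags
 * set) on top of the standard iret frame already present; the hypercall
 * page stub then pushes rcx, r11 and rax and traps into the hypervisor.
 */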
ENTRY(xen_iret)
        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
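/*
 * By the sysexit convention, %rdx holds the user return address and %rcx
 * the user stack pointer at this point, so the pushes below assemble the
 * frame hypercall_iret expects: ss=__USER32_DS, rsp=%rcx, rflags with IF
 * set, cs=__USER32_CS, rip=%rdx, plus the VGCF_in_syscall flag word.
 */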
ENTRY(xen_sysexit)
        pushq $__USER32_DS
        pushq %rcx
        pushq $X86_EFLAGS_IF
        pushq $__USER32_CS
        pushq %rdx

        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
ENTRY(xen_sysret64)
        /*
         * We're already on the usermode stack at this point, but still
         * with the kernel gs, so we can easily switch back
         */
        movq %rsp, %gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp

        pushq $__USER_DS
        pushq %gs:pda_oldrsp
        pushq %r11
        pushq $__USER_CS
        pushq %rcx

        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
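/*
 * On the sysret path %rcx holds the user rip and %r11 the user rflags
 * (the registers the syscall instruction saved them in), so the block
 * above first parks the user rsp in pda_oldrsp and hops onto the kernel
 * stack, then rebuilds the full frame from those registers.
 * xen_sysret32 below is the same dance with the 32-bit compat selectors.
 */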
ENTRY(xen_sysret32)
        /*
         * We're already on the usermode stack at this point, but still
         * with the kernel gs, so we can easily switch back
         */
        movq %rsp, %gs:pda_oldrsp
        movq %gs:pda_kernelstack, %rsp

        pushq $__USER32_DS
        pushq %gs:pda_oldrsp
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx

        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)
/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 *  - kernel gs
 *  - kernel rsp
 *  - an iret-like stack frame on the stack (including rcx and r11):
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
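/*
 * Relative to the frame above, that means (in 8-byte slots from %rsp):
 * rcx at 0*8, r11 at 1*8 and the saved user rsp at 5*8, which is exactly
 * what undo_xen_syscall below reloads -- putting rcx/r11 back to their
 * post-"syscall" values and switching back to the user stack, as if the
 * CPU itself had just executed the instruction.
 */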
.macro undo_xen_syscall
        mov 0*8(%rsp),%rcx
        mov 1*8(%rsp),%r11
        mov 5*8(%rsp),%rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
        undo_xen_syscall
        jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
        undo_xen_syscall
        jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
        undo_xen_syscall
        jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
        lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
        mov $-ENOSYS, %rax
        pushq $VGCF_in_syscall
        jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */