/* xen-asm_64.S */

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C,
 * since they're generally too large to inline anyway.
 */

#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"

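/*
 * Added commentary: Xen delivers exceptions to a 64-bit PV guest with
 * %rcx and %r11 saved on top of the usual hardware frame (see the
 * syscall-callback frame comment further down).  This helper is
 * entered via call, so the 8+ offsets skip its own return address; it
 * recovers the saved %rcx and %r11 and then drops the two extra words
 * with ret $16, leaving a native-looking exception frame.
 */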
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16
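
/*
 * Added commentary: hypercall stubs in the hypercall page are 32 bytes
 * apart, so the expression below points hypercall_iret at the iret
 * stub and lets us jmp to it directly.
 */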
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
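
/*
 * Added commentary: xen_iret pushes the extra "flags" word as 0 (no
 * VGCF_in_syscall, unlike the sysret paths below) and hands the frame
 * above to the iret hypercall.  ENDPATCH/RELOC come from xen-asm.h;
 * they appear to mark the end of the patchable region and record the
 * jmp's target operand (1b+1) so the stub can be inlined and fixed up
 * by the pv-op patching code, as the header comment describes.
 */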
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
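
/*
 * Added commentary: on a 32-bit sysexit return %rcx holds the user
 * stack pointer and %rdx the user return address, so the pushes below
 * build a compat iret frame (ss/rsp/rflags/cs/rip) plus a zero flags
 * word for the iret hypercall.
 */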
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
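
/*
 * Added commentary: on the syscall return path %rcx holds the user
 * return address and %r11 the saved rflags (syscall semantics), so the
 * pushes below rebuild a full iret frame from them.  A PV guest kernel
 * runs outside ring 0 and cannot execute sysret itself, so the return
 * goes through the iret hypercall with VGCF_in_syscall in the flags
 * word.
 */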
ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
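
/*
 * Added commentary: same idea as xen_sysret64 above, but for returning
 * to 32-bit compat userspace, so the frame uses the 32-bit user
 * code/data selectors instead of the 64-bit ones.
 */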
ENTRY(xen_sysret32)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 *  - kernel gs
 *  - kernel rsp
 *  - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
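
/*
 * Added commentary: the offsets below index the frame shown above:
 * 0*8 is rcx, 1*8 is r11, and 5*8 is the saved user rsp.  Loading rsp
 * last switches us off the Xen-provided frame, leaving %rcx/%r11/%rsp
 * looking the way the native syscall entry code expects to find them.
 */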
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */
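
/*
 * Added commentary: without compat support there is nothing sensible
 * to jump to, so the stubs below discard the extra %rcx/%r11 words,
 * fail the call with -ENOSYS and return straight to userspace via the
 * iret hypercall.
 */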
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx,%r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret

ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */