xen-asm_64.S

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e. vcpu in pda) of the
 * operations here; the indirect forms are better handled in C,
 * since they're generally too large to inline anyway.
 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <xen/interface/xen.h>
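
/*
 * Machinery for the patchable sequences below: ENDPATCH() marks the end
 * of a sequence that may be copied inline by the paravirt patching code,
 * and RELOC() records the location of a relocation inside it (typically
 * the rel32 operand of a call) that has to be fixed up after copying;
 * a value of 0 means the sequence needs no relocation.
 */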
#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 0
#include <asm/percpu.h>

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status; if there are pending events, enter the
 * hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jz 1f

2:      call check_events
1:
ENDPATCH(xen_irq_enable_direct)
        ret
        ENDPROC(xen_irq_enable_direct)
        RELOC(xen_irq_enable_direct, 2b+1)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
        ret
        ENDPROC(xen_irq_disable_direct)
        RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen
 * and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
        setz %ah		/* %ah = 1 if events are unmasked (enabled) */
        addb %ah, %ah		/* double it: bit 9 of %eax = X86_EFLAGS_IF */
ENDPATCH(xen_save_fl_direct)
        ret
        ENDPROC(xen_save_fl_direct)
        RELOC(xen_save_fl_direct, 0)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
        testb $X86_EFLAGS_IF>>8, %ah
        setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts.  The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* check for unmasked and pending (pending byte low, mask byte high) */
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
        jnz 1f

2:      call check_events
1:
ENDPATCH(xen_restore_fl_direct)
        ret
        ENDPROC(xen_restore_fl_direct)
        RELOC(xen_restore_fl_direct, 2b+1)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
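/*
 * Note: the direct sequences above reach this via a bare call, so all
 * caller-clobbered registers must be preserved here before calling
 * into C (force_evtchn_callback).
 */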
check_events:
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        ret

#endif
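
/*
 * Xen's exception entry for 64-bit PV guests leaves two extra words
 * (the interrupted context's %rcx and %r11) above the hardware frame.
 * Reload them into their registers, then pop the return address and
 * drop the two words (ret $16) so the rest of the entry code sees a
 * native-looking frame.
 */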
ENTRY(xen_adjust_exception_frame)
        mov 8+0(%rsp), %rcx
        mov 8+8(%rsp), %r11
        ret $16
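
/*
 * Return to the interrupted context with the HYPERVISOR_iret hypercall
 * (each stub in hypercall_page is 32 bytes).  The pushq $0 supplies the
 * hypercall's flags word (no VGCF_* flags set) on top of the iret frame
 * built by the caller.
 */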
ENTRY(xen_iret)
        pushq $0
        jmp hypercall_page + __HYPERVISOR_iret * 32
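
/* Not implemented yet; ud2a traps so any call here is caught immediately. */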
ENTRY(xen_sysexit)
        ud2a