/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"
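
/*
 * Note on the ENDPATCH/RELOC bookkeeping used below (both come from
 * xen-asm.h): ENDPATCH(x) marks the end of the patchable body by
 * emitting an x_end symbol, and RELOC(x, v) emits an x_reloc symbol
 * recording which location inside that body needs relocating if the
 * paravirt patching code copies it inline -- "2b+1" points at the
 * displacement of the "call check_events" instruction, while 0 means
 * there is nothing to fix up.
 */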

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
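
/*
 * For reference, a rough C sketch of the same logic (not the in-tree
 * C implementation; the field names follow Xen's public vcpu_info
 * ABI, and xen_force_evtchn_callback is the helper that check_events
 * below wraps):
 *
 *	v->evtchn_upcall_mask = 0;		unmask events first
 *	barrier();
 *	if (v->evtchn_upcall_pending)		anything still pending?
 *		xen_force_evtchn_callback();	get it delivered
 */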

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
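	/*
	 * The testb sets ZF when the mask byte is 0, i.e. when events
	 * are enabled.  setz then makes %ah 1 in that case and 0
	 * otherwise, and doubling it yields 2, which is exactly bit 9
	 * of the return register: X86_EFLAGS_IF (0x200).  The rest of
	 * that byte stays 0, as callers expect.
	 */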
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
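
/*
 * Rough C sketch of the above (again not the in-tree implementation;
 * only X86_EFLAGS_IF in the result is meaningful, other bits are
 * undefined):
 *
 *	return v->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 */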

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
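	/*
	 * On 64-bit the flags argument arrives in %rdi, so X86_EFLAGS_IF
	 * can be tested directly in %di; on 32-bit these direct ops take
	 * the argument in %eax, so bit 9 (X86_EFLAGS_IF) is tested as
	 * bit X86_EFLAGS_IF>>8 of %ah.
	 */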
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
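	/*
	 * The pending and mask flags are adjacent bytes in vcpu_info,
	 * so this 16-bit compare reads both at once (little-endian:
	 * pending in the low byte, mask in the high byte).  The word
	 * equals 0x0001 exactly when an event is pending and the mask
	 * is clear.
	 */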
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
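/*
 * Everything the C function xen_force_evtchn_callback might clobber
 * is saved and restored here, because the *_direct ops above may be
 * patched inline at call sites that do not expect the usual C calling
 * convention clobbers.
 */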
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret