/* xen-asm.S */

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e. vcpu in pda) of the
 * operations here; the indirect forms are better handled in C,
 * since they're generally too large to inline anyway.
 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
#define ENDPATCH(x) .globl x##_end; x##_end=.
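
/*
 * ENDPATCH(x) marks the end of x's patchable body by defining the
 * symbol x_end; RELOC(x, v) records, as x_reloc, the one location in
 * that body which needs relocating when the body is copied inline
 * (or 0 if nothing needs relocating).  The Xen paravirt patching code
 * uses these symbols when it inlines the sequences below at pv-op
 * call sites.
 */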

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
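
/*
 * Roughly, in C (a sketch only; "vcpu" stands for this CPU's
 * struct vcpu_info, i.e. the per-cpu xen_vcpu_info used below):
 *
 *	vcpu->evtchn_upcall_mask = 0;
 *	if (vcpu->evtchn_upcall_pending)
 *		check_events();
 */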
ENTRY(xen_irq_enable_direct)
	/* Clear mask and test pending */
	andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
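	/*
	 * The mask byte (evtchn_upcall_mask) sits immediately after the
	 * pending byte (evtchn_upcall_pending) in struct vcpu_info, so
	 * the single "andw" above clears the mask in the high byte and
	 * leaves ZF set iff the pending byte is zero.
	 */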
	/*
	 * Preempt here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */
	jz 1f
2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
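
/*
 * "2b+1" is the byte just past the call opcode at local label 2, i.e.
 * the 32-bit displacement of "call check_events", which is what has to
 * be fixed up if this body is copied somewhere else; a relocation of 0
 * (as used for the other routines) means nothing needs fixing up.
 */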

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in the X86_EFLAGS_IF bit, and other
 * bits may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and the other
 * bits in that byte are 0), but the remaining bits of the return value
 * are undefined.  We need to toggle the state of the bit, because Xen
 * and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
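	/*
	 * mask == 0 (events enabled) sets ZF, so setz yields 1 in %ah,
	 * and doubling it gives 0x02, i.e. bit 9 of %eax --
	 * X86_EFLAGS_IF (0x200) -- ends up set; a non-zero mask yields
	 * 0.  The other bits of %ah become zero and the rest of %eax is
	 * left alone.
	 */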
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and enter
 * the hypervisor to get them delivered if so.
 */
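
/*
 * Roughly, in C (again only a sketch; "flags" is the value handed to
 * us in %eax, so its second byte is in %ah):
 *
 *	vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *	if (vcpu->evtchn_upcall_pending && !vcpu->evtchn_upcall_mask)
 *		check_events();
 */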
ENTRY(xen_restore_fl_direct)
	testb $X86_EFLAGS_IF>>8, %ah
	setz %al
	movb %al, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* check for pending but unmasked */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
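	/*
	 * Again relying on the mask byte following the pending byte:
	 * the word equals 0x0001 exactly when an event is pending and
	 * the mask we just wrote is clear, which is the only case in
	 * which the hypervisor needs to be entered.
	 */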
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
 * Force an event check by making a hypercall,
 * but preserve regs before making the call.
 */
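
/*
 * force_evtchn_callback is a C function and may clobber %eax, %ecx and
 * %edx (the call-clobbered registers of the 32-bit C ABI), which the
 * (possibly patched-in) callers of check_events do not expect to
 * change, so save and restore them around the call.
 */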
check_events:
	push %eax
	push %ecx
	push %edx
	call force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
	ret