irq.c

#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
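/*
 * Note: cmd 0 here is XENVER_version, used simply because it is the
 * cheapest available hypercall; the returned value is discarded. The
 * point of the call is the trap into the hypervisor itself, which
 * gives Xen the chance to deliver any pending event on the return
 * path, as the comment above describes.
 */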
static void __init __xen_init_IRQ(void)
{
#ifdef CONFIG_X86_64
	int i;

	/* Create identity vector->irq map */
	for (i = 0; i < NR_VECTORS; i++) {
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(vector_irq, cpu)[i] = i;
	}
#endif	/* CONFIG_X86_64 */

	xen_init_IRQ();
}
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = x86_read_percpu(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
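/*
 * Worked example of the conversion above: with the upcall mask set
 * (events blocked), evtchn_upcall_mask == 1, so flags = !1 = 0 and
 * (-0) & X86_EFLAGS_IF == 0. With the mask clear, flags = !0 = 1 and
 * (-1) & X86_EFLAGS_IF == 0x200, i.e. exactly the IF bit that a
 * native save_fl would report.
 */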
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
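/*
 * A minimal sketch of the intended calling pattern: these two ops back
 * the generic local_irq_save()/local_irq_restore() pair on a pv_ops
 * kernel, so a caller effectively does
 *
 *	unsigned long flags = xen_save_fl();	(via local_irq_save)
 *	... critical section, events masked ...
 *	xen_restore_fl(flags);			(via local_irq_restore)
 */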
static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
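/*
 * The barrier() ordering is the whole trick here: the mask must be
 * observably clear before evtchn_upcall_pending is tested. An event
 * that becomes pending after the test is still safe, since the mask
 * is already clear and Xen can deliver it directly; only the reverse
 * order (check, then unmask) could lose a wakeup.
 */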
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}
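/*
 * The split above is presumably the paravirtual analogue of executing
 * "hlt" with IF clear vs. IF set: SCHEDOP_block implicitly re-enables
 * event delivery (see xen_safe_halt), so with interrupts disabled the
 * vcpu is taken offline via VCPUOP_down rather than blocked.
 */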
static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
}
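/*
 * Installing xen_irq_ops replaces the native pv_irq_ops wholesale, so
 * from this point every interrupt-flag operation in the kernel is
 * routed through the per-vcpu evtchn_upcall_mask manipulated above,
 * rather than the real EFLAGS.IF bit.
 */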