ipi.h

#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC InterProcessor Interrupt code.
 *
 * Moved to include file by James Cleverdon from
 * arch/x86-64/kernel/smp.c
 *
 * Copyrights from kernel/smp.c:
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 */

#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
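
/*
 * In xAPIC mode the 64-bit Interrupt Command Register is exposed as two
 * 32-bit MMIO words: APIC_ICR (offset 0x300: vector, delivery mode,
 * shorthand) and APIC_ICR2 (offset 0x310: destination field).  Writing
 * the low word is what actually dispatches the IPI, which is why every
 * sender below writes APIC_ICR last.
 */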
static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
                                         unsigned int dest)
{
        unsigned int icr = shortcut | dest;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}
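
/*
 * ICR low-word layout (per the Intel SDM, xAPIC mode): bits 0-7 carry
 * the vector, bits 8-10 the delivery mode, bit 11 the destination mode
 * and bits 18-19 the destination shorthand.  So, illustratively,
 * __prepare_ICR(0, vector, APIC_DEST_PHYSICAL) is just
 * APIC_DM_FIXED | vector: a fixed-delivery IPI with no shorthand.
 */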
static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}
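
/*
 * SET_APIC_DEST_FIELD() shifts the target APIC ID into bits 24-31 of
 * ICR2, i.e. bits 56-63 of the full 64-bit ICR, where the xAPIC
 * expects the destination.
 */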
static inline void __xapic_wait_icr_idle(void)
{
        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                cpu_relax();
}
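
/*
 * APIC_ICR_BUSY is the delivery-status bit (bit 12): the APIC clears
 * it once the previous IPI has been accepted, so spinning on it
 * ensures the ICR is free before a new command is written.
 */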
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
                                       unsigned int dest)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid
         * costly cli/sti. Otherwise we use an even cheaper single atomic
         * write to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for idle.
         */
        __xapic_wait_icr_idle();

        /*
         * No need to touch the target chip field.
         */
        cfg = __prepare_ICR(shortcut, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}
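
/*
 * Usage sketch (roughly how the flat genapic code drives this; exact
 * call sites vary by kernel version):
 *
 *      __send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
 *
 * sends 'vector' to every CPU but the sender, and the all-but-self
 * shorthand means ICR2 never needs to be programmed.
 */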
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
                                         unsigned int dest)
{
        unsigned long cfg;

        /*
         * Wait for idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * Prepare target chip field.
         */
        cfg = __prepare_ICR2(mask);
        native_apic_mem_write(APIC_ICR2, cfg);

        /*
         * Program the ICR.
         */
        cfg = __prepare_ICR(0, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}
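
/*
 * Illustrative call (the APIC ID here is hypothetical): to deliver
 * 'vector' to the CPU whose physical APIC ID is 3,
 *
 *      __send_IPI_dest_field(3, vector, APIC_DEST_PHYSICAL);
 *
 * Note the ordering above: ICR2 (the destination) is written first,
 * since the write to ICR itself fires off the IPI.
 */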
static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned long query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead.
         * - mbligh
         */
        local_irq_save(flags);
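        /*
         * With interrupts off, nothing on this CPU can slip in between
         * the ICR2 and ICR writes inside __send_IPI_dest_field() and
         * clobber the destination field.
         */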
        for_each_cpu_mask_nr(query_cpu, mask) {
                __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
                                      vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}
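
/*
 * For reference: genapic drivers of this era that lack a usable
 * broadcast mode (e.g. physflat) point their send_IPI_mask() at this
 * helper.
 */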

#endif /* _ASM_X86_IPI_H */