ipi.h

#ifndef ASM_X86__IPI_H
#define ASM_X86__IPI_H

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC InterProcessor Interrupt code.
 *
 * Moved to include file by James Cleverdon from
 * arch/x86-64/kernel/smp.c
 *
 * Copyrights from kernel/smp.c:
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 */

#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
					 unsigned int dest)
{
	unsigned int icr = shortcut | dest;

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr |= APIC_DM_NMI;
		break;
	}
	return icr;
}
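
/*
 * Illustrative sketch, not part of the upstream header: composing the
 * ICR value for a fixed-vector self-IPI. APIC_DEST_SELF is the "self"
 * destination shorthand from <asm/apicdef.h>; the helper name below is
 * hypothetical.
 */
static inline unsigned int example_self_ipi_icr(int vector)
{
	/* Shorthand, delivery mode and vector end up in one ICR word. */
	return __prepare_ICR(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}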

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
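
/*
 * SET_APIC_DEST_FIELD() places the destination APIC ID in bits 24-31
 * of the value, i.e. bits 56-63 of the architectural 64-bit ICR, which
 * the xAPIC exposes as the separate APIC_ICR2 register.
 */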

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
				       unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
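
/*
 * Illustrative sketch, not part of the upstream header: a flat-mode
 * "IPI all but self" sender is typically just a thin wrapper around
 * __send_IPI_shortcut(). The function name is hypothetical; the
 * APIC_DEST_* constants come from <asm/apicdef.h>.
 */
static inline void example_send_IPI_allbutself(int vector)
{
	/* ALLBUT shorthand: every CPU except the sender gets the vector. */
	__send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
}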

/*
 * This is used to send an IPI with no shorthand notation (the destination
 * is specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
					 unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
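
/*
 * Note that APIC_ICR2 is programmed before APIC_ICR on purpose: it is
 * the write to the low word (APIC_ICR) that actually triggers delivery,
 * so the destination must already be in place by then.
 */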

static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long flags;
	unsigned long query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu_mask_nr(query_cpu, mask) {
		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
				      vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
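
/*
 * Illustrative usage sketch, not part of the upstream header: a
 * physical-mode APIC driver would typically implement its mask-send
 * hook as a direct call into the helper above. The function name is
 * hypothetical.
 */
static inline void example_send_IPI_mask(cpumask_t mask, int vector)
{
	send_IPI_mask_sequence(mask, vector);
}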

#endif /* ASM_X86__IPI_H */