#ifndef __ASM_IPI_H
#define __ASM_IPI_H

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC InterProcessor Interrupt code.
 *
 * Moved to include file by James Cleverdon from
 * arch/x86-64/kernel/smp.c
 *
 * Copyrights from kernel/smp.c:
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 */

#include <asm/fixmap.h>
#include <asm/hw_irq.h>
#include <asm/apicdef.h>
#include <asm/genapic.h>

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
  28. static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
  29. {
  30. unsigned int icr = shortcut | dest;
  31. switch (vector) {
  32. default:
  33. icr |= APIC_DM_FIXED | vector;
  34. break;
  35. case NMI_VECTOR:
  36. /*
  37. * Setup KDB IPI to be delivered as an NMI
  38. */
  39. case KDB_VECTOR:
  40. icr |= APIC_DM_NMI;
  41. break;
  42. }
  43. return icr;
  44. }
  45. static inline int __prepare_ICR2 (unsigned int mask)
  46. {
  47. return SET_APIC_DEST_FIELD(mask);
  48. }
  49. static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
  50. {
  51. /*
  52. * Subtle. In the case of the 'never do double writes' workaround
  53. * we have to lock out interrupts to be safe. As we don't care
  54. * of the value read we use an atomic rmw access to avoid costly
  55. * cli/sti. Otherwise we use an even cheaper single atomic write
  56. * to the APIC.
  57. */
  58. unsigned int cfg;
  59. /*
  60. * Wait for idle.
  61. */
  62. apic_wait_icr_idle();
  63. /*
  64. * No need to touch the target chip field
  65. */
  66. cfg = __prepare_ICR(shortcut, vector, dest);
  67. /*
  68. * Send the IPI. The write to APIC_ICR fires this off.
  69. */
  70. apic_write(APIC_ICR, cfg);
  71. }
  72. static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
  73. {
  74. unsigned long cfg, flags;
  75. unsigned long query_cpu;
  76. /*
  77. * Hack. The clustered APIC addressing mode doesn't allow us to send
  78. * to an arbitrary mask, so I do a unicast to each CPU instead.
  79. * - mbligh
  80. */
  81. local_irq_save(flags);
  82. for_each_cpu_mask(query_cpu, mask) {
  83. /*
  84. * Wait for idle.
  85. */
  86. apic_wait_icr_idle();
  87. /*
  88. * prepare target chip field
  89. */
  90. cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
  91. apic_write(APIC_ICR2, cfg);
  92. /*
  93. * program the ICR
  94. */
  95. cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL);
  96. /*
  97. * Send the IPI. The write to APIC_ICR fires this off.
  98. */
  99. apic_write(APIC_ICR, cfg);
  100. }
  101. local_irq_restore(flags);
  102. }
#endif /* __ASM_IPI_H */