/*
 * ipi.c: helpers for sending inter-processor interrupts (IPIs)
 * through the local APIC on x86.
 */
  1. #include <linux/cpumask.h>
  2. #include <linux/interrupt.h>
  3. #include <linux/init.h>
  4. #include <linux/mm.h>
  5. #include <linux/delay.h>
  6. #include <linux/spinlock.h>
  7. #include <linux/kernel_stat.h>
  8. #include <linux/mc146818rtc.h>
  9. #include <linux/cache.h>
  10. #include <linux/cpu.h>
  11. #include <linux/module.h>
  12. #include <asm/smp.h>
  13. #include <asm/mtrr.h>
  14. #include <asm/tlbflush.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/apic.h>
  17. #include <asm/proto.h>
  18. #ifdef CONFIG_X86_32
  19. #include <mach_apic.h>
  20. #include <mach_ipi.h>
/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
  26. static inline int __prepare_ICR(unsigned int shortcut, int vector)
  27. {
  28. unsigned int icr = shortcut | APIC_DEST_LOGICAL;
  29. switch (vector) {
  30. default:
  31. icr |= APIC_DM_FIXED | vector;
  32. break;
  33. case NMI_VECTOR:
  34. icr |= APIC_DM_NMI;
  35. break;
  36. }
  37. return icr;
  38. }
  39. static inline int __prepare_ICR2(unsigned int mask)
  40. {
  41. return SET_APIC_DEST_FIELD(mask);
  42. }
  43. void __send_IPI_shortcut(unsigned int shortcut, int vector)
  44. {
  45. /*
  46. * Subtle. In the case of the 'never do double writes' workaround
  47. * we have to lock out interrupts to be safe. As we don't care
  48. * of the value read we use an atomic rmw access to avoid costly
  49. * cli/sti. Otherwise we use an even cheaper single atomic write
  50. * to the APIC.
  51. */
  52. unsigned int cfg;
  53. /*
  54. * Wait for idle.
  55. */
  56. apic_wait_icr_idle();
  57. /*
  58. * No need to touch the target chip field
  59. */
  60. cfg = __prepare_ICR(shortcut, vector);
  61. /*
  62. * Send the IPI. The write to APIC_ICR fires this off.
  63. */
  64. apic_write(APIC_ICR, cfg);
  65. }
  66. void send_IPI_self(int vector)
  67. {
  68. __send_IPI_shortcut(APIC_DEST_SELF, vector);
  69. }
  70. /*
  71. * This is used to send an IPI with no shorthand notation (the destination is
  72. * specified in bits 56 to 63 of the ICR).
  73. */
  74. static inline void __send_IPI_dest_field(unsigned long mask, int vector)
  75. {
  76. unsigned long cfg;
  77. /*
  78. * Wait for idle.
  79. */
  80. if (unlikely(vector == NMI_VECTOR))
  81. safe_apic_wait_icr_idle();
  82. else
  83. apic_wait_icr_idle();
  84. /*
  85. * prepare target chip field
  86. */
  87. cfg = __prepare_ICR2(mask);
  88. apic_write(APIC_ICR2, cfg);
  89. /*
  90. * program the ICR
  91. */
  92. cfg = __prepare_ICR(0, vector);
  93. /*
  94. * Send the IPI. The write to APIC_ICR fires this off.
  95. */
  96. apic_write(APIC_ICR, cfg);
  97. }
  98. /*
  99. * This is only used on smaller machines.
  100. */
  101. void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
  102. {
  103. unsigned long mask = cpumask_bits(cpumask)[0];
  104. unsigned long flags;
  105. local_irq_save(flags);
  106. WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
  107. __send_IPI_dest_field(mask, vector);
  108. local_irq_restore(flags);
  109. }
  110. void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
  111. {
  112. unsigned long flags;
  113. unsigned int query_cpu;
  114. /*
  115. * Hack. The clustered APIC addressing mode doesn't allow us to send
  116. * to an arbitrary mask, so I do a unicasts to each CPU instead. This
  117. * should be modified to do 1 message per cluster ID - mbligh
  118. */
  119. local_irq_save(flags);
  120. for_each_cpu(query_cpu, mask)
  121. __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
  122. local_irq_restore(flags);
  123. }
  124. void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
  125. {
  126. unsigned long flags;
  127. unsigned int query_cpu;
  128. unsigned int this_cpu = smp_processor_id();
  129. /* See Hack comment above */
  130. local_irq_save(flags);
  131. for_each_cpu(query_cpu, mask)
  132. if (query_cpu != this_cpu)
  133. __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
  134. vector);
  135. local_irq_restore(flags);
  136. }
  137. /* must come after the send_IPI functions above for inlining */
  138. static int convert_apicid_to_cpu(int apic_id)
  139. {
  140. int i;
  141. for_each_possible_cpu(i) {
  142. if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
  143. return i;
  144. }
  145. return -1;
  146. }
  147. int safe_smp_processor_id(void)
  148. {
  149. int apicid, cpuid;
  150. if (!boot_cpu_has(X86_FEATURE_APIC))
  151. return 0;
  152. apicid = hard_smp_processor_id();
  153. if (apicid == BAD_APICID)
  154. return 0;
  155. cpuid = convert_apicid_to_cpu(apicid);
  156. return cpuid >= 0 ? cpuid : 0;
  157. }
  158. #endif