/*
 * ipi.c - x86 inter-processor interrupt (IPI) helpers.
 */
  1. #include <linux/cpumask.h>
  2. #include <linux/interrupt.h>
  3. #include <linux/init.h>
  4. #include <linux/mm.h>
  5. #include <linux/delay.h>
  6. #include <linux/spinlock.h>
  7. #include <linux/kernel_stat.h>
  8. #include <linux/mc146818rtc.h>
  9. #include <linux/cache.h>
  10. #include <linux/cpu.h>
  11. #include <linux/module.h>
  12. #include <asm/smp.h>
  13. #include <asm/mtrr.h>
  14. #include <asm/tlbflush.h>
  15. #include <asm/mmu_context.h>
  16. #include <asm/apic.h>
  17. #include <asm/proto.h>
  18. #ifdef CONFIG_X86_32
  19. #include <mach_apic.h>
  20. /*
  21. * the following functions deal with sending IPIs between CPUs.
  22. *
  23. * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
  24. */
  25. static inline int __prepare_ICR(unsigned int shortcut, int vector)
  26. {
  27. unsigned int icr = shortcut | APIC_DEST_LOGICAL;
  28. switch (vector) {
  29. default:
  30. icr |= APIC_DM_FIXED | vector;
  31. break;
  32. case NMI_VECTOR:
  33. icr |= APIC_DM_NMI;
  34. break;
  35. }
  36. return icr;
  37. }
  38. static inline int __prepare_ICR2(unsigned int mask)
  39. {
  40. return SET_APIC_DEST_FIELD(mask);
  41. }
  42. void __send_IPI_shortcut(unsigned int shortcut, int vector)
  43. {
  44. /*
  45. * Subtle. In the case of the 'never do double writes' workaround
  46. * we have to lock out interrupts to be safe. As we don't care
  47. * of the value read we use an atomic rmw access to avoid costly
  48. * cli/sti. Otherwise we use an even cheaper single atomic write
  49. * to the APIC.
  50. */
  51. unsigned int cfg;
  52. /*
  53. * Wait for idle.
  54. */
  55. apic_wait_icr_idle();
  56. /*
  57. * No need to touch the target chip field
  58. */
  59. cfg = __prepare_ICR(shortcut, vector);
  60. /*
  61. * Send the IPI. The write to APIC_ICR fires this off.
  62. */
  63. apic_write_around(APIC_ICR, cfg);
  64. }
/*
 * Send an IPI to the local CPU itself via the APIC self shorthand.
 */
void send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
	unsigned long cfg;

	/*
	 * Wait for the ICR to go idle.  For an NMI IPI use the
	 * non-hanging wait: a wedged target may never ack, so don't
	 * spin forever.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		apic_wait_icr_idle();

	/*
	 * Prepare target chip field.  ICR2 must be written first: the
	 * later write to APIC_ICR is what actually triggers the IPI.
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
  97. /*
  98. * This is only used on smaller machines.
  99. */
  100. void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
  101. {
  102. unsigned long mask = cpus_addr(cpumask)[0];
  103. unsigned long flags;
  104. local_irq_save(flags);
  105. WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
  106. __send_IPI_dest_field(mask, vector);
  107. local_irq_restore(flags);
  108. }
  109. void send_IPI_mask_sequence(cpumask_t mask, int vector)
  110. {
  111. unsigned long flags;
  112. unsigned int query_cpu;
  113. /*
  114. * Hack. The clustered APIC addressing mode doesn't allow us to send
  115. * to an arbitrary mask, so I do a unicasts to each CPU instead. This
  116. * should be modified to do 1 message per cluster ID - mbligh
  117. */
  118. local_irq_save(flags);
  119. for_each_possible_cpu(query_cpu) {
  120. if (cpu_isset(query_cpu, mask)) {
  121. __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
  122. vector);
  123. }
  124. }
  125. local_irq_restore(flags);
  126. }
  127. /* must come after the send_IPI functions above for inlining */
  128. #include <mach_ipi.h>
  129. static int convert_apicid_to_cpu(int apic_id)
  130. {
  131. int i;
  132. for_each_possible_cpu(i) {
  133. if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
  134. return i;
  135. }
  136. return -1;
  137. }
  138. int safe_smp_processor_id(void)
  139. {
  140. int apicid, cpuid;
  141. if (!boot_cpu_has(X86_FEATURE_APIC))
  142. return 0;
  143. apicid = hard_smp_processor_id();
  144. if (apicid == BAD_APICID)
  145. return 0;
  146. cpuid = convert_apicid_to_cpu(apicid);
  147. return cpuid >= 0 ? cpuid : 0;
  148. }
  149. #endif