x2apic_cluster.c

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
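
/*
 * Per-CPU state: each CPU's logical APIC ID (the value of its LDR),
 * the set of CPUs sharing its x2APIC cluster, and a scratch cpumask
 * used while assembling per-cluster IPI destinations.
 */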
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/*
 * We need to use more than cpu 0, because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpu_online_mask;
}

/*
 * For now, each logical cpu is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
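
/*
 * Build the ICR value for @vector/@dest and write it, together with the
 * destination APIC ID, to the ICR in a single x2APIC MSR write.
 */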
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * send the IPI.
	 */
	native_x2apic_icr_write(cfg, apicid);
}
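
/*
 * In x2APIC logical mode the 32-bit LDR is (cluster ID << 16) | position
 * bitmask, so a CPU's cluster ID is the upper 16 bits of its logical
 * APIC ID.
 */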
static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are going to modify the mask, so we need our own copy
	 * and must be sure it is manipulated with irqs off.
	 */
	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Cluster sibling cpus should be discarded now so
		 * that we do not send an IPI to them a second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}
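
/*
 * The send_IPI_* entry points below all funnel into __x2apic_send_IPI_mask(),
 * differing only in the target mask and in whether the sender is included.
 */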
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, so we can only return one
	 * logical APIC ID. May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, so we can only return one
	 * logical APIC ID. May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	return per_cpu(x86_cpu_to_logical_apicid, cpu);
}
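
/*
 * In x2APIC mode APIC IDs are full 32-bit values, so converting between
 * a register value and an APIC ID needs no masking or shifting.
 */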
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return initial_apicid >> index_msb;
}
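
/*
 * x2APIC mode provides a dedicated SELF IPI register, which is cheaper
 * than going through the ICR for self-directed interrupts.
 */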
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
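
/*
 * Record the calling CPU's logical APIC ID from the LDR and link it into
 * the cluster sibling masks of every online CPU in the same cluster.
 */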
static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
		__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int __cpuinit
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int this_cpu = (unsigned long)hcpu;
	unsigned int cpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
					GFP_KERNEL)) {
			err = -ENOMEM;
		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
					       GFP_KERNEL)) {
			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
			err = -ENOMEM;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		for_each_online_cpu(cpu) {
			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
				continue;
			__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
			__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
		}
		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block __refdata x2apic_cpu_notifier = {
	.notifier_call = update_clusterinfo,
};
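
/*
 * Allocate the boot CPU's masks directly; secondary CPUs get theirs
 * from the CPU_UP_PREPARE notification above.
 */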
static int x2apic_init_cpu_notifier(void)
{
	int cpu = smp_processor_id();

	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
	register_hotcpu_notifier(&x2apic_cpu_notifier);
	return 1;
}

static int x2apic_cluster_probe(void)
{
	if (x2apic_mode)
		return x2apic_init_cpu_notifier();
	else
		return 0;
}
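
/*
 * The apic driver instance for cluster-mode x2APIC; selected when the
 * probe above reports that x2APIC mode is enabled.
 */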
struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_cluster_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_cluster_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};