/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
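
/*
 * thread_info of the secondary about to be started; __cpu_up() sets it
 * and the early secondary startup path reads it to find its stack.
 */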
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif
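
/*
 * Demultiplex an incoming IPI message.  Used by platforms whose
 * interrupt controller delivers a single IPI and passes the message
 * type in, rather than dedicating one interrupt to each message.
 */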
void smp_message_recv(int msg)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* we notice need_resched on exit */
                break;
        case PPC_MSG_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(get_irq_regs());
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}
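
/*
 * One handler per IPI message, for interrupt controllers that can
 * dedicate a separate interrupt to each message; these are wired up
 * via smp_request_message_ipi() below.
 */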
static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        /* we just need the return path side effect of checking need_resched */
        return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK) {
                return 1;
        }
#endif
        err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
                          smp_ipi_name[msg], 0);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
             virq, smp_ipi_name[msg], err);

        return err;
}
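
/*
 * Poke @cpu so that it reschedules; the message itself does nothing,
 * the need_resched check on the interrupt return path does the work.
 */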
void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask)
                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback && smp_ops) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif
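
/*
 * smp_send_stop() halts all other CPUs by making each of them run
 * stop_this_cpu(), which disables interrupts and spins forever.
 */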
static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}
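
/*
 * Initial thread_info for each CPU, indexed by logical CPU number;
 * set up at boot and consumed by __cpu_up() via secondary_ti.
 */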
struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
        paca[cpu].kstack = (unsigned long) task_thread_info(p)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}
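
/*
 * Early SMP bring-up: probe the platform for the number of usable CPUs
 * and create an idle task for every possible secondary.
 */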
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        if (smp_ops)
                max_cpus = smp_ops->probe();
        else
                max_cpus = 1;

        smp_space_timers(max_cpus);

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
        cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
        cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
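
/*
 * Generic hotplug implementations; platforms with no special
 * requirements can point their smp_ops callbacks at these.
 */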
int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
#endif
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

#ifdef CONFIG_PPC64
        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
#endif
        return 0;
}
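
/* Wait up to ten seconds (100 x 100ms) for @cpu to mark itself CPU_DEAD. */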
void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
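
/*
 * Runs on the dying CPU itself: advertise CPU_DEAD, then spin until
 * generic_cpu_enable() flips cpu_state back to CPU_UP_PREPARE.
 */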
void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}
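
/*
 * Bring @cpu online: kick it via smp_ops, wait for it to call in,
 * then wait for it to add itself to cpu_online_map.
 */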
int __cpuinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (it can be leftover from a
         * previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const int *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = *reg;
out:
        of_node_put(np);
        return id;
}

/* Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        if (system_state > SYSTEM_BOOTING)
                snapshot_timebase();

        secondary_cpu_time_init();

        ipi_call_lock();
        notify_cpu_starting(cpu);
        cpu_set(cpu, cpu_online_map);
        /* Update sibling maps */
        base = cpu_first_thread_in_core(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
                cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
                cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpu_set(cpu, per_cpu(cpu_core_map, base + i));
                cpu_set(base + i, per_cpu(cpu_core_map, cpu));
        }
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
        ipi_call_unlock();

        local_irq_enable();

        cpu_idle();

        return 0;
}
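
/* Changing the profiling interrupt multiplier is not supported here;
 * claim success and do nothing. */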
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        if (smp_ops)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);

        snapshot_timebases();

        dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
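/*
 * Take the calling CPU out of service via the platform's cpu_disable
 * hook, then drop it from the sibling, core and shared-L2 maps.
 */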
int __cpu_disable(void)
{
        struct device_node *l2_cache;
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_in_core(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
                cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
                cpu_clear(cpu, per_cpu(cpu_core_map, base + i));
                cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
        }

        l2_cache = cpu_to_l2cache(cpu);
        for_each_present_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpu_clear(cpu, per_cpu(cpu_core_map, i));
                        cpu_clear(i, per_cpu(cpu_core_map, cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);

        return 0;
}
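
/* Let the platform wait for and clean up after the dead @cpu. */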
void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif