smp.c

/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_sibling_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

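/*
 * Dispatch an incoming IPI.  The platform interrupt code calls this
 * with one of the PPC_MSG_* values decoded from the message-passing
 * hardware; anything else falls through to the "unknown msg" case.
 */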
void smp_message_recv(int msg)
{
        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(get_irq_regs());
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

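/*
 * Register the kexec crash callback and broadcast a debugger-break IPI
 * to all other CPUs.  Setting crash_ipi_function_ptr is what makes
 * smp_message_recv() divert PPC_MSG_DEBUGGER_BREAK into the crash path
 * instead of the debugger.
 */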
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback && smp_ops) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF,
                                      PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function().  This is designed to
 * minimise static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT        8

/*
 * These functions send a 'generic call function' IPI to other online
 * CPUs in the system.
 *
 * [SUMMARY] Run a function on other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
                          int wait, cpumask_t map)
{
        struct call_data_struct data;
        int ret = -1, num_cpus;
        int cpu;
        u64 timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        /* remove 'self' from the map */
        if (cpu_isset(smp_processor_id(), map))
                cpu_clear(smp_processor_id(), map);

        /* sanity check the map, remove any non-online processors. */
        cpus_and(map, map, cpu_online_map);

        if (unlikely(smp_ops == NULL))
                return ret;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);

        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        num_cpus = num_online_cpus() - 1;
        if (!num_cpus || cpus_empty(map)) {
                ret = 0;
                goto out;
        }

        call_data = &data;
        smp_wmb();
        /* Send a message to all CPUs in the map */
        for_each_cpu_mask(cpu, map)
                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);

        timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

        /* Wait for indication that they have received the message */
        while (atomic_read(&data.started) != num_cpus) {
                HMT_low();
                if (get_tb() >= timeout) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        /* optionally wait for the CPUs to complete */
        if (wait) {
                while (atomic_read(&data.finished) != num_cpus) {
                        HMT_low();
                        if (get_tb() >= timeout) {
                                printk("smp_call_function on cpu %d: other "
                                       "cpus not finishing (%d/%d)\n",
                                       smp_processor_id(),
                                       atomic_read(&data.finished),
                                       atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

        ret = 0;

out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        return smp_call_function_map(func, info, nonatomic, wait,
                                     cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);

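/*
 * Illustrative sketch only (do_flush below is a hypothetical callback,
 * not part of this file): a caller wanting every other online CPU to
 * run a short routine, waiting for all of them to finish, might do
 *
 *	static void do_flush(void *info)
 *	{
 *		... fast, non-blocking work ...
 *	}
 *
 *	smp_call_function(do_flush, NULL, 0, 1);
 *
 * Remember the rules above: never with interrupts disabled, and never
 * from interrupt or bottom-half context.
 */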
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int nonatomic, int wait)
{
        cpumask_t map = CPU_MASK_NONE;

        if (!cpu_online(cpu))
                return -EINVAL;

        if (cpu == smp_processor_id())
                return -EBUSY;

        cpu_set(cpu, map);
        return smp_call_function_map(func, info, nonatomic, wait, map);
}
EXPORT_SYMBOL(smp_call_function_single);

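/*
 * Likewise a sketch, reusing the hypothetical do_flush() above: to run
 * it on CPU 2 only, without waiting for completion:
 *
 *	int err = smp_call_function_single(2, do_flush, NULL, 0, 0);
 *
 * The target must be online and must not be the calling CPU, or the
 * call fails with -EINVAL or -EBUSY respectively.
 */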
void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        if (smp_ops)
                max_cpus = smp_ops->probe();
        else
                max_cpus = 1;

        smp_space_timers(max_cpus);

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
#endif
        return 0;
}

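/*
 * generic_cpu_enable() and generic_mach_cpu_die() below form a
 * handshake around the per-cpu cpu_state variable: the dying CPU parks
 * itself in generic_mach_cpu_die() with cpu_state == CPU_DEAD, and
 * generic_cpu_enable() releases it by writing CPU_UP_PREPARE, after
 * which the parked CPU puts itself back in the online map.
 */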
int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

#ifdef CONFIG_PPC64
        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
#endif
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

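/*
 * Bring one secondary CPU up: point secondary_ti at its idle thread,
 * try the platform's fast re-enable hook first, then fall back to a
 * full kick via smp_ops->kick_cpu() and poll cpu_callin_map until the
 * new CPU checks in (or we give up and report it stuck).
 */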
int __cpuinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (it can be left over from
         * a previous CPU hotplug)
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--)
                        msleep(200);
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        if (system_state > SYSTEM_BOOTING)
                snapshot_timebase();

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        if (smp_ops)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);

        snapshot_timebases();

        dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        if (smp_ops->cpu_disable)
                return smp_ops->cpu_disable();

        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif