/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))

/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
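
/*
 * Release a secondary from the spin loop set up by early boot code:
 * the CPU polls its paca cpu_start flag, so setting the flag (plus a
 * full barrier) lets it fall through into secondary_start. If the
 * flag is already set, the CPU may have been soft-unplugged, so a
 * reschedule IPI is sent to bring it back instead.
 */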
#ifdef CONFIG_PPC64
int __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
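
/*
 * Per-message IPI handlers. Each PPC_MSG_* type maps to one handler;
 * controllers with four or more hardware IPIs wire these up directly
 * via smp_request_message_ipi(), others multiplex them through
 * smp_muxed_ipi_message_pass() below.
 */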
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
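
/*
 * Muxed IPIs: for platforms with a single inter-processor doorbell
 * rather than one interrupt per message type, the four PPC_MSG_*
 * flags are packed one per byte into the "messages" word of the
 * target CPU's cpu_messages. The sender sets its byte and fires the
 * doorbell; on big-endian, byte msg corresponds to the
 * (1 << (24 - 8 * msg)) bit tested in smp_ipi_demux() below.
 */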
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	message[msg] = 1;
	mb();
	smp_ops->cause_ipi(cpu, info->data);
}
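
/*
 * Drain and dispatch all pending muxed messages for this CPU.
 * xchg_local() atomically swaps the message word to zero, and the
 * loop re-checks info->messages so a message posted during dispatch
 * is handled rather than lost.
 */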
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg_local(&info->messages, 0);

#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
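
/*
 * Deliver one message to one CPU: use the platform's dedicated
 * message_pass hook when it exists, otherwise fall back to the muxed
 * scheme above.
 */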
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
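
/*
 * stop_this_cpu() marks the caller offline and spins with interrupts
 * off; smp_send_stop() uses it via smp_call_function() to park every
 * other CPU, without waiting for them to acknowledge.
 */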
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops)
		if (smp_ops->probe)
			max_cpus = smp_ops->probe();
		else
			max_cpus = NR_CPUS;
	else
		max_cpus = 1;
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = task_thread_info(current);
}
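
/*
 * Take a CPU out of service ahead of unplug: refuse for the boot CPU,
 * otherwise clear it from the online mask, drop the vdso processor
 * count and migrate its interrupts away.
 */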
#ifdef CONFIG_HOTPLUG_CPU
int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}
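
/*
 * Poll for the dying CPU to reach CPU_DEAD, giving up after roughly
 * ten seconds (100 checks, 100ms apart).
 */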
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
#endif
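
/*
 * Idle threads for secondaries are forked on first bringup and cached
 * in idle_thread_array, so a re-onlined CPU reuses its old idle task.
 * The fork is pushed to a workqueue so that it happens in the context
 * of a kernel thread rather than whichever userspace task initiated
 * the online operation.
 */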
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit create_idle(unsigned int cpu)
{
	struct thread_info *ti;
	struct create_idle c_idle = {
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

	c_idle.idle = get_idle_for_cpu(cpu);

	/* We can't use kernel_thread since we must avoid rescheduling
	 * the child. We use a workqueue because we want to fork from
	 * a kernel thread, not whatever userspace process happens to
	 * be trying to online us.
	 */
	if (!c_idle.idle) {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	} else
		init_idle(c_idle.idle, cpu);
	if (IS_ERR(c_idle.idle)) {
		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
		return PTR_ERR(c_idle.idle);
	}
	ti = task_thread_info(c_idle.idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = c_idle.idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	current_set[cpu] = ti;

	return 0;
}
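
/*
 * Bring one secondary online: make sure an idle thread exists, kick
 * the CPU out of its spin loop, then wait for it to check in via
 * cpu_callin_map and finally to set itself in the online mask.
 */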
int __cpuinit __cpu_up(unsigned int cpu)
{
	int rc, c;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	/* Make sure we have an idle thread */
	rc = create_idle(cpu);
	if (rc)
		return rc;

	secondary_ti = current_set[cpu];

	/* Make sure callin map entry is 0 (can be left over
	 * from a previous CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const int *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = *reg;
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
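
	/* Grow the core mask by shared L2: any online CPU whose
	 * next-level cache node in the device tree matches ours is
	 * treated as part of the same core group.
	 */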
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
}
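
/*
 * With the CPU_FTR_ASYM_SMT feature, lower-numbered threads of a core
 * run faster when their siblings are idle, so ask the scheduler to
 * pack runnable tasks onto the low thread ids.
 */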
int arch_sd_sibling_asym_packing(void)
{
	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		return SD_ASYM_PACKING;
	}
	return 0;
}
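
/*
 * Hotplug teardown mirrors the bringup above: __cpu_disable() unwires
 * the departing CPU from the sibling, core and L2 maps, __cpu_die()
 * waits for it from a surviving CPU, and cpu_die() is the last thing
 * the departing CPU itself runs.
 */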
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif