/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (cpu_ops[cpu]->cpu_boot)
                return cpu_ops[cpu]->cpu_boot(cpu);

        return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;

        return ret;
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();

        preempt_disable();
        trace_hardirqs_off();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue. Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_irq_enable();
        local_fiq_enable();
        local_async_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
        /*
         * If we don't have a cpu_die method, abort before we reach the point
         * of no return. CPU0 may not have a cpu_ops, so test for it.
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
                return -EOPNOTSUPP;

        /*
         * We may need to abort a hot unplug for some other mechanism-specific
         * reason.
         */
        if (cpu_ops[cpu]->cpu_disable)
                return cpu_ops[cpu]->cpu_disable(cpu);

        return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline. Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Remove this CPU from the vm mask set of all processes.
         */
        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}
#endif
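
/*
 * Called by the generic SMP boot code once all requested secondary CPUs
 * have been brought up; report how many actually came online.
 */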
void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}
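
/*
 * Nothing to do for the boot CPU at this point; it has already been set up
 * by the early boot path.
 */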
void __init smp_prepare_boot_cpu(void)
{
}
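
/*
 * Hook used to raise an IPI on a set of CPUs; registered by the interrupt
 * controller driver via set_smp_cross_call() below.
 */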
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        struct device_node *dn = NULL;
        unsigned int i, cpu = 1;
        bool bootcpu_valid = false;

        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                const u32 *cell;
                u64 hwid;

                /*
                 * A cpu node with missing "reg" property is
                 * considered invalid to build a cpu_logical_map
                 * entry.
                 */
                cell = of_get_property(dn, "reg", NULL);
                if (!cell) {
                        pr_err("%s: missing reg property\n", dn->full_name);
                        goto next;
                }
                hwid = of_read_number(cell, of_n_addr_cells(dn));

                /*
                 * Non affinity bits must be set to 0 in the DT
                 */
                if (hwid & ~MPIDR_HWID_BITMASK) {
                        pr_err("%s: invalid reg property\n", dn->full_name);
                        goto next;
                }

                /*
                 * Duplicate MPIDRs are a recipe for disaster. Scan
                 * all initialized entries and check for
                 * duplicates. If any is found just ignore the cpu.
                 * cpu_logical_map was initialized to INVALID_HWID to
                 * avoid matching valid MPIDR values.
                 */
                for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
                        if (cpu_logical_map(i) == hwid) {
                                pr_err("%s: duplicate cpu reg properties in the DT\n",
                                        dn->full_name);
                                goto next;
                        }
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%s: duplicate boot cpu reg property in DT\n",
                                        dn->full_name);
                                goto next;
                        }

                        bootcpu_valid = true;

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu >= NR_CPUS)
                        goto next;

                if (cpu_read_ops(dn, cpu) != 0)
                        goto next;

                if (cpu_ops[cpu]->cpu_init(dn, cpu))
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu) = hwid;
next:
                cpu++;
        }

        /* sanity check */
        if (cpu > NR_CPUS)
                pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
                           cpu, NR_CPUS);

        if (!bootcpu_valid) {
                pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * All the cpus that made it to the cpu_logical_map have been
         * validated so set them as possible cpus.
         */
        for (i = 0; i < NR_CPUS; i++)
                if (cpu_logical_map(i) != INVALID_HWID)
                        set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu, ncores = num_possible_cpus();

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;

        /* Don't bother if we're effectively UP */
        if (max_cpus <= 1)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         *
         * Make sure we online at most (max_cpus - 1) additional CPUs.
         */
        max_cpus--;
        for_each_possible_cpu(cpu) {
                if (max_cpus == 0)
                        break;

                if (cpu == smp_processor_id())
                        continue;

                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                max_cpus--;
        }
}
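
/*
 * Record the platform's IPI delivery routine so the helpers below can use
 * it to signal other CPUs.
 */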
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}
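
/*
 * Entry points used by the generic smp_call_function*() machinery to kick
 * remote CPUs into running queued function calls.
 */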
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
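
/*
 * Human-readable names for the IPIs above, indexed from IPI_RESCHEDULE;
 * used when printing the per-CPU IPI counts in /proc/interrupts.
 */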
static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_RESCHEDULE] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, " %s\n", ipi_types[i]);
        }
}
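
/*
 * Total number of IPIs handled by @cpu, summed over all IPI types; feeds
 * into the per-CPU interrupt statistics.
 */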
u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                pr_crit("CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);

        switch (ipinr) {
        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
                generic_smp_call_function_single_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
        }

        set_irq_regs(old_regs);
}
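
/* Kick @cpu so that it reruns the scheduler as soon as possible. */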
void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
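
/*
 * Stop all other CPUs, e.g. on reboot or panic: ask them to take themselves
 * offline via IPI_CPU_STOP and give them up to a second to comply.
 */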
void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpu_clear(smp_processor_id(), mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}