smp.c

/* SMP support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>
#include "internal.h"

#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpu.h>
#include <asm/cacheflush.h>

static unsigned long sleep_mode[NR_CPUS];
static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Debug Message function
 */
#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
#define	CALL_FUNCTION_NMI_IPI_TIMEOUT	0

/*
 * Structure and data for smp_nmi_call_function().
 */
struct nmi_call_data_struct {
	smp_call_func_t	func;
	void		*info;
	cpumask_t	started;
	cpumask_t	finished;
	int		wait;
	char		size_alignment[0]
	__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

static DEFINE_SPINLOCK(smp_nmi_call_lock);
static struct nmi_call_data_struct *nmi_call_data;
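
/*
 * Protocol for the NMI call data (as implemented below): the initiating
 * CPU fills in a nmi_call_data_struct and points nmi_call_data at it; the
 * NMI handler on each target CPU (smp_nmi_call_function_interrupt())
 * copies func/info, clears its bit in ->started, runs the function and,
 * if asked to wait, clears its bit in ->finished.  The cacheline
 * alignment keeps the structure in whole cache lines so the hotplug path
 * can flush/invalidate it as a unit.
 */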

/*
 * Data structures and variables
 */
static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
unsigned long start_stack[NR_CPUS - 1];

/*
 * Per CPU parameters
 */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* The count of boot CPUs */
static cpumask_t smp_commenced_mask;
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/*
 * Function Prototypes
 */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);

/*
 * IPI Initialization interrupt definitions
 */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_ack(unsigned int irq);
static void mn10300_ipi_nop(unsigned int irq);

static struct irq_chip mn10300_ipi_type = {
	.name		= "cpu_ipi",
	.disable	= mn10300_ipi_disable,
	.enable		= mn10300_ipi_enable,
	.ack		= mn10300_ipi_ack,
	.eoi		= mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
	.handler	= smp_reschedule_interrupt,
	.name		= "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
	.handler	= smp_call_function_interrupt,
	.name		= "smp call function IPI"
};

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler	= smp_ipi_timer_interrupt,
	.flags		= IRQF_DISABLED,
	.name		= "smp local timer IPI"
};
#endif

/**
 * init_ipi - Initialise the IPI mechanism
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	set_irq_chip_and_handler(RESCHEDULE_IPI,
				 &mn10300_ipi_type, handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call function IPI */
	set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
				 &mn10300_ipi_type, handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	set_irq_chip_and_handler(LOCAL_TIMER_IPI,
				 &mn10300_ipi_type, handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache flush IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);
}
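
/*
 * Note on the "tmp = GxICR(irq)" reads in init_ipi() above and in the
 * helpers below: each write to an interrupt control register is read
 * back before interrupts are re-enabled, apparently to flush the CPU's
 * write buffer (the same pattern is labelled "flush write buffer" in
 * send_IPI_mask()).
 */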

/**
 * mn10300_ipi_shutdown - Shut down handling of an IPI
 * @irq: The IPI to be shut down.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_enable - Enable an IPI
 * @irq: The IPI to be enabled.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_disable - Disable an IPI
 * @irq: The IPI to be disabled.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
 * @irq: The IPI to be acknowledged.
 *
 * Clear the interrupt detection flag for the IPI on the appropriate interrupt
 * channel in the PIC.
 */
static void mn10300_ipi_ack(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

/**
 * mn10300_ipi_nop - Dummy IPI action
 * @irq: The IPI to be acted upon.
 */
static void mn10300_ipi_nop(unsigned int irq)
{
}

/**
 * send_IPI_mask - Send IPIs to all CPUs in list
 * @cpumask: The list of CPUs to target.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all the CPUs in the list, not waiting for them to
 * finish before returning.  The caller is responsible for synchronisation if
 * that is needed.
 */
static void send_IPI_mask(const cpumask_t *cpumask, int irq)
{
	int i;
	u16 tmp;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, *cpumask)) {
			/* send IPI */
			tmp = CROSS_GxICR(irq, i);
			CROSS_GxICR(irq, i) =
				tmp | GxICR_REQUEST | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, i);	/* flush write buffer */
		}
	}
}

/**
 * send_IPI_self - Send an IPI to this CPU.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to the current CPU.
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/**
 * send_IPI_allbutself - Send IPIs to all the other CPUs.
 * @irq: The IPI request to be sent.
 *
 * Send the specified IPI to all CPUs in the system barring the current one,
 * not waiting for them to finish before returning.  The caller is responsible
 * for synchronisation if that is needed.
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	cpumask = cpu_online_map;
	cpu_clear(smp_processor_id(), cpumask);
	send_IPI_mask(&cpumask, irq);
}
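
/*
 * Only the single-target call-function IPI is wired up on this platform:
 * smp_call_function_interrupt() below dispatches to
 * generic_smp_call_function_single_interrupt() only, so the mask variant
 * traps with BUG() instead of silently dropping calls.
 */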
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/**
 * smp_send_reschedule - Send reschedule IPI to a CPU
 * @cpu: The CPU to target.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}

/**
 * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
 * @func: The function to ask to be run.
 * @info: The context data to pass to that function.
 * @wait: If true, wait (atomically) until function is run on all CPUs.
 *
 * Send a non-maskable request to all CPUs in the system, requesting them to
 * run the specified function with the given context data, and, potentially,
 * to wait for completion of that function on all CPUs.
 *
 * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
 * timeout.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;

	data.func = func;
	data.info = info;
	data.started = cpu_online_map;
	cpu_clear(smp_processor_id(), data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* Wait for response */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpus_empty(data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
				     !cpus_empty(data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* If the timeout value is zero, wait until the cpumask has
		 * been cleared */
		while (!cpus_empty(data.started))
			barrier();
		if (wait)
			while (!cpus_empty(data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
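
/*
 * The only caller in this file is smp_send_stop(), which passes wait=0.
 * A caller needing completion on all CPUs would pass wait=1 and, with
 * CALL_FUNCTION_NMI_IPI_TIMEOUT left at zero, would spin until every
 * target CPU had cleared its bit in ->finished.
 */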

/**
 * stop_this_cpu - Callback to stop a CPU.
 * @unused: Callback context (ignored).
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case another CPU is single-stepping smp_send_stop(), clear
	 * procindebug to avoid a deadlock.
	 */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif	/* CONFIG_GDBSTUB */

	flags = arch_local_cli_save();
	cpu_clear(smp_processor_id(), cpu_online_map);

	while (!stopflag)
		cpu_relax();

	cpu_set(smp_processor_id(), cpu_online_map);
	arch_local_irq_restore(flags);
}

/**
 * smp_send_stop - Send a stop request to all CPUs.
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}

/**
 * smp_reschedule_interrupt - Reschedule IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * We need do nothing here, since the scheduling will be effected on our way
 * back through entry.S.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	/* do nothing */
	return IRQ_HANDLED;
}

/**
 * smp_call_function_interrupt - Call function IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/**
 * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;
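
	/*
	 * func/info/wait must be captured above *before* this CPU clears
	 * its bit in ->started below: once the bit is clear, the initiator
	 * may return and its stack-based call data may go away.
	 */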
	/* Notify the initiating CPU that I've grabbed the data and am about
	 * to execute the function
	 */
	smp_mb();
	cpu_clear(smp_processor_id(), nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();
		cpu_clear(smp_processor_id(), nmi_call_data->finished);
	}
}

#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/**
 * smp_ipi_timer_interrupt - Local timer IPI handler
 * @irq: The interrupt number.
 * @dev_id: The device ID.
 *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
#endif

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		set_cpu_present(i, true);
	}
}

/**
 * smp_cpu_init - Initialise AP in start_secondary.
 *
 * For this Application Processor: set up init_mm, initialise the FPU and set
 * up the IPI interrupt levels.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);

	enter_lazy_tlb(&init_mm, current);

	/* Force FPU initialization */
	clear_using_fpu(current);

	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* Set up the non-maskable call function IPI */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);
}

/**
 * smp_prepare_cpu_init - Initialise CPU in startup_secondary
 *
 * Set up the interrupt vectors for IRQ levels 0-6 and initialise the
 * GDB-stub's ICR.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* Set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* Disable all interrupts and set to priority 6 (lowest) */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_GDBSTUB
	/* initialise the GDB-stub */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(GDB_NMI_IPI);
		arch_local_irq_restore(flags);
	} while (0);
#endif
}
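
/*
 * AP bringup handshake, as implemented by the functions below:
 *
 *	BSP (do_boot_cpu)		AP (start_secondary)
 *	  send SMP_BOOT_IRQ	---->	  smp_cpu_init()
 *	  set cpu_callout_map	---->	  smp_callin() waits for the
 *					  callout, then sets cpu_callin_map
 *	BSP (__cpu_up)
 *	  set smp_commenced_mask --->	  AP proceeds: smp_online() sets
 *					  cpu_online_map and goes idle
 */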
/**
 * start_secondary - Activate a secondary CPU (AP)
 * @unused: Thread parameter (ignored).
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();

	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		cpu_relax();

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}

/**
 * smp_prepare_cpus - Boot up secondary CPUs (APs)
 * @max_cpus: Maximum number of CPUs to boot.
 *
 * Call do_boot_cpu() to boot up each AP.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* Setup boot CPU information */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* If SMP should be disabled, then finish */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* Boot secondary CPUs (for which phy_id > 0) */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* Don't boot the primary CPU */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}

/**
 * smp_store_cpu_info - Save a CPU's information
 * @cpu: The CPU to save for.
 *
 * Save boot_cpu_data and jiffy for the specified CPU.
 */
static void __init smp_store_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
	ci->type = CPUREV;
}

/**
 * smp_tune_scheduling - Set time slice value
 *
 * Nothing to do here.
 */
static void __init smp_tune_scheduling(void)
{
}

/**
 * do_boot_cpu - Boot up one CPU
 * @phy_id: Physical ID of CPU to boot.
 *
 * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
 * otherwise.
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* Create the idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* Send the boot IPI to the AP */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* Wait up to 100ms for the AP to receive the IPI */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* Allow the AP to start initializing */
		cpu_set(cpu_id, cpu_callout_map);

		/* Wait for the AP to set cpu_callin_map */
		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpu_isset(cpu_id, cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		cpu_clear(cpu_id, cpu_callout_map);
		cpu_clear(cpu_id, cpu_callin_map);
		cpu_clear(cpu_id, cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}

/**
 * smp_show_cpu_info - Show SMP CPU information
 * @cpu: The CPU of interest.
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}

/**
 * smp_callin - Set cpu_callin_map of the current CPU ID
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* Wait up to 2s total for the callout */
	while (time_before(jiffies, timeout)) {
		if (cpu_isset(cpu, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();		/* Get our bogomips */
#endif

	/* Save our processor parameters */
	smp_store_cpu_info(cpu);

	/* Allow the boot processor to continue */
	cpu_set(cpu, cpu_callin_map);
}

/**
 * smp_online - Set cpu_online_map
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	smp_wmb();
}

/**
 * smp_cpus_done - Finish the SMP boot
 * @max_cpus: Maximum CPU count.
 *
 * Do nothing.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
 *
 * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
 * processor (CPU 0).
 */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(0, cpu_callout_map);
	cpu_set(0, cpu_callin_map);
	current_thread_info()->cpu = 0;
}

/*
 * initialize_secondary - Initialise a secondary CPU (Application Processor).
 *
 * Set the SP register and jump to the thread's PC address.
 */
void initialize_secondary(void)
{
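	/*
	 * Switch to the idle thread's stack (set up by fork_idle() in
	 * do_boot_cpu()) and jump to thread.pc, which do_boot_cpu() pointed
	 * at start_secondary().
	 */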
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}

/**
 * __cpu_up - Set smp_commenced_mask for the nominated CPU
 * @cpu: The target CPU.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpu_set(cpu, smp_commenced_mask);

	/* Wait up to 5s total for a response */
	for (timeout = 0 ; timeout < 5000 ; timeout++) {
		if (cpu_isset(cpu, cpu_online_map))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_isset(cpu, cpu_online_map));
	return 0;
}

/**
 * setup_profiling_timer - Set up the profiling timer
 * @multiplier: The frequency multiplier to use
 *
 * The frequency of the profiling timer can be changed by writing a multiplier
 * value into /proc/profile.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*
 * CPU hotplug routines
 */
#ifdef CONFIG_HOTPLUG_CPU

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}
	return 0;
}
subsys_initcall(topology_init);

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	migrate_irqs();
	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}

#ifdef CONFIG_MN10300_CACHE_ENABLED
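/*
 * Cache control helpers for the CPU sleep/wakeup path below.  Disabling
 * spins until the I/D-cache busy bits in CHCTR clear; prepare_sleep_cpu()
 * and wakeup_cpu() only invalidate while the caches are disabled.
 */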
static inline void hotplug_cpu_disable_cache(void)
{
	int tmp;
	asm volatile(
		"	movhu	(%1),%0	\n"
		"	and	%2,%0	\n"
		"	movhu	%0,(%1)	\n"
		"1:	movhu	(%1),%0	\n"
		"	btst	%3,%0	\n"
		"	bne	1b	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
		: "memory", "cc");
}

static inline void hotplug_cpu_enable_cache(void)
{
	int tmp;
	asm volatile(
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICEN | CHCTR_DCEN)
		: "memory", "cc");
}

static inline void hotplug_cpu_invalidate_cache(void)
{
	int tmp;
	asm volatile (
		"movhu	(%1),%0	\n"
		"or	%2,%0	\n"
		"movhu	%0,(%1)	\n"
		: "=&r"(tmp)
		: "a"(&CHCTR),
		  "i"(CHCTR_ICINV | CHCTR_DCINV)
		: "cc");
}

#else /* CONFIG_MN10300_CACHE_ENABLED */
#define hotplug_cpu_disable_cache()	do {} while (0)
#define hotplug_cpu_enable_cache()	do {} while (0)
#define hotplug_cpu_invalidate_cache()	do {} while (0)
#endif /* CONFIG_MN10300_CACHE_ENABLED */

/**
 * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
 * @cpumask: List of target CPUs.
 * @func: The function to call on those CPUs.
 * @info: The context data for the function to be called.
 * @wait: Whether to wait for the calls to complete.
 *
 * Non-maskably call a function on another CPU for hotplug purposes.
 *
 * This function must be called with maskable interrupts disabled.
 */
static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
					 smp_call_func_t func, void *info,
					 int wait)
{
	/*
	 * The address and the size of nmi_call_func_mask_data
	 * need to be aligned on L1_CACHE_BYTES.
	 */
	static struct nmi_call_data_struct nmi_call_func_mask_data
		__cacheline_aligned;
	unsigned long start, end;

	start = (unsigned long)&nmi_call_func_mask_data;
	end = start + sizeof(struct nmi_call_data_struct);

	nmi_call_func_mask_data.func = func;
	nmi_call_func_mask_data.info = info;
	nmi_call_func_mask_data.started = cpumask;
	nmi_call_func_mask_data.wait = wait;
	if (wait)
		nmi_call_func_mask_data.finished = cpumask;

	spin_lock(&smp_nmi_call_lock);
	nmi_call_data = &nmi_call_func_mask_data;
	mn10300_local_dcache_flush_range(start, end);
	smp_wmb();

	send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);

	do {
		mn10300_local_dcache_inv_range(start, end);
		barrier();
	} while (!cpus_empty(nmi_call_func_mask_data.started));

	if (wait) {
		do {
			mn10300_local_dcache_inv_range(start, end);
			barrier();
		} while (!cpus_empty(nmi_call_func_mask_data.finished));
	}

	spin_unlock(&smp_nmi_call_lock);
	return 0;
}
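
/*
 * CPU sleep/wakeup for hotplug: __cpu_die() uses the NMI call mechanism
 * above to make the dying CPU flush and disable its caches, then loop in
 * SLEEP mode (sleep_cpu()); __cpu_up() later wakes it via wakeup_cpu(),
 * which re-enables the caches before clearing sleep_mode[].  The explicit
 * dcache flush/invalidate calls are needed because the sleeping CPU runs
 * with its dcache disabled while the initiator polls shared data.
 */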
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_callin_map);
	local_flush_tlb();
	cpu_set(cpu, cpu_online_map);
	smp_wmb();
}

static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}

/* When this function is called, IE=0 and NMID=0. */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();

	/*
	 * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
	 * before this CPU goes into SLEEP mode.
	 */
	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);

	restart_wakeup_cpu();
}

static void run_sleep_cpu(unsigned int cpu)
{
	unsigned long flags;
	cpumask_t cpumask = cpumask_of_cpu(cpu);

	flags = arch_local_cli_save();
	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
	udelay(1);		/* delay for the CPU to sleep */
	arch_local_irq_restore(flags);
}

static void wakeup_cpu(void *unused)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}

static void run_wakeup_cpu(unsigned int cpu)
{
	unsigned long flags;

	flags = arch_local_cli_save();
#if NR_CPUS == 2
	mn10300_local_dcache_flush_inv();
#else
	/*
	 * Before waking up the CPU, all online CPUs should stop and flush
	 * their D-caches for the global data.
	 */
#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y
#endif
	hotplug_cpu_nmi_call_function(cpumask_of_cpu(cpu), wakeup_cpu, NULL, 1);
	arch_local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG_CPU */