smp.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

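/*
 * Handshake state shared with the low-level secondary startup code.
 * octeon_boot_secondary() below publishes the target core id, stack
 * pointer and gp here; the waking core is expected to consume them and
 * clear octeon_processor_sp to acknowledge.  (Rough sketch of the
 * protocol; the consumer side lives outside this file.)
 */
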
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
static unsigned int InitTLBStart_addr;
#endif

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
        const int coreid = cvmx_get_core_num();
        uint64_t action;

        /* Load the mailbox register to figure out what we're supposed to do */
        action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));

        /* Clear the mailbox to clear the interrupt */
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

        if (action & SMP_CALL_FUNCTION)
                smp_call_function_interrupt();

        /* Check if we've been told to flush the icache */
        if (action & SMP_ICACHE_FLUSH)
                asm volatile ("synci 0($0)\n");

        return IRQ_HANDLED;
}

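/*
 * Note: the CIU mailbox is a set/clear register pair per core.  Writing
 * a bit to CVMX_CIU_MBOX_SETX(coreid) latches it and raises the mailbox
 * interrupt on that core; the handler above reads the pending bits from
 * CVMX_CIU_MBOX_CLRX(coreid) and writes them back to acknowledge.
 */
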
/**
 * Send the given mailbox action to the given cpu, so that
 * mailbox_interrupt() runs on the target core.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
        int coreid = cpu_logical_map(cpu);
        /*
        pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
                coreid, action);
        */
        cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
{
        unsigned int i;

        for_each_cpu_mask(i, mask)
                octeon_send_ipi_single(i, action);
}

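/*
 * These two senders are not called here directly; they are wired into
 * the generic MIPS SMP layer through the plat_smp_ops structure
 * registered at the bottom of this file, so e.g. an smp_call_function()
 * targeting another cpu ends up raising SMP_CALL_FUNCTION via
 * octeon_send_ipi_single() and is serviced in mailbox_interrupt().
 */
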
/**
 * Read the bootloader's linux_app_boot_info (LABI) block, verify its
 * signature and record InitTLBStart_addr for use by CPU hotplug.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        uint32_t labi_signature;

        labi_signature =
                cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                        LABI_ADDR_IN_BOOTLOADER +
                                        offsetof(struct linux_app_boot_info,
                                                 labi_signature)));
        if (labi_signature != LABI_SIGNATURE)
                pr_err("The bootloader version on this board is incorrect\n");
        InitTLBStart_addr =
                cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                        LABI_ADDR_IN_BOOTLOADER +
                                        offsetof(struct linux_app_boot_info,
                                                 InitTLBStart_addr)));
#endif
}

/**
 * Detect available CPUs, populate cpu_possible_map
 */
static void octeon_smp_setup(void)
{
        const int coreid = cvmx_get_core_num();
        int cpus;
        int id;
        int core_mask = octeon_get_boot_coremask();

        cpus_clear(cpu_possible_map);
        __cpu_number_map[coreid] = 0;
        __cpu_logical_map[0] = coreid;
        cpu_set(0, cpu_possible_map);

        cpus = 1;
        for (id = 0; id < 16; id++) {
                if ((id != coreid) && (core_mask & (1 << id))) {
                        cpu_set(cpus, cpu_possible_map);
                        __cpu_number_map[id] = cpus;
                        __cpu_logical_map[cpus] = id;
                        cpus++;
                }
        }
        cpu_present_map = cpu_possible_map;

        octeon_smp_hotplug_setup();
}

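/*
 * octeon_smp_setup() above fills in the two translation tables used by
 * the rest of this file: __cpu_logical_map[] maps a Linux cpu number to
 * a physical OCTEON core id (read back via cpu_logical_map()), and
 * __cpu_number_map[] is the inverse.  The boot core always becomes cpu 0.
 */
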
/**
 * Firmware CPU startup hook
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
        int count;

        pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
                cpu_logical_map(cpu));

        octeon_processor_sp = __KSTK_TOS(idle);
        octeon_processor_gp = (unsigned long)(task_thread_info(idle));
        octeon_processor_boot = cpu_logical_map(cpu);
        mb();

        count = 10000;
        while (octeon_processor_sp && count) {
                /* Waiting for processor to get the SP and GP */
                udelay(1);
                count--;
        }
        if (count == 0)
                pr_err("Secondary boot timeout\n");
}

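/*
 * The wait loop above gives the secondary roughly 10 ms (10000 x 1 us)
 * to pick up its stack and gp; the started core signals success by
 * clearing octeon_processor_sp (done in the low-level startup code,
 * not in this file).
 */
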
/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
        const int coreid = cvmx_get_core_num();
        union cvmx_ciu_intx_sum0 interrupt_enable;

#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cur_exception_base;

        cur_exception_base = cvmx_read64_uint32(
                CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                             LABI_ADDR_IN_BOOTLOADER +
                             offsetof(struct linux_app_boot_info,
                                      cur_exception_base)));
        /* cur_exception_base is incremented in bootloader after setting */
        write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
#endif
        octeon_check_cpu_bist();
        octeon_init_cvmcount();
        /*
        pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
        */
        /* Enable Mailbox interrupts to this core. These are the only
           interrupts allowed on line 3 */
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
        interrupt_enable.u64 = 0;
        interrupt_enable.s.mbox = 0x3;
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
        cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
        cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
        /* Enable core interrupt processing for interrupts 2, 3 and 7
           (Status.IE plus IM2, IM3 and IM7) */
        set_c0_status(0x8c01);
}

/**
 * Callout to firmware before smp_init
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
        if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
                        "mailbox0", mailbox_interrupt)) {
                panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
        }
        if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED,
                        "mailbox1", mailbox_interrupt)) {
                panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
        }
}

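/*
 * The two mailbox IRQ lines registered above correspond to the two CIU
 * mbox bits that octeon_init_secondary() enables on each secondary core
 * (interrupt_enable.s.mbox = 0x3).  mailbox_interrupt is also passed as
 * the dev_id cookie; the handler itself ignores it.
 */
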
/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
#ifdef CONFIG_CAVIUM_GDB
        unsigned long tmp;
        /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
           to be not masked by this core so we know the signal is received by
           someone */
        asm volatile ("dmfc0 %0, $22\n"
                      "ori   %0, %0, 0x9100\n"
                      "dmtc0 %0, $22\n" : "=r" (tmp));
#endif

        octeon_user_io_init();

        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
}

/**
 * Hook for after all CPUs are online
 */
static void octeon_cpus_done(void)
{
#ifdef CONFIG_CAVIUM_GDB
        unsigned long tmp;
        /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
           to be not masked by this core so we know the signal is received by
           someone */
        asm volatile ("dmfc0 %0, $22\n"
                      "ori   %0, %0, 0x9100\n"
                      "dmtc0 %0, $22\n" : "=r" (tmp));
#endif
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

static DEFINE_SPINLOCK(smp_reserve_lock);

static int octeon_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == 0)
                return -EBUSY;

        spin_lock(&smp_reserve_lock);

        cpu_clear(cpu, cpu_online_map);
        cpu_clear(cpu, cpu_callin_map);
        local_irq_disable();
        fixup_irqs();
        local_irq_enable();

        flush_cache_all();
        local_flush_tlb_all();

        spin_unlock(&smp_reserve_lock);

        return 0;
}

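/*
 * Rough hot-unplug sequence on this platform: octeon_cpu_disable() runs
 * on the dying cpu to take it out of the maps, the dying cpu's idle path
 * then ends up in play_dead() below, and octeon_cpu_die() (running on
 * another cpu) hands the core back to the bootloader by setting its bit
 * in avail_coremask and pulsing CVMX_CIU_PP_RST.
 */
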
static void octeon_cpu_die(unsigned int cpu)
{
        int coreid = cpu_logical_map(cpu);
        uint32_t avail_coremask;
        struct cvmx_bootmem_named_block_desc *block_desc;

#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
        /* Disable the watchdog */
        cvmx_ciu_wdogx_t ciu_wdog;
        ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
        ciu_wdog.s.mode = 0;
        cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
#endif

        while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                cpu_relax();

        /*
         * The strategy for getting/setting the available cores mask is a
         * bit involved; it is copied from the bootloader.
         */
        /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
        block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

        if (!block_desc) {
                avail_coremask =
                        cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                LABI_ADDR_IN_BOOTLOADER +
                                offsetof(struct linux_app_boot_info,
                                         avail_coremask)));
        } else {                /* alternative, already initialized */
                avail_coremask =
                        cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                block_desc->base_addr +
                                AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
        }

        avail_coremask |= 1 << coreid;

        /* Setting avail_coremask for bootoct binary */
        if (!block_desc) {
                cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                LABI_ADDR_IN_BOOTLOADER +
                                offsetof(struct linux_app_boot_info,
                                         avail_coremask)),
                                avail_coremask);
        } else {
                cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                block_desc->base_addr +
                                AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
                                avail_coremask);
        }

        pr_info("Reset core %d. Available Coremask = %x\n", coreid,
                avail_coremask);
        cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
        cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
        int coreid = cvmx_get_core_num();

        idle_task_exit();
        octeon_processor_boot = 0xff;
        per_cpu(cpu_state, coreid) = CPU_DEAD;

        while (1)       /* core will be reset here */
                ;
}

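/*
 * The dead core never leaves the loop above on its own; it spins there
 * until octeon_cpu_die(), running on another cpu, pulses CVMX_CIU_PP_RST
 * and the hardware resets it back into the bootloader.
 */
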
extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
        kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
}

int octeon_update_boot_vector(unsigned int cpu)
{
        int coreid = cpu_logical_map(cpu);
        unsigned int avail_coremask;
        struct cvmx_bootmem_named_block_desc *block_desc;
        struct boot_init_vector *boot_vect =
                (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
                                                  BOOTLOADER_BOOT_VECTOR);

        block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

        if (!block_desc) {
                avail_coremask =
                        cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                LABI_ADDR_IN_BOOTLOADER +
                                offsetof(struct linux_app_boot_info,
                                         avail_coremask)));
        } else {                /* alternative, already initialized */
                avail_coremask =
                        cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
                                block_desc->base_addr +
                                AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
        }

        if (!(avail_coremask & (1 << coreid))) {
                /* core not available; assume it was caught by the simple executive */
                cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
                cvmx_write_csr(CVMX_CIU_PP_RST, 0);
        }

        boot_vect[coreid].app_start_func_addr =
                (uint32_t) (unsigned long) start_after_reset;
        boot_vect[coreid].code_addr = InitTLBStart_addr;

        CVMX_SYNC;

        cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

        return 0;
}

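/*
 * Bringing a hot-plugged core back thus has two parts: the boot vector
 * set up above points the core at start_after_reset(), which re-enters
 * kernel_entry(), and the CVMX_CIU_NMI write signals the core so the
 * bootloader-side code follows that vector (the bootloader side is not
 * shown in this file).
 */
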
static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
                                         unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                octeon_update_boot_vector(cpu);
                break;
        case CPU_ONLINE:
                pr_info("Cpu %d online\n", cpu);
                break;
        case CPU_DEAD:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
        .notifier_call = octeon_cpu_callback,
};

static int __cpuinit register_cavium_notifier(void)
{
        register_hotcpu_notifier(&octeon_cpu_notifier);

        return 0;
}

late_initcall(register_cavium_notifier);

#endif  /* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
        .send_ipi_single        = octeon_send_ipi_single,
        .send_ipi_mask          = octeon_send_ipi_mask,
        .init_secondary         = octeon_init_secondary,
        .smp_finish             = octeon_smp_finish,
        .cpus_done              = octeon_cpus_done,
        .boot_secondary         = octeon_boot_secondary,
        .smp_setup              = octeon_smp_setup,
        .prepare_cpus           = octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable            = octeon_cpu_disable,
        .cpu_die                = octeon_cpu_die,
#endif
};