/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
#include <asm/timer.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};

cpumask_t smp_commenced_mask = CPU_MASK_NONE;

const struct sparc32_ipi_ops *sparc32_ipi_ops;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel no?  Most Sparcs have 'swap'
 * instruction which is much better...
 */
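
/*
 * Illustrative sketch only (not part of the original file): a minimal
 * test-and-set spinlock built on 'ldstub', assuming a byte-sized lock
 * word that is 0 when free and 0xff when held.  The example_* names are
 * hypothetical; the real lock code for this port lives in
 * arch/sparc/include/asm/spinlock_32.h.
 */
#if 0	/* example, never compiled */
static inline void example_ldstub_lock(unsigned char *lock)
{
	unsigned char val;

	do {
		/* Atomically read the lock byte and store 0xff in its place. */
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=r" (val)
				     : "r" (lock)
				     : "memory");
	} while (val);		/* non-zero means someone else held it: spin */
}

static inline void example_ldstub_unlock(unsigned char *lock)
{
	/* A plain byte store of zero releases the lock. */
	__asm__ __volatile__("stb %%g0, [%0]"
			     : /* no outputs */
			     : "r" (lock)
			     : "memory");
}
#endif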

void __cpuinit smp_store_cpu_info(int id)
{
	int cpu_node;
	int mid;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;

	mid = cpu_get_hwmid(cpu_node);

	if (mid < 0) {
		printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n",
		       id, cpu_node);
		mid = 0;
	}
	cpu_data(id).mid = mid;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num = 0;

	for_each_online_cpu(cpu) {
		num++;
		bogosum += cpu_data(cpu).udelay_val;
	}

	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num, bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_smp_done();
		break;
	case sun4d:
		smp4d_smp_done();
		break;
	case sparc_leon:
		leon_smp_done();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}
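
/*
 * Note on the arithmetic above (explanatory, not from the original file):
 * udelay_val is loops_per_jiffy, so loops per second is udelay_val * HZ,
 * and each delay-loop iteration is conventionally counted as two
 * bogo-instructions, giving BogoMIPS = loops_per_second / 500000.  The
 * integer form used above avoids an overflow-prone multiply:
 *
 *	whole part = bogosum / (500000 / HZ)
 *	fraction   = (bogosum / (5000 / HZ)) % 100
 *
 * e.g. with HZ = 100 and a summed bogosum of 2487500 loops/jiffy:
 *	2487500 / 5000 = 497,  (2487500 / 50) % 100 = 50
 * which prints as "497.50 BogoMIPS".
 */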

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };

void smp_send_reschedule(int cpu)
{
	/*
	 * CPU model dependent way of implementing IPI generation targeting
	 * a single CPU. The trap handler needs only to do trap entry/return
	 * to call schedule.
	 */
	sparc32_ipi_ops->resched(cpu);
}
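
/*
 * Illustrative sketch (not from the original file): how a platform might
 * plug in its IPI primitives.  The member names resched, single and
 * mask_one are inferred from their uses in this file; the example_*
 * function names are hypothetical.
 */
#if 0	/* example, never compiled */
static void example_ipi_resched(int cpu)
{
	/* Platform-specific: raise the reschedule IPI on @cpu. */
}

static void example_ipi_single(int cpu)
{
	/* Platform-specific: raise the call-function-single IPI on @cpu. */
}

static void example_ipi_mask_one(int cpu)
{
	/* Platform-specific: raise the call-function IPI on one @cpu of a mask. */
}

static const struct sparc32_ipi_ops example_ipi_ops = {
	.resched  = example_ipi_resched,
	.single   = example_ipi_single,
	.mask_one = example_ipi_mask_one,
};

/* During platform setup:  sparc32_ipi_ops = &example_ipi_ops;  */
#endif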

void smp_send_stop(void)
{
}

void arch_send_call_function_single_ipi(int cpu)
{
	/* trigger one IPI single call on one CPU */
	sparc32_ipi_ops->single(cpu);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}

void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}

void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	local_cpu_data().irq_call_count++;
	irq_exit();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void __init smp4m_boot_cpus(void);
	extern void __init smp4d_boot_cpus(void);
	int i, cpuid, extra;

	printk("Entering SMP Mode...\n");

	extra = 0;
	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
		if (cpuid >= NR_CPUS)
			extra++;
	}
	/* i = number of cpus */
	if (extra && max_cpus > i - extra)
		printk("Warning: NR_CPUS is too low to start all cpus\n");

	smp_store_cpu_info(boot_cpu_id);

	switch(sparc_cpu_model) {
	case sun4m:
		smp4m_boot_cpus();
		break;
	case sun4d:
		smp4d_boot_cpus();
		break;
	case sparc_leon:
		leon_boot_cpus();
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS) {
			set_cpu_possible(mid, true);
			set_cpu_present(mid, true);
		}
		instance++;
	}
}

void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;

	set_cpu_online(cpuid, true);
	set_cpu_possible(cpuid, true);
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *);
	extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *);
	int ret = 0;

	switch(sparc_cpu_model) {
	case sun4m:
		ret = smp4m_boot_one_cpu(cpu, tidle);
		break;
	case sun4d:
		ret = smp4d_boot_one_cpu(cpu, tidle);
		break;
	case sparc_leon:
		ret = leon_boot_one_cpu(cpu, tidle);
		break;
	case sun4e:
		printk("SUN4E\n");
		BUG();
		break;
	case sun4u:
		printk("SUN4U\n");
		BUG();
		break;
	default:
		printk("UNKNOWN!\n");
		BUG();
		break;
	}

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
	}
	return ret;
}

void __cpuinit arch_cpu_pre_starting(void *arg)
{
	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_starting(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_starting(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_starting(arg);
		break;
	default:
		BUG();
	}
}

void __cpuinit arch_cpu_pre_online(void *arg)
{
	unsigned int cpuid = hard_smp_processor_id();

	register_percpu_ce(cpuid);
	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_ops->cache_all();
	local_ops->tlb_all();

	switch(sparc_cpu_model) {
	case sun4m:
		sun4m_cpu_pre_online(arg);
		break;
	case sun4d:
		sun4d_cpu_pre_online(arg);
		break;
	case sparc_leon:
		leon_cpu_pre_online(arg);
		break;
	default:
		BUG();
	}
}

void __cpuinit sparc_start_secondary(void *arg)
{
	unsigned int cpu;

	/*
	 * SMP booting is extremely fragile in some architectures. So run
	 * the cpu initialization code first before anything else.
	 */
	arch_cpu_pre_starting(arg);

	preempt_disable();
	cpu = smp_processor_id();

	/* Invoke the CPU_STARTING notifier callbacks */
	notify_cpu_starting(cpu);

	arch_cpu_pre_online(arg);

	/* Set the CPU in the cpu_online_mask */
	set_cpu_online(cpu, true);

	/* Enable local interrupts now */
	local_irq_enable();

	wmb();
	cpu_idle();

	/* We should never reach here! */
	BUG();
}

void __cpuinit smp_callin(void)
{
	sparc_start_secondary(NULL);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i) {
		seq_printf(m,
			   "Cpu%dBogo\t: %lu.%02lu\n",
			   i,
			   cpu_data(i).udelay_val/(500000/HZ),
			   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d\t\t: online\n", i);
}