/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>

#define IRQ_RESCHEDULE		13
#define IRQ_STOP_CPU		14
#define IRQ_CROSS_CALL		15
extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
extern cpumask_t smp_commenced_mask;

extern int __smp4m_processor_id(void);

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)	printk x
#else
#define SMP_PRINTK(x)
#endif
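/*
 * Atomically exchange *ptr with val using the SPARC "swap" instruction.
 * This is what publishes a secondary CPU's arrival in cpu_callin_map
 * below, so the update is a single atomic memory operation.
 */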
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}
static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);
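/*
 * Per-CPU entry point for the secondary processors.  The low-level startup
 * code in trampoline.S is expected to have set up the MMU and a stack before
 * we get here; this routine then calibrates the local timer, publishes the
 * CPU in cpu_callin_map and waits until the boot CPU adds it to
 * smp_commenced_mask.
 */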
void __cpuinit smp4m_callin(void)
{
        int cpuid = hard_smp_processor_id();

        local_flush_cache_all();
        local_flush_tlb_all();

        /* Get our local ticker going. */
        smp_setup_percpu_timer();

        calibrate_delay();
        smp_store_cpu_info(cpuid);

        local_flush_cache_all();
        local_flush_tlb_all();

        /*
         * Unblock the master CPU _only_ once the scheduler state of all
         * secondary CPUs is up-to-date, so that after SMP initialization
         * the master can safely call into the scheduler code.
         */
        /* Allow master to continue. */
        swap(&cpu_callin_map[cpuid], 1);

        /* XXX: What's up with all the flushes? */
        local_flush_cache_all();
        local_flush_tlb_all();

        cpu_probe();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                mb();

        local_irq_enable();

        cpu_set(cpuid, cpu_online_map);
}
/*
 * Cycle through the processors asking the PROM to start each one.
 */
extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];
void __init smp4m_boot_cpus(void)
{
        smp_setup_percpu_timer();
        local_flush_cache_all();
}
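/*
 * Boot a single secondary CPU: fork an idle task for it, point it at its
 * per-CPU startup stub in trampoline.S, hand the context table to the PROM
 * and ask it to start the CPU, then poll cpu_callin_map until the new CPU
 * checks in (or give up after roughly two seconds).
 */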
int __cpuinit smp4m_boot_one_cpu(int i)
{
        extern unsigned long sun4m_cpu_startup;
        unsigned long *entry = &sun4m_cpu_startup;
        struct task_struct *p;
        int timeout;
        int cpu_node;

        cpu_find_by_mid(i, &cpu_node);

        /* Cook up an idler for this guy. */
        p = fork_idle(i);
        current_set[i] = task_thread_info(p);

        /* See trampoline.S for details... */
        entry += ((i - 1) * 3);

        /*
         * Initialize the context table.  Since the call to prom_startcpu()
         * trashes the structure, we need to re-initialize it for each cpu.
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk("Starting CPU %d at %p\n", i, entry);
        local_flush_cache_all();
        prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }

        if (!(cpu_callin_map[i])) {
                printk("Processor %d is stuck.\n", i);
                return -ENODEV;
        }

        local_flush_cache_all();
        return 0;
}
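/*
 * Called once every CPU that was asked to boot has either checked in or
 * been declared stuck: build the circular list used for IRQ rotation and
 * give back the trap-table pages of CPUs that turned out not to be present.
 */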
void __init smp4m_smp_done(void)
{
        int i, first;
        int *prev;

        /* Set up the cpu list for irq rotation. */
        first = 0;
        prev = &first;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i)) {
                        *prev = i;
                        prev = &cpu_data(i).next;
                }
        }
        *prev = first;
        local_flush_cache_all();

        /* Free unneeded trap tables. */
        if (!cpu_isset(1, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu1));
                init_page_count(virt_to_page(trapbase_cpu1));
                free_page((unsigned long)trapbase_cpu1);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(2, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu2));
                init_page_count(virt_to_page(trapbase_cpu2));
                free_page((unsigned long)trapbase_cpu2);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(3, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu3));
                init_page_count(virt_to_page(trapbase_cpu3));
                free_page((unsigned long)trapbase_cpu3);
                totalram_pages++;
                num_physpages++;
        }

        /* Ok, they are spinning and ready to go. */
}
/* At each hardware IRQ, we get this called to forward IRQ reception
 * to the next processor.  The caller must disable the IRQ level being
 * serviced globally so that there are no double interrupts received.
 *
 * XXX See sparc64 irq.c.
 */
void smp4m_irq_rotate(int cpu)
{
        int next = cpu_data(cpu).next;

        if (next != cpu)
                set_irq_udt(next);
}
/* Cross calls, in order to work efficiently and atomically, do all the
 * message passing work themselves; only stop-CPU and reschedule messages
 * come through here.
 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
        static unsigned long smp_cpu_in_msg[NR_CPUS];
        cpumask_t mask;
        int me = smp_processor_id();
        int irq, i;

        if (msg == MSG_RESCHEDULE) {
                irq = IRQ_RESCHEDULE;

                if (smp_cpu_in_msg[me])
                        return;
        } else if (msg == MSG_STOP_CPU) {
                irq = IRQ_STOP_CPU;
        } else {
                goto barf;
        }

        smp_cpu_in_msg[me]++;
        if (target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
                mask = cpu_online_map;
                if (target == MSG_ALL_BUT_SELF)
                        cpu_clear(me, mask);
                for (i = 0; i < 4; i++) {
                        if (cpu_isset(i, mask))
                                set_cpu_int(i, irq);
                }
        } else {
                set_cpu_int(target, irq);
        }
        smp_cpu_in_msg[me]--;

        return;
barf:
        printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
        panic("Bogon SMP message pass.");
}
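/*
 * Cross-call rendezvous state.  The calling CPU fills in ccall_info under
 * cross_call_lock, sends IRQ_CROSS_CALL to every other online CPU and then
 * spins: first until each target has flagged processors_in[] (it has entered
 * the handler), then until each has flagged processors_out[] (it has finished
 * running the function).  CPUs that are not online are pre-marked as done.
 */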
static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
        unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                      unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
        register int ncpus = SUN4M_NCPUS;
        unsigned long flags;

        spin_lock_irqsave(&cross_call_lock, flags);

        /* Init function glue. */
        ccall_info.func = func;
        ccall_info.arg1 = arg1;
        ccall_info.arg2 = arg2;
        ccall_info.arg3 = arg3;
        ccall_info.arg4 = arg4;
        ccall_info.arg5 = arg5;

        /* Init receive/complete mapping, plus fire the IPI's off. */
        {
                cpumask_t mask = cpu_online_map;
                register int i;

                cpu_clear(smp_processor_id(), mask);
                for (i = 0; i < ncpus; i++) {
                        if (cpu_isset(i, mask)) {
                                ccall_info.processors_in[i] = 0;
                                ccall_info.processors_out[i] = 0;
                                set_cpu_int(i, IRQ_CROSS_CALL);
                        } else {
                                ccall_info.processors_in[i] = 1;
                                ccall_info.processors_out[i] = 1;
                        }
                }
        }

        {
                register int i;

                i = 0;
                do {
                        while (!ccall_info.processors_in[i])
                                barrier();
                } while (++i < ncpus);

                i = 0;
                do {
                        while (!ccall_info.processors_out[i])
                                barrier();
                } while (++i < ncpus);
        }

        spin_unlock_irqrestore(&cross_call_lock, flags);
}
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
        int i = smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}
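/*
 * Per-CPU level-14 timer tick.  Acknowledge the profile interrupt, feed the
 * profiler, and when the per-CPU counter expires charge the tick to process
 * accounting via update_process_times().
 */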
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        clear_profile_irq(cpu);

        profile_tick(CPU_PROFILING, regs);

        if (!--prof_counter(cpu)) {
                int user = user_mode(regs);

                irq_enter();
                update_process_times(user);
                irq_exit();

                prof_counter(cpu) = prof_multiplier(cpu);
        }
}
extern unsigned int lvl14_resolution;

static void __init smp_setup_percpu_timer(void)
{
        int cpu = smp_processor_id();

        prof_counter(cpu) = prof_multiplier(cpu) = 1;
        load_profile_irq(cpu, lvl14_resolution);

        if (cpu == boot_cpu_id)
                enable_pil_irq(14);
}
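/*
 * BTFIXUP "blackboxes": the two routines below patch short instruction
 * templates in place at boot.  Both generate a read of %tbr; since each
 * CPU's trap table lives at a distinct, suitably aligned address, a shift
 * and mask of the trap base value is enough to recover the CPU number
 * (smp4m_blackbox_id) or a small per-CPU offset used to pick up the
 * current task pointer (smp4m_blackbox_current).  The exact bit positions
 * are dictated by the trap-table layout set up elsewhere in the port.
 */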
void __init smp4m_blackbox_id(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
        addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
        addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}
void __init smp4m_blackbox_current(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
        addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
        addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}
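/*
 * Wire the sun4m implementations into the generic sparc32 SMP entry points.
 * BTFIXUPSET_* records these so that the boot-time fixup pass patches the
 * call sites (and the blackbox instruction slots above) before SMP is used.
 */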
void __init sun4m_init_smp(void)
{
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}