/* arch/ppc/platforms/pmac_smp.c */
  1. /*
  2. * SMP support for power macintosh.
  3. *
  4. * We support both the old "powersurge" SMP architecture
  5. * and the current Core99 (G4 PowerMac) machines.
  6. *
  7. * Note that we don't support the very first rev. of
  8. * Apple/DayStar 2 CPUs board, the one with the funky
  9. * watchdog. Hopefully, none of these should be there except
  10. * maybe internally to Apple. I should probably still add some
  11. * code to detect this card though and disable SMP. --BenH.
  12. *
  13. * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
  14. * and Ben Herrenschmidt <benh@kernel.crashing.org>.
  15. *
  16. * Support for DayStar quad CPU cards
  17. * Copyright (C) XLR8, Inc. 1994-2000
  18. *
  19. * This program is free software; you can redistribute it and/or
  20. * modify it under the terms of the GNU General Public License
  21. * as published by the Free Software Foundation; either version
  22. * 2 of the License, or (at your option) any later version.
  23. */
  24. #include <linux/config.h>
  25. #include <linux/kernel.h>
  26. #include <linux/sched.h>
  27. #include <linux/smp.h>
  28. #include <linux/smp_lock.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/kernel_stat.h>
  31. #include <linux/delay.h>
  32. #include <linux/init.h>
  33. #include <linux/spinlock.h>
  34. #include <linux/errno.h>
  35. #include <linux/hardirq.h>
  36. #include <linux/cpu.h>
  37. #include <asm/ptrace.h>
  38. #include <asm/atomic.h>
  39. #include <asm/irq.h>
  40. #include <asm/page.h>
  41. #include <asm/pgtable.h>
  42. #include <asm/sections.h>
  43. #include <asm/io.h>
  44. #include <asm/prom.h>
  45. #include <asm/smp.h>
  46. #include <asm/residual.h>
  47. #include <asm/machdep.h>
  48. #include <asm/pmac_feature.h>
  49. #include <asm/time.h>
  50. #include <asm/open_pic.h>
  51. #include <asm/cacheflush.h>
  52. #include <asm/keylargo.h>
  53. /*
  54. * Powersurge (old powermac SMP) support.
  55. */
  56. extern void __secondary_start_pmac_0(void);
  57. /* Addresses for powersurge registers */
  58. #define HAMMERHEAD_BASE 0xf8000000
  59. #define HHEAD_CONFIG 0x90
  60. #define HHEAD_SEC_INTR 0xc0
  61. /* register for interrupting the primary processor on the powersurge */
  62. /* N.B. this is actually the ethernet ROM! */
  63. #define PSURGE_PRI_INTR 0xf3019000
  64. /* register for storing the start address for the secondary processor */
  65. /* N.B. this is the PCI config space address register for the 1st bridge */
  66. #define PSURGE_START 0xf2800000
  67. /* Daystar/XLR8 4-CPU card */
  68. #define PSURGE_QUAD_REG_ADDR 0xf8800000
  69. #define PSURGE_QUAD_IRQ_SET 0
  70. #define PSURGE_QUAD_IRQ_CLR 1
  71. #define PSURGE_QUAD_IRQ_PRIMARY 2
  72. #define PSURGE_QUAD_CKSTOP_CTL 3
  73. #define PSURGE_QUAD_PRIMARY_ARB 4
  74. #define PSURGE_QUAD_BOARD_ID 6
  75. #define PSURGE_QUAD_WHICH_CPU 7
  76. #define PSURGE_QUAD_CKSTOP_RDBK 8
  77. #define PSURGE_QUAD_RESET_CTL 11
  78. #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
  79. #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
  80. #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
  81. #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
  82. /* virtual addresses for the above */
  83. static volatile u8 __iomem *hhead_base;
  84. static volatile u8 __iomem *quad_base;
  85. static volatile u32 __iomem *psurge_pri_intr;
  86. static volatile u8 __iomem *psurge_sec_intr;
  87. static volatile u32 __iomem *psurge_start;
  88. /* values for psurge_type */
  89. #define PSURGE_NONE -1
  90. #define PSURGE_DUAL 0
  91. #define PSURGE_QUAD_OKEE 1
  92. #define PSURGE_QUAD_COTTON 2
  93. #define PSURGE_QUAD_ICEGRASS 3
  94. /* what sort of powersurge board we have */
  95. static int psurge_type = PSURGE_NONE;
  96. /* L2 and L3 cache settings to pass from CPU0 to CPU1 */
  97. volatile static long int core99_l2_cache;
  98. volatile static long int core99_l3_cache;
  99. /* Timebase freeze GPIO */
  100. static unsigned int core99_tb_gpio;
  101. /* Sync flag for HW tb sync */
  102. static volatile int sec_tb_reset = 0;
  103. static unsigned int pri_tb_hi, pri_tb_lo;
  104. static unsigned int pri_tb_stamp;
/*
 * Propagate the boot CPU's L2 (and, when present, L3) cache controller
 * settings to a secondary CPU so all CPUs run with identical cache
 * configuration.
 *
 * cpu == 0: snapshot this CPU's L2CR/L3CR into core99_l2_cache /
 *           core99_l3_cache for later use.
 * cpu != 0: disable the cache first (write 0) and then apply the value
 *           recorded by CPU0.
 */
static void __devinit core99_init_caches(int cpu)
{
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	/* L3 only exists on CPUs with the L3CR feature (e.g. 745x) */
	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0){
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
}
  130. /*
  131. * Set and clear IPIs for powersurge.
  132. */
  133. static inline void psurge_set_ipi(int cpu)
  134. {
  135. if (psurge_type == PSURGE_NONE)
  136. return;
  137. if (cpu == 0)
  138. in_be32(psurge_pri_intr);
  139. else if (psurge_type == PSURGE_DUAL)
  140. out_8(psurge_sec_intr, 0);
  141. else
  142. PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
  143. }
/*
 * Acknowledge/clear a pending IPI on @cpu.  Only secondaries need an
 * explicit clear; the primary's interrupt is cleared by the read done
 * in psurge_set_ipi().
 */
static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			/* fall through */
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}
  157. /*
  158. * On powersurge (old SMP powermac architecture) we don't have
  159. * separate IPIs for separate messages like openpic does. Instead
  160. * we have a bitmap for each processor, where a 1 bit means that
  161. * the corresponding message is pending for that processor.
  162. * Ideally each cpu's entry would be in a different cache line.
  163. * -- paulus.
  164. */
  165. static unsigned long psurge_smp_message[NR_CPUS];
  166. void psurge_smp_message_recv(struct pt_regs *regs)
  167. {
  168. int cpu = smp_processor_id();
  169. int msg;
  170. /* clear interrupt */
  171. psurge_clr_ipi(cpu);
  172. if (num_online_cpus() < 2)
  173. return;
  174. /* make sure there is a message there */
  175. for (msg = 0; msg < 4; msg++)
  176. if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
  177. smp_message_recv(msg, regs);
  178. }
/*
 * irq handler for the primary CPU's IPI (installed on irq 30 via
 * psurge_irqaction in smp_psurge_setup_cpu()); just forwards to the
 * common message-receive path.
 */
irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
	psurge_smp_message_recv(regs);
	return IRQ_HANDLED;
}
  184. static void smp_psurge_message_pass(int target, int msg)
  185. {
  186. int i;
  187. if (num_online_cpus() < 2)
  188. return;
  189. for (i = 0; i < NR_CPUS; i++) {
  190. if (!cpu_online(i))
  191. continue;
  192. if (target == MSG_ALL
  193. || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
  194. || target == i) {
  195. set_bit(msg, &psurge_smp_message[i]);
  196. psurge_set_ipi(i);
  197. }
  198. }
  199. }
  200. /*
  201. * Determine a quad card presence. We read the board ID register, we
  202. * force the data bus to change to something else, and we read it again.
  203. * It it's stable, then the register probably exist (ugh !)
  204. */
/*
 * Probe for a DayStar/XLR8 quad CPU card.
 *
 * Read the board ID register; if it's outside the known quad range or
 * unstable, assume a dual board.  Then hammer the data bus with varying
 * patterns (flushed out of the cache so the stores really hit the bus)
 * and re-read the ID each time -- if the register keeps reading back
 * the same value it probably really exists.
 *
 * Returns one of the PSURGE_QUAD_* types, or PSURGE_DUAL if no quad
 * card was detected.
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters. -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		/* rotate the fill patterns through the array each pass */
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		/* force the stores onto the bus */
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		/* ID must stay stable despite the bus noise */
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
/*
 * Bring up the secondary CPUs on a DayStar/XLR8 quad card and leave
 * them held in reset, ready to be kicked later.
 *
 * NOTE(review): the exact meaning of each register poke below is
 * hardware lore inherited from the XLR8 code -- the sequence and the
 * mdelay(33) settle times appear deliberate; do not reorder.
 */
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	/* mask of the non-primary CPUs present on the card */
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	/* icegrass uses reset control, the others use clock-stop */
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	/* clear any latched secondary interrupt, then clear quad IRQs */
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	/* release the secondaries from reset */
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
/*
 * Detect a powersurge SMP board and map its registers.
 * Returns the number of CPUs to use (1 means "no SMP here").
 */
static int __init smp_psurge_probe(void)
{
	int i, ncpus;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return 1;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board. -- paulus.
	 */
	if (find_devices("hammerhead") == NULL)
		return 1;

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
	} else {
		/* quad regs not present after all; drop the mapping */
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return 1;
		}
		ncpus = 2;
	}

	/* map secondary start-address and primary interrupt registers */
	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* this is not actually strictly necessary -- paulus. */
	for (i = 1; i < ncpus; ++i)
		smp_hw_index[i] = i;

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

	return ncpus;
}
/*
 * Start secondary CPU @nr: write its (physical) entry address into the
 * PSURGE_START register, then pulse its IPI to release it.
 */
static void __init smp_psurge_kick_cpu(int nr)
{
	/* each CPU gets an 8-byte entry stub after __secondary_start_pmac_0 */
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a;

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	out_be32(psurge_start, start);
	mb();

	/* pulse the IPI so the secondary picks up the start address */
	psurge_set_ipi(nr);
	udelay(10);
	psurge_clr_ipi(nr);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
  316. /*
  317. * With the dual-cpu powersurge board, the decrementers and timebases
  318. * of both cpus are frozen after the secondary cpu is started up,
  319. * until we give the secondary cpu another interrupt. This routine
  320. * uses this to get the timebases synchronized.
  321. * -- paulus.
  322. */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
	int t;

	/* reset DEC and TB on this CPU; both are frozen HW-wide until
	 * the primary sends the secondary one more interrupt */
	set_dec(tb_ticks_per_jiffy);
	set_tb(0, 0);
	last_jiffy_stamp(cpu_nr) = 0;

	if (cpu_nr > 0) {
		/* secondary: signal the primary that our TB is reset */
		mb();
		sec_tb_reset = 1;
		return;
	}

	/* wait for the secondary to have reset its TB before proceeding */
	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
		;

	/* now interrupt the secondary, starting both TBs */
	psurge_set_ipi(1);

	smp_tb_synchronized = 1;
}
/* Primary CPU IPI handler, wired to irq 30 in smp_psurge_setup_cpu() */
static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "primary IPI",
};
/*
 * Per-CPU SMP setup for powersurge boards.  On the primary, this
 * installs the IPI handler and resets the secondary entry point; on
 * dual boards it also runs the timebase sync for every CPU.
 */
static void __init smp_psurge_setup_cpu(int cpu_nr)
{

	if (cpu_nr == 0) {
		/* If we failed to start the second CPU, we should still
		 * send it an IPI to start the timebase & DEC or we might
		 * have them stuck.
		 */
		if (num_online_cpus() < 2) {
			if (psurge_type == PSURGE_DUAL)
				psurge_set_ipi(1);
			return;
		}
		/* reset the entry point so if we get another intr we won't
		 * try to startup again */
		out_be32(psurge_start, 0x100);
		if (setup_irq(30, &psurge_irqaction))
			printk(KERN_ERR "Couldn't get primary IPI interrupt");
	}

	if (psurge_type == PSURGE_DUAL)
		psurge_dual_sync_tb(cpu_nr);
}
/* Timebase sync on powersurge is done in psurge_dual_sync_tb() from
 * smp_psurge_setup_cpu(), so the generic take/give hooks are no-ops. */
void __init smp_psurge_take_timebase(void)
{
	/* Dummy implementation */
}

void __init smp_psurge_give_timebase(void)
{
	/* Dummy implementation */
}
/*
 * Count the CPU nodes in the device tree and do one-time Core99 SMP
 * setup: pick the timebase sync method (generic on MacRISC4/G5,
 * GPIO-based otherwise), request openpic IPIs, disable NAP mode and
 * snapshot CPU0's cache settings.  Returns the number of CPUs found.
 */
static int __init smp_core99_probe(void)
{
#ifdef CONFIG_6xx
	extern int powersave_nap;
#endif
	struct device_node *cpus, *firstcpu;
	int i, ncpus = 0, boot_cpu = -1;
	u32 *tbprop = NULL;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
	cpus = firstcpu = find_type_devices("cpu");
	while(cpus != NULL) {
		/* the node marked "running" is the boot CPU */
		u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
		char *stateprop = (char *)get_property(cpus, "state", NULL);
		if (regprop != NULL && stateprop != NULL &&
		    !strncmp(stateprop, "running", 7))
			boot_cpu = *regprop;
		++ncpus;
		cpus = cpus->next;
	}
	if (boot_cpu == -1)
		printk(KERN_WARNING "Couldn't detect boot CPU !\n");
	if (boot_cpu != 0)
		printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);

	if (machine_is_compatible("MacRISC4")) {
		/* G5: timebase never freezes, use the generic sync */
		extern struct smp_ops_t core99_smp_ops;

		core99_smp_ops.take_timebase = smp_generic_take_timebase;
		core99_smp_ops.give_timebase = smp_generic_give_timebase;
	} else {
		/* older Core99: find the timebase-enable GPIO, falling
		 * back to the KeyLargo default */
		if (firstcpu != NULL)
			tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
		if (tbprop)
			core99_tb_gpio = *tbprop;
		else
			core99_tb_gpio = KL_GPIO_TB_ENABLE;
	}

	if (ncpus > 1) {
		openpic_request_IPIs();
		for (i = 1; i < ncpus; ++i)
			smp_hw_index[i] = i;
#ifdef CONFIG_6xx
		powersave_nap = 0;
#endif
		core99_init_caches(0);
	}

	return ncpus;
}
  422. static void __devinit smp_core99_kick_cpu(int nr)
  423. {
  424. unsigned long save_vector, new_vector;
  425. unsigned long flags;
  426. volatile unsigned long *vector
  427. = ((volatile unsigned long *)(KERNELBASE+0x100));
  428. if (nr < 0 || nr > 3)
  429. return;
  430. if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
  431. local_irq_save(flags);
  432. local_irq_disable();
  433. /* Save reset vector */
  434. save_vector = *vector;
  435. /* Setup fake reset vector that does
  436. * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
  437. */
  438. new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
  439. *vector = 0x48000002 + new_vector - KERNELBASE;
  440. /* flush data cache and inval instruction cache */
  441. flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
  442. /* Put some life in our friend */
  443. pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
  444. /* FIXME: We wait a bit for the CPU to take the exception, I should
  445. * instead wait for the entry code to set something for me. Well,
  446. * ideally, all that crap will be done in prom.c and the CPU left
  447. * in a RAM-based wait loop like CHRP.
  448. */
  449. mdelay(1);
  450. /* Restore our exception vector */
  451. *vector = save_vector;
  452. flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
  453. local_irq_restore(flags);
  454. if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
  455. }
/*
 * Per-CPU Core99 setup: apply CPU0's cache settings on secondaries and
 * initialize the openpic for this CPU.  On CPU0 of a G5, take the
 * second CPU off the bus if it failed to come up.
 */
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	do_openpic_setup_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
		extern void g5_phy_disable_cpu1(void);

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}
/* not __init, called in sleep/wakeup code */
/*
 * Secondary side of the Core99 timebase handshake (primary side is
 * smp_core99_give_timebase): signal readiness with sec_tb_reset = 1,
 * wait for the primary to publish pri_tb_hi/lo (sec_tb_reset == 2),
 * copy them into our own TB, then clear the flag to release the
 * primary.
 */
void smp_core99_take_timebase(void)
{
	unsigned long flags;

	/* tell the primary we're here */
	sec_tb_reset = 1;
	mb();

	/* wait for the primary to set pri_tb_hi/lo */
	while (sec_tb_reset < 2)
		mb();

	/* set our stuff the same as the primary */
	local_irq_save(flags);
	set_dec(1);
	set_tb(pri_tb_hi, pri_tb_lo);
	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
	mb();

	/* tell the primary we're done */
	sec_tb_reset = 0;
	mb();
	local_irq_restore(flags);
}
/* not __init, called in sleep/wakeup code */
/*
 * Primary side of the Core99 timebase handshake: wait for the
 * secondary to reach take_timebase, freeze the timebase via the
 * timebase-enable GPIO, publish our TB in pri_tb_hi/lo, then unfreeze
 * once the secondary has copied it.
 */
void smp_core99_give_timebase(void)
{
	unsigned long flags;
	unsigned int t;

	/* wait for the secondary to be in take_timebase */
	for (t = 100000; t > 0 && !sec_tb_reset; --t)
		udelay(10);
	if (!sec_tb_reset) {
		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
		return;
	}

	/* freeze the timebase and read it */
	/* disable interrupts so the timebase is disabled for the
	   shortest possible time */
	local_irq_save(flags);
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	mb();
	pri_tb_hi = get_tbu();
	pri_tb_lo = get_tbl();
	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
	mb();

	/* tell the secondary we're ready */
	sec_tb_reset = 2;
	mb();

	/* wait for the secondary to have taken it */
	for (t = 100000; t > 0 && sec_tb_reset; --t)
		udelay(10);
	if (sec_tb_reset)
		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
	else
		smp_tb_synchronized = 1;

	/* Now, restart the timebase by leaving the GPIO to an open collector */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	local_irq_restore(flags);
}
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= smp_psurge_message_pass,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
/* Core99 Macs (dual G4s); on MacRISC4/G5 the timebase hooks are
 * replaced with the generic ones in smp_core99_probe() */
struct smp_ops_t core99_smp_ops = {
	.message_pass	= smp_openpic_message_pass,
	.probe		= smp_core99_probe,
	.kick_cpu	= smp_core99_kick_cpu,
	.setup_cpu	= smp_core99_setup_cpu,
	.give_timebase	= smp_core99_give_timebase,
	.take_timebase	= smp_core99_take_timebase,
};
  553. #ifdef CONFIG_HOTPLUG_CPU
/*
 * Take the calling CPU offline for hotplug: remove it from the online
 * map, raise the openpic task priority (masking interrupts) and rearm
 * the decrementer far in the future.  The second mtdec after a short
 * delay is presumably to cover a decrementer exception that was
 * already pending -- NOTE(review): not documented here, verify.
 */
int __cpu_disable(void)
{
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* XXX reset cpu affinity here */
	openpic_set_priority(0xf);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	mb();
	udelay(20);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	return 0;
}
extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */

/* per-CPU "I have stopped" flags, polled by __cpu_die() */
static int cpu_dead[NR_CPUS];

/*
 * Runs on the dying CPU itself: mark ourselves dead so __cpu_die()
 * can proceed, then drop into the low-level sleep code (never returns).
 */
void cpu_die(void)
{
	local_irq_disable();
	cpu_dead[smp_processor_id()] = 1;
	mb();
	low_cpu_die();
}
/*
 * Runs on a surviving CPU: wait (up to ~1s) for @cpu to set its
 * cpu_dead[] flag in cpu_die(), then reset the book-keeping so the CPU
 * can be brought back online later.
 */
void __cpu_die(unsigned int cpu)
{
	int timeout;

	timeout = 1000;
	while (!cpu_dead[cpu]) {
		if (--timeout == 0) {
			printk("CPU %u refused to die!\n", cpu);
			break;
		}
		msleep(1);
	}
	cpu_callin_map[cpu] = 0;
	cpu_dead[cpu] = 0;
}
  588. #endif