/*
 * SMP support for power macintosh.
 *
 * We support both the old "powersurge" SMP architecture
 * and the current Core99 (G4 PowerMac) machines.
 *
 * Note that we don't support the very first revision of the
 * Apple/DayStar dual-CPU board, the one with the funky
 * watchdog. Hopefully, none of these are still around except
 * maybe internally at Apple. I should probably still add some
 * code to detect this card and disable SMP, though. --BenH.
 *
 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
 *
 * Support for DayStar quad CPU cards
 * Copyright (C) XLR8, Inc. 1994-2000
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>

#include "pmac.h"

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void __secondary_start_pmac_0(void);
extern int pmac_pfunc_base_install(void);
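
/*
 * Timebase synchronisation state shared by the powersurge and Core99
 * code below: pmac_tb_freeze points at the platform-specific routine
 * that freezes/unfreezes the timebase, while tb_req and timebase
 * implement the handshake between the CPU giving its timebase value
 * and the CPU taking it.
 */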
static void (*pmac_tb_freeze)(int freeze);
static u64 timebase;
static int tb_req;

#ifdef CONFIG_PPC32

/*
 * Powersurge (old powermac SMP) support.
 */

/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE 0xf8000000
#define HHEAD_CONFIG 0x90
#define HHEAD_SEC_INTR 0xc0

/* register for interrupting the primary processor on the powersurge */
/* N.B. this is actually the ethernet ROM! */
#define PSURGE_PRI_INTR 0xf3019000

/* register for storing the start address for the secondary processor */
/* N.B. this is the PCI config space address register for the 1st bridge */
#define PSURGE_START 0xf2800000

/* Daystar/XLR8 4-CPU card */
#define PSURGE_QUAD_REG_ADDR 0xf8800000

#define PSURGE_QUAD_IRQ_SET 0
#define PSURGE_QUAD_IRQ_CLR 1
#define PSURGE_QUAD_IRQ_PRIMARY 2
#define PSURGE_QUAD_CKSTOP_CTL 3
#define PSURGE_QUAD_PRIMARY_ARB 4
#define PSURGE_QUAD_BOARD_ID 6
#define PSURGE_QUAD_WHICH_CPU 7
#define PSURGE_QUAD_CKSTOP_RDBK 8
#define PSURGE_QUAD_RESET_CTL 11

#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))

/* virtual addresses for the above */
static volatile u8 __iomem *hhead_base;
static volatile u8 __iomem *quad_base;
static volatile u32 __iomem *psurge_pri_intr;
static volatile u8 __iomem *psurge_sec_intr;
static volatile u32 __iomem *psurge_start;

/* values for psurge_type */
#define PSURGE_NONE -1
#define PSURGE_DUAL 0
#define PSURGE_QUAD_OKEE 1
#define PSURGE_QUAD_COTTON 2
#define PSURGE_QUAD_ICEGRASS 3

/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;

/*
 * Set and clear IPIs for powersurge.
 */
static inline void psurge_set_ipi(int cpu)
{
        if (psurge_type == PSURGE_NONE)
                return;
        if (cpu == 0)
                in_be32(psurge_pri_intr);
        else if (psurge_type == PSURGE_DUAL)
                out_8(psurge_sec_intr, 0);
        else
                PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

static inline void psurge_clr_ipi(int cpu)
{
        if (cpu > 0) {
                switch (psurge_type) {
                case PSURGE_DUAL:
                        out_8(psurge_sec_intr, ~0);
                        /* fall through */
                case PSURGE_NONE:
                        break;
                default:
                        PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
                }
        }
}

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * we have a bitmap for each processor, where a 1 bit means that
 * the corresponding message is pending for that processor.
 * Ideally each cpu's entry would be in a different cache line.
 *  -- paulus.
 */
static unsigned long psurge_smp_message[NR_CPUS];

void psurge_smp_message_recv(void)
{
        int cpu = smp_processor_id();
        int msg;

        /* clear interrupt */
        psurge_clr_ipi(cpu);

        if (num_online_cpus() < 2)
                return;

        /* make sure there is a message there */
        for (msg = 0; msg < 4; msg++)
                if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
                        smp_message_recv(msg);
}

irqreturn_t psurge_primary_intr(int irq, void *d)
{
        psurge_smp_message_recv();
        return IRQ_HANDLED;
}

static void smp_psurge_message_pass(int target, int msg)
{
        int i;

        if (num_online_cpus() < 2)
                return;

        for_each_online_cpu(i) {
                if (target == MSG_ALL
                    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
                    || target == i) {
                        set_bit(msg, &psurge_smp_message[i]);
                        psurge_set_ipi(i);
                }
        }
}

/*
 * Determine whether a quad card is present.  We read the board ID
 * register, force the data bus to change to something else, and read
 * it again.  If it's stable, then the register probably exists (ugh !)
 */
static int __init psurge_quad_probe(void)
{
        int type;
        unsigned int i;

        type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
        if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
            || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
                return PSURGE_DUAL;

        /* looks OK, try a slightly more rigorous test */
        /* bogus is not necessarily cacheline-aligned,
           though I don't suppose that really matters.  -- paulus */
        for (i = 0; i < 100; i++) {
                volatile u32 bogus[8];
                bogus[(0+i)%8] = 0x00000000;
                bogus[(1+i)%8] = 0x55555555;
                bogus[(2+i)%8] = 0xFFFFFFFF;
                bogus[(3+i)%8] = 0xAAAAAAAA;
                bogus[(4+i)%8] = 0x33333333;
                bogus[(5+i)%8] = 0xCCCCCCCC;
                bogus[(6+i)%8] = 0xCCCCCCCC;
                bogus[(7+i)%8] = 0x33333333;
                wmb();
                asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
                mb();
                if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
                        return PSURGE_DUAL;
        }
        return type;
}
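
/*
 * Release the other CPUs on a quad card.  procbits is the complement
 * of the WHICH_CPU register, i.e. roughly one bit per CPU slot other
 * than the one we are running on; the sequence below toggles the
 * reset, clock-stop and arbitration controls for those slots with
 * settling delays in between.
 */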
static void __init psurge_quad_init(void)
{
        int procbits;

        if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
        procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
        if (psurge_type == PSURGE_QUAD_ICEGRASS)
                PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
        else
                PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
        mdelay(33);
        out_8(psurge_sec_intr, ~0);
        PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
        PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
        if (psurge_type != PSURGE_QUAD_ICEGRASS)
                PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
        PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
        mdelay(33);
        PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
        mdelay(33);
        PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
        mdelay(33);
}

static int __init smp_psurge_probe(void)
{
        int i, ncpus;
        struct device_node *dn;

        /* We don't do SMP on the PPC601 -- paulus */
        if (PVR_VER(mfspr(SPRN_PVR)) == 1)
                return 1;

        /*
         * The powersurge cpu board can be used in the generation
         * of powermacs that have a socket for an upgradeable cpu card,
         * including the 7500, 8500, 9500, 9600.
         * The device tree doesn't tell you if you have 2 cpus because
         * OF doesn't know anything about the 2nd processor.
         * Instead we look for magic bits in magic registers,
         * in the hammerhead memory controller in the case of the
         * dual-cpu powersurge board.  -- paulus.
         */
        dn = of_find_node_by_name(NULL, "hammerhead");
        if (dn == NULL)
                return 1;
        of_node_put(dn);

        hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
        quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
        psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

        psurge_type = psurge_quad_probe();
        if (psurge_type != PSURGE_DUAL) {
                psurge_quad_init();
                /* All released cards using this HW design have 4 CPUs */
                ncpus = 4;
                /* Not sure how timebase sync works on those, let's use SW */
                smp_ops->give_timebase = smp_generic_give_timebase;
                smp_ops->take_timebase = smp_generic_take_timebase;
        } else {
                iounmap(quad_base);
                if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
                        /* not a dual-cpu card */
                        iounmap(hhead_base);
                        psurge_type = PSURGE_NONE;
                        return 1;
                }
                ncpus = 2;
        }

        psurge_start = ioremap(PSURGE_START, 4);
        psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

        /* This is necessary because OF doesn't know about the
         * secondary cpu(s), and thus there aren't nodes in the
         * device tree for them, and smp_setup_cpu_maps hasn't
         * set their bits in cpu_present_mask.
         */
        if (ncpus > NR_CPUS)
                ncpus = NR_CPUS;
        for (i = 1; i < ncpus; ++i)
                set_cpu_present(i, true);

        if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

        return ncpus;
}
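
/*
 * Start a powersurge secondary: write its entry point to the
 * PSURGE_START register and poke it with an IPI.  Kicking it also
 * freezes the timebase on these boards, so the code below busy-waits
 * on nops instead of udelay() and, on dual CPU cards, performs the
 * timebase handoff right here before returning.
 */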
static void __init smp_psurge_kick_cpu(int nr)
{
        unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
        unsigned long a, flags;
        int i, j;

        /* Defining this here is evil ... but I prefer hiding that
         * crap to avoid giving people ideas that they can do the
         * same.
         */
        extern volatile unsigned int cpu_callin_map[NR_CPUS];

        /* may need to flush here if secondary bats aren't setup */
        for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
                asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
        asm volatile("sync");

        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

        /* This is going to freeze the timebase, so we disable interrupts */
        local_irq_save(flags);

        out_be32(psurge_start, start);
        mb();

        psurge_set_ipi(nr);

        /*
         * We can't use udelay here because the timebase is now frozen.
         */
        for (i = 0; i < 2000; ++i)
                asm volatile("nop" : : : "memory");
        psurge_clr_ipi(nr);

        /*
         * Also, because the timebase is frozen, we must not return to the
         * caller which will try to do udelay's etc... Instead, we wait -here-
         * for the CPU to call in.
         */
        for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
                for (j = 1; j < 10000; j++)
                        asm volatile("nop" : : : "memory");
                asm volatile("sync" : : : "memory");
        }

        if (!cpu_callin_map[nr])
                goto stuck;

        /* And we do the TB sync here too for standard dual CPU cards */
        if (psurge_type == PSURGE_DUAL) {
                while (!tb_req)
                        barrier();
                tb_req = 0;
                mb();
                timebase = get_tb();
                mb();
                while (timebase)
                        barrier();
                mb();
        }
 stuck:
        /* now interrupt the secondary, restarting both TBs */
        if (psurge_type == PSURGE_DUAL)
                psurge_set_ipi(1);

        if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}

static struct irqaction psurge_irqaction = {
        .handler = psurge_primary_intr,
        .flags = IRQF_DISABLED,
        .name = "primary IPI",
};

static void __init smp_psurge_setup_cpu(int cpu_nr)
{
        if (cpu_nr != 0)
                return;

        /* reset the entry point so if we get another intr we won't
         * try to startup again */
        out_be32(psurge_start, 0x100);
        if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
                printk(KERN_ERR "Couldn't get primary IPI interrupt");
}

void __init smp_psurge_take_timebase(void)
{
        if (psurge_type != PSURGE_DUAL)
                return;

        tb_req = 1;
        mb();
        while (!timebase)
                barrier();
        mb();
        set_tb(timebase >> 32, timebase & 0xffffffff);
        timebase = 0;
        mb();
        set_dec(tb_ticks_per_jiffy/2);
}

void __init smp_psurge_give_timebase(void)
{
        /* Nothing to do here */
}

/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
        .message_pass = smp_psurge_message_pass,
        .probe = smp_psurge_probe,
        .kick_cpu = smp_psurge_kick_cpu,
        .setup_cpu = smp_psurge_setup_cpu,
        .give_timebase = smp_psurge_give_timebase,
        .take_timebase = smp_psurge_take_timebase,
};

#endif /* CONFIG_PPC32 - actually powersurge support */

/*
 * Core 99 and later support
 */
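
/*
 * Timebase handoff protocol: the taker sets tb_req and spins until
 * timebase becomes non-zero; the giver waits for tb_req, freezes the
 * timebase, publishes its value in timebase, then waits for the taker
 * to copy it and clear timebase again before unfreezing.
 */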
static void smp_core99_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        while (!tb_req)
                barrier();
        tb_req = 0;
        (*pmac_tb_freeze)(1);
        mb();
        timebase = get_tb();
        mb();
        while (timebase)
                barrier();
        mb();
        (*pmac_tb_freeze)(0);
        mb();

        local_irq_restore(flags);
}

static void __devinit smp_core99_take_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        tb_req = 1;
        mb();
        while (!timebase)
                barrier();
        mb();
        set_tb(timebase >> 32, timebase & 0xffffffff);
        timebase = 0;
        mb();

        local_irq_restore(flags);
}

#ifdef CONFIG_PPC64
/*
 * G5s enable/disable the timebase via an i2c-connected clock chip.
 */
static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
static u8 pmac_tb_pulsar_addr;

static void smp_core99_cypress_tb_freeze(int freeze)
{
        u8 data;
        int rc;

        /* Strangely, the device-tree says the address is 0xd2, but darwin
         * accesses 0xd0 ...
         */
        pmac_i2c_setmode(pmac_tb_clock_chip_host,
                         pmac_i2c_mode_combined);
        rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
                           0xd0 | pmac_i2c_read,
                           1, 0x81, &data, 1);
        if (rc != 0)
                goto bail;

        data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);

        pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
        rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
                           0xd0 | pmac_i2c_write,
                           1, 0x81, &data, 1);

 bail:
        if (rc != 0) {
                printk(KERN_ERR "Cypress Timebase %s rc: %d\n",
                       freeze ? "freeze" : "unfreeze", rc);
                panic("Timebase freeze failed !\n");
        }
}

static void smp_core99_pulsar_tb_freeze(int freeze)
{
        u8 data;
        int rc;

        pmac_i2c_setmode(pmac_tb_clock_chip_host,
                         pmac_i2c_mode_combined);
        rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
                           pmac_tb_pulsar_addr | pmac_i2c_read,
                           1, 0x2e, &data, 1);
        if (rc != 0)
                goto bail;

        data = (data & 0x88) | (freeze ? 0x11 : 0x22);

        pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
        rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
                           pmac_tb_pulsar_addr | pmac_i2c_write,
                           1, 0x2e, &data, 1);

 bail:
        if (rc != 0) {
                printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
                       freeze ? "freeze" : "unfreeze", rc);
                panic("Timebase freeze failed !\n");
        }
}
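
/*
 * Locate the i2c clock chip in the device-tree and pick the matching
 * freeze routine: "i2c-hwclock" nodes under a "uni-n-i2c" controller,
 * identified by their "reg" address and compatible property (Pulsar
 * at 0xd2 or 0xd4, Cypress CY28508 at 0xd2).
 */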
static void __init smp_core99_setup_i2c_hwsync(int ncpus)
{
        struct device_node *cc = NULL;
        struct device_node *p;
        const char *name = NULL;
        const u32 *reg;
        int ok;

        /* Look for the clock chip */
        while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
                p = of_get_parent(cc);
                ok = p && of_device_is_compatible(p, "uni-n-i2c");
                of_node_put(p);
                if (!ok)
                        continue;

                pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
                if (pmac_tb_clock_chip_host == NULL)
                        continue;
                reg = of_get_property(cc, "reg", NULL);
                if (reg == NULL)
                        continue;
                switch (*reg) {
                case 0xd2:
                        if (of_device_is_compatible(cc, "pulsar-legacy-slewing")) {
                                pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
                                pmac_tb_pulsar_addr = 0xd2;
                                name = "Pulsar";
                        } else if (of_device_is_compatible(cc, "cy28508")) {
                                pmac_tb_freeze = smp_core99_cypress_tb_freeze;
                                name = "Cypress";
                        }
                        break;
                case 0xd4:
                        pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
                        pmac_tb_pulsar_addr = 0xd4;
                        name = "Pulsar";
                        break;
                }
                if (pmac_tb_freeze != NULL)
                        break;
        }
        if (pmac_tb_freeze != NULL) {
                /* Open i2c bus for synchronous access */
                if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
                        printk(KERN_ERR "Failed to open i2c bus for clock"
                               " sync, falling back to software sync !\n");
                        goto no_i2c_sync;
                }
                printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
                       name);
                return;
        }
 no_i2c_sync:
        pmac_tb_freeze = NULL;
        pmac_tb_clock_chip_host = NULL;
}

/*
 * Newer G5s use a platform function
 */
static void smp_core99_pfunc_tb_freeze(int freeze)
{
        struct device_node *cpus;
        struct pmf_args args;

        cpus = of_find_node_by_path("/cpus");
        BUG_ON(cpus == NULL);
        args.count = 1;
        args.u[0].v = !freeze;
        pmf_call_function(cpus, "cpu-timebase", &args);
        of_node_put(cpus);
}

#else /* CONFIG_PPC64 */

/*
 * SMP G4s use a GPIO to enable/disable the timebase.
 */

static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO */

static void smp_core99_gpio_tb_freeze(int freeze)
{
        if (freeze)
                pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
        else
                pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
        pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
}

#endif /* !CONFIG_PPC64 */

/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
static volatile long core99_l2_cache;
static volatile long core99_l3_cache;
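
/*
 * CPU 0 records its L2CR/L3CR values at probe time; each secondary
 * copies them when it comes up so all CPUs run with the same cache
 * configuration.
 */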
static void __devinit core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
        if (!cpu_has_feature(CPU_FTR_L2CR))
                return;

        if (cpu == 0) {
                core99_l2_cache = _get_L2CR();
                printk("CPU0: L2CR is %lx\n", core99_l2_cache);
        } else {
                printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
                _set_L2CR(0);
                _set_L2CR(core99_l2_cache);
                printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
        }

        if (!cpu_has_feature(CPU_FTR_L3CR))
                return;

        if (cpu == 0) {
                core99_l3_cache = _get_L3CR();
                printk("CPU0: L3CR is %lx\n", core99_l3_cache);
        } else {
                printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
                _set_L3CR(0);
                _set_L3CR(core99_l3_cache);
                printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
        }
#endif /* !CONFIG_PPC64 */
}

static void __init smp_core99_setup(int ncpus)
{
#ifdef CONFIG_PPC64
        /* i2c based HW sync on some G5s */
        if (of_machine_is_compatible("PowerMac7,2") ||
            of_machine_is_compatible("PowerMac7,3") ||
            of_machine_is_compatible("RackMac3,1"))
                smp_core99_setup_i2c_hwsync(ncpus);

        /* pfunc based HW sync on recent G5s */
        if (pmac_tb_freeze == NULL) {
                struct device_node *cpus =
                        of_find_node_by_path("/cpus");
                if (cpus &&
                    of_get_property(cpus, "platform-cpu-timebase", NULL)) {
                        pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
                        printk(KERN_INFO "Processor timebase sync using"
                               " platform function\n");
                }
        }

#else /* CONFIG_PPC64 */

        /* GPIO based HW sync on ppc32 Core99 */
        if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) {
                struct device_node *cpu;
                const u32 *tbprop = NULL;

                core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
                cpu = of_find_node_by_type(NULL, "cpu");
                if (cpu != NULL) {
                        tbprop = of_get_property(cpu, "timebase-enable", NULL);
                        if (tbprop)
                                core99_tb_gpio = *tbprop;
                        of_node_put(cpu);
                }
                pmac_tb_freeze = smp_core99_gpio_tb_freeze;
                printk(KERN_INFO "Processor timebase sync using"
                       " GPIO 0x%02x\n", core99_tb_gpio);
        }

#endif /* CONFIG_PPC64 */

        /* No HW timebase sync, fall back to software */
        if (pmac_tb_freeze == NULL) {
                smp_ops->give_timebase = smp_generic_give_timebase;
                smp_ops->take_timebase = smp_generic_take_timebase;
                printk(KERN_INFO "Processor timebase sync using software\n");
        }

#ifndef CONFIG_PPC64
        {
                int i;

                /* XXX should get this from reg properties */
                for (i = 1; i < ncpus; ++i)
                        set_hard_smp_processor_id(i, i);
        }
#endif

        /* 32-bit SMP can't NAP */
        if (!of_machine_is_compatible("MacRISC4"))
                powersave_nap = 0;
}

static int __init smp_core99_probe(void)
{
        struct device_node *cpus;
        int ncpus = 0;

        if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);

        /* Count CPUs in the device-tree */
        for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
                ++ncpus;

        printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);

        /* Nothing more to do if less than 2 of them */
        if (ncpus <= 1)
                return 1;

        /* We need to perform some early initialisations before we can start
         * setting up SMP as we are running before initcalls
         */
        pmac_pfunc_base_install();
        pmac_i2c_init();

        /* Setup various bits like timebase sync method, ability to nap, ... */
        smp_core99_setup(ncpus);

        /* Install IPIs */
        mpic_request_ipis();

        /* Collect l2cr and l3cr values from CPU 0 */
        core99_init_caches(0);

        return ncpus;
}
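
/*
 * Start a Core99 secondary: temporarily patch the 0x100 reset vector
 * with a branch to __secondary_start_pmac_0 + nr*8, reset the CPU
 * through the platform feature call so it takes that vector, then
 * restore the original vector once it is on its way.
 */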
static void __devinit smp_core99_kick_cpu(int nr)
{
        unsigned int save_vector;
        unsigned long target, flags;
        unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);

        if (nr < 0 || nr > 3)
                return;

        if (ppc_md.progress)
                ppc_md.progress("smp_core99_kick_cpu", 0x346);

        local_irq_save(flags);

        /* Save reset vector */
        save_vector = *vector;

        /* Setup fake reset vector that does
         *   b __secondary_start_pmac_0 + nr*8
         */
        target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
        patch_branch(vector, target, BRANCH_SET_LINK);

        /* Put some life in our friend */
        pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);

        /* FIXME: We wait a bit for the CPU to take the exception, I should
         * instead wait for the entry code to set something for me. Well,
         * ideally, all that crap will be done in prom.c and the CPU left
         * in a RAM-based wait loop like CHRP.
         */
        mdelay(1);

        /* Restore our exception vector */
        *vector = save_vector;
        flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);

        local_irq_restore(flags);
        if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
}

static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
        /* Setup L2/L3 */
        if (cpu_nr != 0)
                core99_init_caches(cpu_nr);

        /* Setup openpic */
        mpic_setup_this_cpu();

        if (cpu_nr == 0) {
#ifdef CONFIG_PPC64
                extern void g5_phy_disable_cpu1(void);

                /* Close i2c bus if it was used for tb sync */
                if (pmac_tb_clock_chip_host) {
                        pmac_i2c_close(pmac_tb_clock_chip_host);
                        pmac_tb_clock_chip_host = NULL;
                }

                /* If we didn't start the second CPU, we must take
                 * it off the bus.
                 */
                if (of_machine_is_compatible("MacRISC4") &&
                    num_online_cpus() < 2)
                        g5_phy_disable_cpu1();
#endif /* CONFIG_PPC64 */
                if (ppc_md.progress)
                        ppc_md.progress("core99_setup_cpu 0 done", 0x349);
        }
}

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)

int smp_core99_cpu_disable(void)
{
        set_cpu_online(smp_processor_id(), false);

        /* XXX reset cpu affinity here */
        mpic_cpu_set_priority(0xf);
        asm volatile("mtdec %0" : : "r" (0x7fffffff));
        mb();
        udelay(20);
        asm volatile("mtdec %0" : : "r" (0x7fffffff));
        return 0;
}

static int cpu_dead[NR_CPUS];

void pmac32_cpu_die(void)
{
        local_irq_disable();
        cpu_dead[smp_processor_id()] = 1;
        mb();
        low_cpu_die();
}

void smp_core99_cpu_die(unsigned int cpu)
{
        int timeout;

        timeout = 1000;
        while (!cpu_dead[cpu]) {
                if (--timeout == 0) {
                        printk("CPU %u refused to die!\n", cpu);
                        break;
                }
                msleep(1);
        }
        cpu_dead[cpu] = 0;
}

#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PPC32 */

/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
        .message_pass = smp_mpic_message_pass,
        .probe = smp_core99_probe,
        .kick_cpu = smp_core99_kick_cpu,
        .setup_cpu = smp_core99_setup_cpu,
        .give_timebase = smp_core99_give_timebase,
        .take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
# if defined(CONFIG_PPC32)
        .cpu_disable = smp_core99_cpu_disable,
        .cpu_die = smp_core99_cpu_die,
# endif
# if defined(CONFIG_PPC64)
        .cpu_disable = generic_cpu_disable,
        .cpu_die = generic_cpu_die,
        /* intentionally do *NOT* assign cpu_enable,
         * the generic code will use kick_cpu then! */
# endif
#endif
};
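
/*
 * Select the right smp_ops at boot: machines with a "uni-n", "u3" or
 * "u4" node get the Core99/MPIC ops; on ppc32, anything else is
 * treated as a powersurge-style machine, with the potential secondary
 * CPUs marked possible by hand since Open Firmware doesn't describe
 * them.
 */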
void __init pmac_setup_smp(void)
{
        struct device_node *np;

        /* Check for Core99 */
        np = of_find_node_by_name(NULL, "uni-n");
        if (!np)
                np = of_find_node_by_name(NULL, "u3");
        if (!np)
                np = of_find_node_by_name(NULL, "u4");
        if (np) {
                of_node_put(np);
                smp_ops = &core99_smp_ops;
        }
#ifdef CONFIG_PPC32
        else {
                /* We have to set bits in cpu_possible_mask here since the
                 * secondary CPU(s) aren't in the device tree. Various
                 * things won't be initialized for CPUs not in the possible
                 * map, so we really need to fix it up here.
                 */
                int cpu;

                for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
                        set_cpu_possible(cpu, true);
                smp_ops = &psurge_smp_ops;
        }
#endif /* CONFIG_PPC32 */
}