smp.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878
  1. /*
  2. * SMP support for power macintosh.
  3. *
  4. * We support both the old "powersurge" SMP architecture
  5. * and the current Core99 (G4 PowerMac) machines.
  6. *
  7. * Note that we don't support the very first rev. of
  8. * Apple/DayStar 2 CPUs board, the one with the funky
  9. * watchdog. Hopefully, none of these should be there except
  10. * maybe internally to Apple. I should probably still add some
  11. * code to detect this card though and disable SMP. --BenH.
  12. *
  13. * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
  14. * and Ben Herrenschmidt <benh@kernel.crashing.org>.
  15. *
  16. * Support for DayStar quad CPU cards
  17. * Copyright (C) XLR8, Inc. 1994-2000
  18. *
  19. * This program is free software; you can redistribute it and/or
  20. * modify it under the terms of the GNU General Public License
  21. * as published by the Free Software Foundation; either version
  22. * 2 of the License, or (at your option) any later version.
  23. */
  24. #include <linux/config.h>
  25. #include <linux/kernel.h>
  26. #include <linux/sched.h>
  27. #include <linux/smp.h>
  28. #include <linux/smp_lock.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/kernel_stat.h>
  31. #include <linux/delay.h>
  32. #include <linux/init.h>
  33. #include <linux/spinlock.h>
  34. #include <linux/errno.h>
  35. #include <linux/hardirq.h>
  36. #include <linux/cpu.h>
  37. #include <linux/compiler.h>
  38. #include <asm/ptrace.h>
  39. #include <asm/atomic.h>
  40. #include <asm/irq.h>
  41. #include <asm/page.h>
  42. #include <asm/pgtable.h>
  43. #include <asm/sections.h>
  44. #include <asm/io.h>
  45. #include <asm/prom.h>
  46. #include <asm/smp.h>
  47. #include <asm/machdep.h>
  48. #include <asm/pmac_feature.h>
  49. #include <asm/time.h>
  50. #include <asm/mpic.h>
  51. #include <asm/cacheflush.h>
  52. #include <asm/keylargo.h>
  53. #include <asm/pmac_low_i2c.h>
  54. #undef DEBUG
  55. #ifdef DEBUG
  56. #define DBG(fmt...) udbg_printf(fmt)
  57. #else
  58. #define DBG(fmt...)
  59. #endif
  60. extern void __secondary_start_pmac_0(void);
  61. #ifdef CONFIG_PPC32
  62. /* Sync flag for HW tb sync */
  63. static volatile int sec_tb_reset = 0;
  64. /*
  65. * Powersurge (old powermac SMP) support.
  66. */
  67. /* Addresses for powersurge registers */
  68. #define HAMMERHEAD_BASE 0xf8000000
  69. #define HHEAD_CONFIG 0x90
  70. #define HHEAD_SEC_INTR 0xc0
  71. /* register for interrupting the primary processor on the powersurge */
  72. /* N.B. this is actually the ethernet ROM! */
  73. #define PSURGE_PRI_INTR 0xf3019000
  74. /* register for storing the start address for the secondary processor */
  75. /* N.B. this is the PCI config space address register for the 1st bridge */
  76. #define PSURGE_START 0xf2800000
  77. /* Daystar/XLR8 4-CPU card */
  78. #define PSURGE_QUAD_REG_ADDR 0xf8800000
  79. #define PSURGE_QUAD_IRQ_SET 0
  80. #define PSURGE_QUAD_IRQ_CLR 1
  81. #define PSURGE_QUAD_IRQ_PRIMARY 2
  82. #define PSURGE_QUAD_CKSTOP_CTL 3
  83. #define PSURGE_QUAD_PRIMARY_ARB 4
  84. #define PSURGE_QUAD_BOARD_ID 6
  85. #define PSURGE_QUAD_WHICH_CPU 7
  86. #define PSURGE_QUAD_CKSTOP_RDBK 8
  87. #define PSURGE_QUAD_RESET_CTL 11
  88. #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
  89. #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
  90. #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
  91. #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
  92. /* virtual addresses for the above */
  93. static volatile u8 __iomem *hhead_base;
  94. static volatile u8 __iomem *quad_base;
  95. static volatile u32 __iomem *psurge_pri_intr;
  96. static volatile u8 __iomem *psurge_sec_intr;
  97. static volatile u32 __iomem *psurge_start;
  98. /* values for psurge_type */
  99. #define PSURGE_NONE -1
  100. #define PSURGE_DUAL 0
  101. #define PSURGE_QUAD_OKEE 1
  102. #define PSURGE_QUAD_COTTON 2
  103. #define PSURGE_QUAD_ICEGRASS 3
  104. /* what sort of powersurge board we have */
  105. static int psurge_type = PSURGE_NONE;
  106. /*
  107. * Set and clear IPIs for powersurge.
  108. */
  109. static inline void psurge_set_ipi(int cpu)
  110. {
  111. if (psurge_type == PSURGE_NONE)
  112. return;
  113. if (cpu == 0)
  114. in_be32(psurge_pri_intr);
  115. else if (psurge_type == PSURGE_DUAL)
  116. out_8(psurge_sec_intr, 0);
  117. else
  118. PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
  119. }
/*
 * Clear a pending IPI.  Only secondary cpus need an explicit clear
 * here; nothing is done for cpu 0 or when no powersurge board was
 * detected (PSURGE_NONE).
 */
static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			/* fall through -- nothing more to do */
		case PSURGE_NONE:
			break;
		default:
			/* quad boards */
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}
  133. /*
  134. * On powersurge (old SMP powermac architecture) we don't have
  135. * separate IPIs for separate messages like openpic does. Instead
  136. * we have a bitmap for each processor, where a 1 bit means that
  137. * the corresponding message is pending for that processor.
  138. * Ideally each cpu's entry would be in a different cache line.
  139. * -- paulus.
  140. */
  141. static unsigned long psurge_smp_message[NR_CPUS];
  142. void psurge_smp_message_recv(struct pt_regs *regs)
  143. {
  144. int cpu = smp_processor_id();
  145. int msg;
  146. /* clear interrupt */
  147. psurge_clr_ipi(cpu);
  148. if (num_online_cpus() < 2)
  149. return;
  150. /* make sure there is a message there */
  151. for (msg = 0; msg < 4; msg++)
  152. if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
  153. smp_message_recv(msg, regs);
  154. }
  155. irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
  156. {
  157. psurge_smp_message_recv(regs);
  158. return IRQ_HANDLED;
  159. }
  160. static void smp_psurge_message_pass(int target, int msg)
  161. {
  162. int i;
  163. if (num_online_cpus() < 2)
  164. return;
  165. for (i = 0; i < NR_CPUS; i++) {
  166. if (!cpu_online(i))
  167. continue;
  168. if (target == MSG_ALL
  169. || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
  170. || target == i) {
  171. set_bit(msg, &psurge_smp_message[i]);
  172. psurge_set_ipi(i);
  173. }
  174. }
  175. }
/*
 * Determine whether a quad card is present.  We read the board ID
 * register, force the data bus to change to something else, and read
 * it again.  If it's stable, then the register probably exists (ugh !)
 *
 * Returns the detected quad board type, or PSURGE_DUAL when no quad
 * card could be identified.
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	/* The ID must be in range and read back stable twice in a row */
	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters. -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		/* Drive a rotating pattern onto the data bus and flush
		 * it out of the cache, then re-read the board ID; it
		 * should be unchanged if the register really exists. */
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
/*
 * Initialize the secondary cpus on a Daystar/XLR8 quad card: take
 * them out of clock-stop/reset and clear any latched IPIs.  This is
 * an order-sensitive hardware sequence (note the mdelay() spacing);
 * do not reorder.
 */
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);

	/* Mask of cpus on the card (the register reads back inverted) */
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);

	/* Icegrass uses reset control here; the others use clock-stop */
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);

	/* Clear any pending secondary interrupts / IPIs */
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);

	/* Hold the cpus in reset while re-enabling their clocks, then
	 * release reset and give them the bus arbiter */
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
/*
 * Probe for a powersurge SMP configuration.  Maps the hammerhead,
 * quad, start and primary-interrupt registers as needed and marks
 * the extra cpus present/possible.
 *
 * Returns the number of cpus found, or 1 when this is not (or
 * cannot be) an SMP powersurge machine.
 */
static int __init smp_psurge_probe(void)
{
	int i, ncpus;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return 1;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board. -- paulus.
	 */
	if (find_devices("hammerhead") == NULL)
		return 1;

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
	} else {
		/* No quad card: the quad register mapping is not needed */
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return 1;
		}
		ncpus = 2;
	}

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/*
	 * This is necessary because OF doesn't know about the
	 * secondary cpu(s), and thus there aren't nodes in the
	 * device tree for them, and smp_setup_cpu_maps hasn't
	 * set their bits in cpu_possible_map and cpu_present_map.
	 */
	if (ncpus > NR_CPUS)
		ncpus = NR_CPUS;
	for (i = 1; i < ncpus ; ++i) {
		cpu_set(i, cpu_present_map);
		cpu_set(i, cpu_possible_map);
		set_hard_smp_processor_id(i, i);
	}

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

	return ncpus;
}
/*
 * Start secondary cpu 'nr': write its physical entry point into the
 * PSURGE_START register and pulse an IPI at it.
 */
static void __init smp_psurge_kick_cpu(int nr)
{
	/* Each secondary has its own 8-byte slot in the entry stub */
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a;

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	out_be32(psurge_start, start);
	mb();

	/* Pulse the IPI: raise it, give the cpu a moment, clear it */
	psurge_set_ipi(nr);
	udelay(10);
	psurge_clr_ipi(nr);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
/*
 * With the dual-cpu powersurge board, the decrementers and timebases
 * of both cpus are frozen after the secondary cpu is started up,
 * until we give the secondary cpu another interrupt. This routine
 * uses this to get the timebases synchronized.
 * -- paulus.
 */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
	int t;

	set_dec(tb_ticks_per_jiffy);
	/* XXX fixme */
	set_tb(0, 0);
	last_jiffy_stamp(cpu_nr) = 0;

	if (cpu_nr > 0) {
		/* Secondary: flag that our TB is reset; the primary's
		 * IPI below will unfreeze both timebases together. */
		mb();
		sec_tb_reset = 1;
		return;
	}

	/* wait for the secondary to have reset its TB before proceeding */
	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
		;

	/* now interrupt the secondary, starting both TBs */
	psurge_set_ipi(1);
}
/* Action for the primary cpu's IPI line, installed on irq 30 by
 * smp_psurge_setup_cpu() */
static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "primary IPI",
};
  333. static void __init smp_psurge_setup_cpu(int cpu_nr)
  334. {
  335. if (cpu_nr == 0) {
  336. /* If we failed to start the second CPU, we should still
  337. * send it an IPI to start the timebase & DEC or we might
  338. * have them stuck.
  339. */
  340. if (num_online_cpus() < 2) {
  341. if (psurge_type == PSURGE_DUAL)
  342. psurge_set_ipi(1);
  343. return;
  344. }
  345. /* reset the entry point so if we get another intr we won't
  346. * try to startup again */
  347. out_be32(psurge_start, 0x100);
  348. if (setup_irq(30, &psurge_irqaction))
  349. printk(KERN_ERR "Couldn't get primary IPI interrupt");
  350. }
  351. if (psurge_type == PSURGE_DUAL)
  352. psurge_dual_sync_tb(cpu_nr);
  353. }
  354. void __init smp_psurge_take_timebase(void)
  355. {
  356. /* Dummy implementation */
  357. }
  358. void __init smp_psurge_give_timebase(void)
  359. {
  360. /* Dummy implementation */
  361. }
/* PowerSurge-style Macs: SMP operations vector, selected when the
 * machine uses the old powersurge architecture */
struct smp_ops_t psurge_smp_ops = {
	.message_pass = smp_psurge_message_pass,
	.probe = smp_psurge_probe,
	.kick_cpu = smp_psurge_kick_cpu,
	.setup_cpu = smp_psurge_setup_cpu,
	.give_timebase = smp_psurge_give_timebase,
	.take_timebase = smp_psurge_take_timebase,
};
  371. #endif /* CONFIG_PPC32 - actually powersurge support */
  372. #ifdef CONFIG_PPC64
  373. /*
  374. * G5s enable/disable the timebase via an i2c-connected clock chip.
  375. */
  376. static struct device_node *pmac_tb_clock_chip_host;
  377. static u8 pmac_tb_pulsar_addr;
  378. static void (*pmac_tb_freeze)(int freeze);
  379. static DEFINE_SPINLOCK(timebase_lock);
  380. static unsigned long timebase;
  381. static void smp_core99_cypress_tb_freeze(int freeze)
  382. {
  383. u8 data;
  384. int rc;
  385. /* Strangely, the device-tree says address is 0xd2, but darwin
  386. * accesses 0xd0 ...
  387. */
  388. pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
  389. rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
  390. 0xd0 | pmac_low_i2c_read,
  391. 0x81, &data, 1);
  392. if (rc != 0)
  393. goto bail;
  394. data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
  395. pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
  396. rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
  397. 0xd0 | pmac_low_i2c_write,
  398. 0x81, &data, 1);
  399. bail:
  400. if (rc != 0) {
  401. printk("Cypress Timebase %s rc: %d\n",
  402. freeze ? "freeze" : "unfreeze", rc);
  403. panic("Timebase freeze failed !\n");
  404. }
  405. }
/*
 * Freeze (freeze != 0) or unfreeze the timebase via a Pulsar clock
 * chip at i2c address pmac_tb_pulsar_addr: read-modify-write its
 * control register over low-level i2c.  An i2c failure is fatal, as
 * the cpus would otherwise be left out of sync.
 */
static void smp_core99_pulsar_tb_freeze(int freeze)
{
	u8 data;
	int rc;

	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
			       pmac_tb_pulsar_addr | pmac_low_i2c_read,
			       0x2e, &data, 1);
	if (rc != 0)
		goto bail;

	data = (data & 0x88) | (freeze ? 0x11 : 0x22);

	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
			       pmac_tb_pulsar_addr | pmac_low_i2c_write,
			       0x2e, &data, 1);
 bail:
	if (rc != 0) {
		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
		       freeze ? "freeze" : "unfreeze", rc);
		panic("Timebase freeze failed !\n");
	}
}
/*
 * Giver side of the G5 timebase hand-off: freeze the TB through the
 * clock chip, publish the frozen value in 'timebase', wait for the
 * taker to consume it (the taker zeroes 'timebase'), then unfreeze.
 */
static void smp_core99_give_timebase(void)
{
	/* Open i2c bus for synchronous access */
	if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
		panic("Can't open i2c for TB sync !\n");

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	/* Wait for smp_core99_take_timebase() to zero 'timebase' */
	while (timebase)
		barrier();

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(0);
	spin_unlock(&timebase_lock);

	/* Close i2c bus */
	pmac_low_i2c_close(pmac_tb_clock_chip_host);
}
/*
 * Taker side of the G5 timebase hand-off: spin until the giver has
 * published the frozen timebase, load it into our TB registers, and
 * zero 'timebase' to signal completion.
 */
static void __devinit smp_core99_take_timebase(void)
{
	while (!timebase)
		barrier();

	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}
/*
 * Locate the i2c clock chip used for HW timebase freeze on the G5
 * machines that support it, and pick the matching freeze routine
 * (Pulsar or Cypress).  When no usable chip is found, fall back to
 * the generic software timebase sync.
 */
static void __init smp_core99_setup(int ncpus)
{
	struct device_node *cc = NULL;
	struct device_node *p;
	u32 *reg;
	int ok;

	/* HW sync only on these platforms */
	if (!machine_is_compatible("PowerMac7,2") &&
	    !machine_is_compatible("PowerMac7,3") &&
	    !machine_is_compatible("RackMac3,1"))
		return;

	/* Look for the clock chip */
	while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
		/* Only consider chips hanging off a uni-n i2c bus */
		p = of_get_parent(cc);
		ok = p && device_is_compatible(p, "uni-n-i2c");
		of_node_put(p);
		if (!ok)
			continue;

		reg = (u32 *)get_property(cc, "reg", NULL);
		if (reg == NULL)
			continue;

		/* Identify the chip by its i2c address */
		switch (*reg) {
		case 0xd2:
			if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
				pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
				pmac_tb_pulsar_addr = 0xd2;
				printk(KERN_INFO "Timebase clock is Pulsar chip\n");
			} else if (device_is_compatible(cc, "cy28508")) {
				pmac_tb_freeze = smp_core99_cypress_tb_freeze;
				printk(KERN_INFO "Timebase clock is Cypress chip\n");
			}
			break;
		case 0xd4:
			pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
			pmac_tb_pulsar_addr = 0xd4;
			printk(KERN_INFO "Timebase clock is Pulsar chip\n");
			break;
		}

		if (pmac_tb_freeze != NULL) {
			/* Keep a reference on the host bus node for the
			 * freeze routines; release the chip node. */
			pmac_tb_clock_chip_host = of_get_parent(cc);
			of_node_put(cc);
			break;
		}
	}
	if (pmac_tb_freeze == NULL) {
		smp_ops->give_timebase = smp_generic_give_timebase;
		smp_ops->take_timebase = smp_generic_take_timebase;
	}
}
  504. /* nothing to do here, caches are already set up by service processor */
  505. static inline void __devinit core99_init_caches(int cpu)
  506. {
  507. }
  508. #else /* CONFIG_PPC64 */
  509. /*
  510. * SMP G4 powermacs use a GPIO to enable/disable the timebase.
  511. */
  512. static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
  513. static unsigned int pri_tb_hi, pri_tb_lo;
  514. static unsigned int pri_tb_stamp;
/*
 * Giver side of the G4 timebase hand-off: freeze the TB via the
 * timebase-enable GPIO, publish tbu/tbl and the jiffy stamp for the
 * secondary, then unfreeze.  The handshake uses sec_tb_reset:
 * 1 = secondary waiting, 2 = values published, 0 = secondary done.
 * Not __init: also called in the sleep/wakeup code.
 */
void smp_core99_give_timebase(void)
{
	unsigned long flags;
	unsigned int t;

	/* wait for the secondary to be in take_timebase */
	for (t = 100000; t > 0 && !sec_tb_reset; --t)
		udelay(10);
	if (!sec_tb_reset) {
		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
		return;
	}

	/* freeze the timebase and read it */
	/* disable interrupts so the timebase is disabled for the
	   shortest possible time */
	local_irq_save(flags);
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	mb();
	pri_tb_hi = get_tbu();
	pri_tb_lo = get_tbl();
	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
	mb();

	/* tell the secondary we're ready */
	sec_tb_reset = 2;
	mb();

	/* wait for the secondary to have taken it */
	/* note: can't use udelay here, since it needs the timebase running */
	for (t = 10000000; t > 0 && sec_tb_reset; --t)
		barrier();
	if (sec_tb_reset)
		/* XXX BUG_ON here? */
		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");

	/* Now, restart the timebase by leaving the GPIO to an open collector */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	local_irq_restore(flags);
}
/*
 * Taker side of the G4 timebase hand-off (see sec_tb_reset protocol
 * in smp_core99_give_timebase above).
 * Not __init: also called in the sleep/wakeup code.
 */
void smp_core99_take_timebase(void)
{
	unsigned long flags;

	/* tell the primary we're here */
	sec_tb_reset = 1;
	mb();

	/* wait for the primary to set pri_tb_hi/lo */
	while (sec_tb_reset < 2)
		mb();

	/* set our stuff the same as the primary */
	local_irq_save(flags);
	set_dec(1);
	set_tb(pri_tb_hi, pri_tb_lo);
	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
	mb();

	/* tell the primary we're done */
	sec_tb_reset = 0;
	mb();
	local_irq_restore(flags);
}
  574. /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
  575. volatile static long int core99_l2_cache;
  576. volatile static long int core99_l3_cache;
/*
 * Propagate the boot cpu's L2CR (and L3CR when the cpu has one) to a
 * secondary: cpu 0 records its values; other cpus clear their cache
 * control register and then program it to match cpu 0's.
 */
static void __devinit core99_init_caches(int cpu)
{
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0) {
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
}
  602. static void __init smp_core99_setup(int ncpus)
  603. {
  604. struct device_node *cpu;
  605. u32 *tbprop = NULL;
  606. int i;
  607. core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
  608. cpu = of_find_node_by_type(NULL, "cpu");
  609. if (cpu != NULL) {
  610. tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
  611. if (tbprop)
  612. core99_tb_gpio = *tbprop;
  613. of_node_put(cpu);
  614. }
  615. /* XXX should get this from reg properties */
  616. for (i = 1; i < ncpus; ++i)
  617. smp_hw_index[i] = i;
  618. powersave_nap = 0;
  619. }
  620. #endif
  621. static int __init smp_core99_probe(void)
  622. {
  623. struct device_node *cpus;
  624. int ncpus = 0;
  625. if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
  626. /* Count CPUs in the device-tree */
  627. for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
  628. ++ncpus;
  629. printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
  630. /* Nothing more to do if less than 2 of them */
  631. if (ncpus <= 1)
  632. return 1;
  633. smp_core99_setup(ncpus);
  634. mpic_request_ipis();
  635. core99_init_caches(0);
  636. return ncpus;
  637. }
  638. static void __devinit smp_core99_kick_cpu(int nr)
  639. {
  640. unsigned int save_vector;
  641. unsigned long new_vector;
  642. unsigned long flags;
  643. volatile unsigned int *vector
  644. = ((volatile unsigned int *)(KERNELBASE+0x100));
  645. if (nr < 0 || nr > 3)
  646. return;
  647. if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
  648. local_irq_save(flags);
  649. local_irq_disable();
  650. /* Save reset vector */
  651. save_vector = *vector;
  652. /* Setup fake reset vector that does
  653. * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
  654. */
  655. new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
  656. *vector = 0x48000002 + new_vector - KERNELBASE;
  657. /* flush data cache and inval instruction cache */
  658. flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
  659. /* Put some life in our friend */
  660. pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
  661. /* FIXME: We wait a bit for the CPU to take the exception, I should
  662. * instead wait for the entry code to set something for me. Well,
  663. * ideally, all that crap will be done in prom.c and the CPU left
  664. * in a RAM-based wait loop like CHRP.
  665. */
  666. mdelay(1);
  667. /* Restore our exception vector */
  668. *vector = save_vector;
  669. flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
  670. local_irq_restore(flags);
  671. if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
  672. }
/*
 * Per-cpu bring-up on Core99: program L2/L3 on secondaries and set
 * up the MPIC for this cpu.  On the boot cpu of a G5, also take an
 * unstarted second cpu off the bus.
 */
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
		extern void g5_phy_disable_cpu1(void);

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
		if (ppc_md.progress)
			ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}
  693. #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
/*
 * Take the calling cpu out of service for hotplug: remove it from
 * the online map, raise the MPIC task priority to mask interrupts,
 * and push the decrementer far into the future so no decrementer
 * interrupt fires while the cpu is going down.  Always returns 0.
 */
int smp_core99_cpu_disable(void)
{
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* XXX reset cpu affinity here */
	mpic_cpu_set_priority(0xf);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	mb();
	udelay(20);
	/* Reload once more in case a decrementer event slipped in */
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	return 0;
}
  705. extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
  706. static int cpu_dead[NR_CPUS];
/*
 * Runs on the dying cpu itself: flag ourselves dead (observed by
 * smp_core99_cpu_die() on another cpu) and enter the low-level
 * sleep code, which does not return.
 */
void cpu_die(void)
{
	local_irq_disable();
	cpu_dead[smp_processor_id()] = 1;
	mb();
	low_cpu_die();
}
  714. void smp_core99_cpu_die(unsigned int cpu)
  715. {
  716. int timeout;
  717. timeout = 1000;
  718. while (!cpu_dead[cpu]) {
  719. if (--timeout == 0) {
  720. printk("CPU %u refused to die!\n", cpu);
  721. break;
  722. }
  723. msleep(1);
  724. }
  725. cpu_dead[cpu] = 0;
  726. }
  727. #endif
/* Core99 Macs (dual G4s and G5s): SMP operations vector, with cpu
 * hotplug hooks only on 32-bit kernels */
struct smp_ops_t core99_smp_ops = {
	.message_pass = smp_mpic_message_pass,
	.probe = smp_core99_probe,
	.kick_cpu = smp_core99_kick_cpu,
	.setup_cpu = smp_core99_setup_cpu,
	.give_timebase = smp_core99_give_timebase,
	.take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
	.cpu_disable = smp_core99_cpu_disable,
	.cpu_die = smp_core99_cpu_die,
#endif
};