hdpu.c

/*
 * Board setup routines for the Sky Computers HDPU Compute Blade.
 *
 * Written by Brian Waite <waite@skycomputers.com>
 *
 * Based on code done by - Mark A. Greer <mgreer@mvista.com>
 *                         Rabeeh Khoury - rabeeh@galileo.co.il
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/smp.h>

#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/todc.h>
#include <asm/mv64x60.h>
#include <asm/ppcboot.h>
#include <platforms/hdpu.h>

#include <linux/mv643xx.h>
#include <linux/hdpu_features.h>
#include <linux/device.h>
#include <linux/mtd/physmap.h>

#define BOARD_VENDOR	"Sky Computers"
#define BOARD_MACHINE	"HDPU-CB-A"

bd_t ppcboot_bd;
int ppcboot_bd_valid = 0;

static mv64x60_handle_t bh;

extern char cmd_line[];

unsigned long hdpu_find_end_of_memory(void);
void hdpu_mpsc_progress(char *s, unsigned short hex);
void hdpu_heartbeat(void);

static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7);
static void hdpu_set_l1pe(void);
static void hdpu_cpustate_set(unsigned char new_state);

#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(timebase_lock);
static unsigned int timebase_upper = 0, timebase_lower = 0;
extern int smp_tb_synchronized;

void __devinit hdpu_tben_give(void);
void __devinit hdpu_tben_take(void);
#endif
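
/*
 * PCI interrupt routing.  PCI_IRQ_TABLE_LOOKUP indexes
 * pci_irq_table[idsel - min_idsel][pin - 1], so each hose below maps
 * every populated IDSEL to the single interrupt line wired to that bus
 * (HDPU_PCI_0_IRQ or HDPU_PCI_1_IRQ).
 */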

static int __init
hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);

	if (hose->index == 0) {
		static char pci_irq_table[][4] = {
			{HDPU_PCI_0_IRQ, 0, 0, 0},
			{HDPU_PCI_0_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	} else {
		static char pci_irq_table[][4] = {
			{HDPU_PCI_1_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	}
}

static void __init hdpu_intr_setup(void)
{
	mv64x60_write(&bh, MV64x60_GPP_IO_CNTL,
		      (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
		       (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) |
		       (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) |
		       (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) |
		       (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29)));

	/* XXXX Erratum FEr PCI-#8 */
	mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9));

	/*
	 * Dismiss and then enable interrupt on GPP interrupt cause
	 * for CPU #0
	 */
	mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13)));
	mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13));

	/*
	 * Dismiss and then enable interrupt on CPU #0 high cause reg;
	 * BIT25 summarizes GPP interrupts 8-15.
	 */
	mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25));
}
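
/*
 * Program the MV64360 CPU address-decode windows for the on-board
 * devices (boot flash, the TBEN register, the Nexus ID register and the
 * internal SRAM), then apply the chip tuning used on this board:
 * pipelining, snoop pipelining, DRAM read-buffer assignment and
 * internal SRAM parity.
 */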

static void __init hdpu_setup_peripherals(void)
{
	unsigned int val;

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
				 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
				 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0);

	mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL,
			 ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24)));

	/* Enable pipelining */
	mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13));
	/* Enable snoop pipelining */
	mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24));

	/*
	 * Change DRAM read buffer assignment:
	 * read buffer 0 is dedicated to the CPU, everything else
	 * uses read buffer 1.
	 */
	val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG);
	val = val & 0x03ffffff;
	val = val | 0xf8000000;
	mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val);

	/*
	 * Configure internal SRAM:
	 * - cache-coherent write back only if
	 *   CONFIG_MV64360_SRAM_CACHE_COHERENT is set
	 * - parity enabled
	 * - parity error propagation enabled
	 * - arbitration not parked for CPU only
	 * All other bits are reserved.
	 */
#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2);
#else
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0);
#endif

	hdpu_intr_setup();
}
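
/*
 * Describe both PCI hoses to the common MV64x60 bridge code.  Snoop and
 * burst settings for the Ethernet, MPSC, IDMA and PCI masters depend on
 * whether the cache is coherent (CONFIG_NOT_COHERENT_CACHE).  Once the
 * bridge is initialized, both buses are scanned with pciauto_bus_scan()
 * and hose B is numbered immediately after hose A.
 */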

static void __init hdpu_setup_bridge(void)
{
	struct mv64x60_setup_info si;
	int i;

	memset(&si, 0, sizeof(si));

	si.phys_reg_base = HDPU_BRIDGE_REG_BASE;

	si.pci_0.enable_bus = 1;
	si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR;
	si.pci_0.pci_io.pci_base_hi = 0;
	si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR;
	si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE;
	si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR;
	si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR;
	si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR;
	si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE;
	si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_cmd_bits = 0;
	si.pci_0.latency_timer = 0x80;

	si.pci_1.enable_bus = 1;
	si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR;
	si.pci_1.pci_io.pci_base_hi = 0;
	si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR;
	si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE;
	si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR;
	si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR;
	si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR;
	si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE;
	si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_cmd_bits = 0;
	si.pci_1.latency_timer = 0x80;

	for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) {
#if defined(CONFIG_NOT_COHERENT_CACHE)
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE;
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE;
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#else
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB;	/* errata */
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB;	/* errata */
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB;	/* errata */

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#endif
	}

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI);

	/* Lookup PCI host bridges */
	mv64x60_init(&bh, &si);
	pci_dram_offset = 0;	/* System mem at same addr on PCI & cpu bus */

	ppc_md.pci_swizzle = common_swizzle;
	ppc_md.pci_map_irq = hdpu_map_irq;

	mv64x60_set_bus(&bh, 0, 0);
	bh.hose_a->first_busno = 0;
	bh.hose_a->last_busno = 0xff;
	bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0);

	bh.hose_b->first_busno = bh.hose_a->last_busno + 1;
	mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno);
	bh.hose_b->last_busno = 0xff;
	bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b,
						 bh.hose_b->first_busno);

	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG);

	/*
	 * Enabling of PCI internal-vs-external arbitration
	 * is a platform- and errata-dependent decision.
	 */
	return;
}

#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
static void __init hdpu_early_serial_map(void)
{
#ifdef CONFIG_KGDB
	static char first_time = 1;

#if defined(CONFIG_KGDB_TTYS0)
#define KGDB_PORT 0
#elif defined(CONFIG_KGDB_TTYS1)
#define KGDB_PORT 1
#else
#error "Invalid kgdb_tty port"
#endif

	if (first_time) {
		gt_early_mpsc_init(KGDB_PORT,
				   B9600 | CS8 | CREAD | HUPCL | CLOCAL);
		first_time = 0;
	}

	return;
#endif
}
#endif

static void hdpu_init2(void)
{
	return;
}

#if defined(CONFIG_MV643XX_ETH)
static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
{
	struct mv643xx_eth_platform_data *eth_pd;

	eth_pd = pd->dev.platform_data;
	eth_pd->force_phy_addr = 1;
	eth_pd->phy_addr = pd->id;
	eth_pd->speed = SPEED_100;
	eth_pd->duplex = DUPLEX_FULL;
	eth_pd->tx_queue_size = 400;
	eth_pd->rx_queue_size = 800;
}
#endif

static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd)
{
	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;
	pdata->max_idle = 40;
	if (ppcboot_bd_valid)
		pdata->default_baud = ppcboot_bd.bi_baudrate;
	else
		pdata->default_baud = HDPU_DEFAULT_BAUD;
	pdata->brg_clk_src = HDPU_MPSC_CLK_SRC;
	pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ;
}

#if defined(CONFIG_HDPU_FEATURES)
static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd)
{
	struct platform_device *pds[1];

	pds[0] = pd;
	mv64x60_pd_fixup(&bh, pds, 1);
}
#endif
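
/*
 * Driver-core hook: platform_notify() is invoked for every device added
 * to the system; dev_map below matches the bridge's platform devices
 * (MPSC, Ethernet, cpustate) by bus_id and applies the platform_data
 * fixups defined above.
 */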

static int hdpu_platform_notify(struct device *dev)
{
	static struct {
		char *bus_id;
		void (*rtn)(struct platform_device *pdev);
	} dev_map[] = {
		{MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata},
#if defined(CONFIG_MV643XX_ETH)
		{MV643XX_ETH_NAME ".0", hdpu_fixup_eth_pdata},
#endif
#if defined(CONFIG_HDPU_FEATURES)
		{HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata},
#endif
	};
	struct platform_device *pdev;
	int i;

	if (dev && dev->bus_id)
		for (i = 0; i < ARRAY_SIZE(dev_map); i++)
			if (!strncmp(dev->bus_id, dev_map[i].bus_id,
				     BUS_ID_SIZE)) {
				pdev = container_of(dev,
						    struct platform_device,
						    dev);
				dev_map[i].rtn(pdev);
			}

	return 0;
}

static void __init hdpu_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
#ifdef CONFIG_ROOT_NFS
		ROOT_DEV = Root_NFS;
#else
		ROOT_DEV = Root_SDA2;
#endif

	ppc_md.heartbeat = hdpu_heartbeat;
	ppc_md.heartbeat_reset = HZ;
	ppc_md.heartbeat_count = 1;

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enabling caches", 0);

	/* Enable L1 parity bits */
	hdpu_set_l1pe();

	/* Enable L2 and L3 caches (if 745x) */
	_set_L2CR(0x80080000);

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: setting up bridge", 0);

	hdpu_setup_bridge();
	hdpu_setup_peripherals();

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
	hdpu_early_serial_map();
#endif

	printk("SKY HDPU Compute Blade\n");

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: exit", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK);
	return;
}

static void __init hdpu_init_irq(void)
{
	mv64360_init_irq();
}
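
/*
 * SPR 1011 is the ICTRL register on the 745x family; the ICTRL_EICE,
 * ICTRL_EDC and ICTRL_EICP bits enable L1 instruction/data cache parity
 * error detection and checking, which is what the "l1pe" name refers to.
 */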

static void __init hdpu_set_l1pe(void)
{
	unsigned long ictrl;

	asm volatile ("mfspr %0, 1011":"=r" (ictrl):);
	ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP;
	asm volatile ("mtspr 1011, %0"::"r" (ictrl));
}

/*
 * Set BAT 1 to map 0xf1000000 to end of physical memory space.
 */
static __inline__ void hdpu_set_bat(void)
{
	mb();
	mtspr(SPRN_DBAT1U, 0xf10001fe);
	mtspr(SPRN_DBAT1L, 0xf100002a);
	mb();

	return;
}

unsigned long __init hdpu_find_end_of_memory(void)
{
	return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
				    MV64x60_TYPE_MV64360);
}
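
/*
 * Board reset path: report the state on the status GPP lines, disable
 * and invalidate the L2, flush and disable the L1 caches via HID0, and
 * finally clear GPP pin 3 ("the reset bit"), after which the CPU simply
 * spins until the reset takes effect.
 */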

static void hdpu_reset_board(void)
{
	volatile int infinite = 1;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET);

	local_irq_disable();

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) |
						   (1 << 5) | (1 << 6)));

	/* disable and invalidate the L2 cache */
	_set_L2CR(0);
	_set_L2CR(0x200000);

	/* flush and disable L1 I/D cache */
	__asm__ __volatile__
	    ("\n"
	     "mfspr	3, 1008\n"	/* r3 = HID0 */
	     "li	5, 0\n"		/* r5 = ICE|DCE|ICFI|DCFI mask */
	     "ori	5, 5, 0xcc00\n"
	     "ori	4, 3, 0xc00\n"	/* r4 = HID0 with ICFI|DCFI set */
	     "andc	5, 3, 5\n"	/* r5 = HID0 with caches disabled */
	     "sync\n"
	     "mtspr	1008, 4\n"	/* flush and invalidate... */
	     "isync\n"
	     "sync\n"
	     "mtspr	1008, 5\n"	/* ...then disable */
	     "isync\n"
	     "sync\n");

	/* Hit the reset bit */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3));

	while (infinite)
		infinite = infinite;

	return;
}

static void hdpu_restart(char *cmd)
{
	volatile ulong i = 10000000;

	hdpu_reset_board();

	while (i-- > 0) ;
	panic("restart failed\n");
}

static void hdpu_halt(void)
{
	local_irq_disable();

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT);

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) |
						   (1 << 6)));

	while (1) ;
	/* NOTREACHED */
}

static void hdpu_power_off(void)
{
	hdpu_halt();
	/* NOTREACHED */
}

static int hdpu_show_cpuinfo(struct seq_file *m)
{
	uint pvid;

	pvid = mfspr(SPRN_PVR);
	seq_printf(m, "vendor\t\t: Sky Computers\n");
	seq_printf(m, "machine\t\t: HDPU Compute Blade\n");
	seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
		   pvid, (pvid & (1 << 15) ? "IBM" : "Motorola"));

	return 0;
}

static void __init hdpu_calibrate_decr(void)
{
	ulong freq;

	if (ppcboot_bd_valid)
		freq = ppcboot_bd.bi_busfreq / 4;
	else
		freq = 133000000;

	printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       freq / 1000000, freq % 1000000);

	tb_ticks_per_jiffy = freq / HZ;
	tb_to_us = mulhwu_scale_factor(freq, 1000000);

	return;
}
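
/*
 * Decode the boot parameters handed over by the ppcboot/U-Boot
 * firmware: r3 points at a bd_t board-info block, r4/r5 delimit an
 * optional initrd image and r6/r7 delimit the boot command line.
 * Pointers passed as physical addresses (high nibble zero) are first
 * converted to kernel virtual addresses.
 */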

static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7)
{
	bd_t *bd = NULL;
	char *cmdline_start = NULL;
	int cmdline_len = 0;

	if (r3) {
		if ((r3 & 0xf0000000) == 0)
			r3 += KERNELBASE;
		if ((r3 & 0xf0000000) == KERNELBASE) {
			bd = (void *)r3;

			memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd));
			ppcboot_bd_valid = 1;
		}
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (r4 && r5 && r5 > r4) {
		if ((r4 & 0xf0000000) == 0)
			r4 += KERNELBASE;
		if ((r5 & 0xf0000000) == 0)
			r5 += KERNELBASE;
		if ((r4 & 0xf0000000) == KERNELBASE) {
			initrd_start = r4;
			initrd_end = r5;
			initrd_below_start_ok = 1;
		}
	}
#endif				/* CONFIG_BLK_DEV_INITRD */

	if (r6 && r7 && r7 > r6) {
		if ((r6 & 0xf0000000) == 0)
			r6 += KERNELBASE;
		if ((r7 & 0xf0000000) == 0)
			r7 += KERNELBASE;
		if ((r6 & 0xf0000000) == KERNELBASE) {
			cmdline_start = (void *)r6;
			cmdline_len = (r7 - r6);
			strncpy(cmd_line, cmdline_start, cmdline_len);
		}
	}
}

void hdpu_heartbeat(void)
{
	if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
		mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5));
	else
		mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5));

	ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
}

static void __init hdpu_map_io(void)
{
	io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO);
}
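
/*
 * SMP support.  The two CPUs signal each other through the MV64360
 * per-CPU doorbell registers: smp_hdpu_message_pass() sets one of the
 * low four doorbell bits on the target CPU, and the interrupt handlers
 * below read, acknowledge and dispatch those bits to smp_message_recv().
 */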

#ifdef CONFIG_SMP
char hdpu_smp0[] = "SMP Cpu #0";
char hdpu_smp1[] = "SMP Cpu #1";

static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id)
{
	volatile unsigned int doorbell;

	doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL);

	/* Ack the doorbell interrupts */
	mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell);

	if (doorbell & 1)
		smp_message_recv(0);
	if (doorbell & 2)
		smp_message_recv(1);
	if (doorbell & 4)
		smp_message_recv(2);
	if (doorbell & 8)
		smp_message_recv(3);

	return IRQ_HANDLED;
}

static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id)
{
	volatile unsigned int doorbell;

	doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL);

	/* Ack the doorbell interrupts */
	mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell);

	if (doorbell & 1)
		smp_message_recv(0);
	if (doorbell & 2)
		smp_message_recv(1);
	if (doorbell & 4)
		smp_message_recv(2);
	if (doorbell & 8)
		smp_message_recv(3);

	return IRQ_HANDLED;
}

static void smp_hdpu_CPU_two(void)
{
	__asm__ __volatile__
	    ("\n"
	     "lis	3, 0x0000\n"
	     "ori	3, 3, 0x00c0\n"
	     "mtspr	26, 3\n"	/* SRR0 = 0xc0 */
	     "li	4, 0\n"
	     "mtspr	27, 4\n"	/* SRR1 = 0 */
	     "rfi");
}

static int smp_hdpu_probe(void)
{
	int *cpu_count_reg;
	int num_cpus = 0;

	cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE);
	if (cpu_count_reg) {
		num_cpus = (*cpu_count_reg >> 20) & 0x3;
		iounmap(cpu_count_reg);
	}

	/* Validate the bits from the CPLD.  If the register could not be
	 * mapped, or if it reported 0 or 3, default to 2 CPUs.
	 * Older CPLD revisions set these bits to all ones (val = 3).
	 */
	if ((num_cpus < 1) || (num_cpus > 2)) {
		printk("Unable to determine the number of processors %d. "
		       "Defaulting to 2.\n", num_cpus);
		num_cpus = 2;
	}
	return num_cpus;
}

static void
smp_hdpu_message_pass(int target, int msg)
{
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

	switch (target) {
	case MSG_ALL:
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	case MSG_ALL_BUT_SELF:
		if (smp_processor_id())
			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		else
			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	default:
		if (target == 0)
			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		else
			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	}
}
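
/*
 * Bring-up path for the second CPU.  Out of reset CPU1 fetches from
 * 0xfff00100, so the internal SRAM is temporarily remapped to
 * 0xfff00000 and smp_hdpu_CPU_two() is copied to offset 0x100 within it
 * (bootaddr is an unsigned int pointer, so "+ 0x40" is a 0x100-byte
 * offset).  That stub rfi's to low memory (SRR0 = 0xc0), presumably the
 * kernel's secondary-hold spin loop; the later write of the CPU number
 * to KERNELBASE is what releases it.  Afterwards the SRAM and boot
 * flash windows are restored to their normal locations.
 */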

static void smp_hdpu_kick_cpu(int nr)
{
	volatile unsigned int *bootaddr;

	if (ppc_md.progress)
		ppc_md.progress("smp_hdpu_kick_cpu", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK);

	/* Disable BootCS.  The window size must also be reduced to zero. */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0);

	bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE);
	if (!bootaddr) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0);
		return;
	}

	memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20);

	/* Map SRAM to 0xfff00000 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Enable CPU1 arbitration */
	mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9));

	/*
	 * Wait 100ms for the other CPU to reach __secondary_start.
	 * Once it has, it is safe to revert the SRAM mapping, etc.
	 */
	mdelay(100);

	*(unsigned long *)KERNELBASE = nr;
	asm volatile ("dcbf 0,%0"::"r" (KERNELBASE):"memory");

	iounmap(bootaddr);

	/* Set up window for internal SRAM (256 KB in size) */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/*
	 * Set up window for embedded FLASH (using the boot CS window).
	 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
}

static void smp_hdpu_setup_cpu(int cpu_nr)
{
	if (cpu_nr == 0) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 0", 0);
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
		request_irq(60, hdpu_smp_cpu0_int_handler,
			    IRQF_DISABLED, hdpu_smp0, 0);
	}
	if (cpu_nr == 1) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 1", 0);

		hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR |
				  CPUSTATE_KERNEL_CPU1_OK);

		/* Enable L1 parity bits */
		hdpu_set_l1pe();

		/* Enable L2 cache */
		_set_L2CR(0);
		_set_L2CR(0x80080000);

		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0);
		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
		request_irq(28, hdpu_smp_cpu1_int_handler,
			    IRQF_DISABLED, hdpu_smp1, 0);
	}
}
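
/*
 * Timebase synchronization for SMP bring-up.  HDPU_TBEN_BASE is a board
 * register that freezes (write 0) and thaws (write 1) the timebase.
 * The boot CPU freezes it, publishes its TB value through
 * timebase_upper/lower, and waits for the secondary to consume and
 * clear those values before thawing, so both CPUs resume counting from
 * the same point.
 */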

void __devinit hdpu_tben_give(void)
{
	volatile unsigned long *val = 0;

	/* By writing 0 to the TBEN_BASE, the timebase is frozen */
	val = ioremap(HDPU_TBEN_BASE, 4);
	*val = 0;
	mb();

	spin_lock(&timebase_lock);
	timebase_upper = get_tbu();
	timebase_lower = get_tbl();
	spin_unlock(&timebase_lock);

	while (timebase_upper || timebase_lower)
		barrier();

	/* By writing 1 to the TBEN_BASE, the timebase is thawed */
	*val = 1;
	mb();

	iounmap(val);
}

void __devinit hdpu_tben_take(void)
{
	while (!(timebase_upper || timebase_lower))
		barrier();

	spin_lock(&timebase_lock);
	set_tb(timebase_upper, timebase_lower);
	timebase_upper = 0;
	timebase_lower = 0;
	spin_unlock(&timebase_lock);
}

static struct smp_ops_t hdpu_smp_ops = {
	.message_pass = smp_hdpu_message_pass,
	.probe = smp_hdpu_probe,
	.kick_cpu = smp_hdpu_kick_cpu,
	.setup_cpu = smp_hdpu_setup_cpu,
	.give_timebase = hdpu_tben_give,
	.take_timebase = hdpu_tben_take,
};
#endif				/* CONFIG_SMP */

void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	parse_bootinfo(r3, r4, r5, r6, r7);

	isa_mem_base = 0;

	ppc_md.setup_arch = hdpu_setup_arch;
	ppc_md.init = hdpu_init2;
	ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
	ppc_md.init_IRQ = hdpu_init_irq;
	ppc_md.get_irq = mv64360_get_irq;
	ppc_md.restart = hdpu_restart;
	ppc_md.power_off = hdpu_power_off;
	ppc_md.halt = hdpu_halt;
	ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
	ppc_md.calibrate_decr = hdpu_calibrate_decr;
	ppc_md.setup_io_mappings = hdpu_map_io;

	bh.p_base = CONFIG_MV64X60_NEW_BASE;
	bh.v_base = (unsigned long *)bh.p_base;

	hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = hdpu_mpsc_progress;	/* embedded UART */
	mv64x60_progress_init(bh.p_base);
#endif				/* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
	smp_ops = &hdpu_smp_ops;
#endif				/* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
	platform_notify = hdpu_platform_notify;
#endif
	return;
}

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/* SMP-safe version of the serial text debug routine.  Uses Semaphore 0. */
void hdpu_mpsc_progress(char *s, unsigned short hex)
{
	while (mv64x60_read(&bh, MV64360_WHO_AM_I) !=
	       mv64x60_read(&bh, MV64360_SEMAPHORE_0)) {
	}
	mv64x60_mpsc_progress(s, hex);
	mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff);
}
#endif
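
/*
 * Present the 8-bit CPU state code on GPP pins 21-28.  Only the "value
 * clear" register is written here, so the encoding is presumably
 * active-low at the board's status logic.
 */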

static void hdpu_cpustate_set(unsigned char new_state)
{
	unsigned int state = (new_state << 21);

	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21));
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state);
}
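
/*
 * Static MTD partition map for the embedded flash; the offsets and
 * sizes below add up to 0x04000000 (64 MB).
 */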

#ifdef CONFIG_MTD_PHYSMAP
static struct mtd_partition hdpu_partitions[] = {
	{
		.name = "Root FS",
		.size = 0x03400000,
		.offset = 0,
		.mask_flags = 0,
	}, {
		.name = "User FS",
		.size = 0x00800000,
		.offset = 0x03400000,
		.mask_flags = 0,
	}, {
		.name = "Kernel Image",
		.size = 0x002C0000,
		.offset = 0x03C00000,
		.mask_flags = 0,
	}, {
		.name = "bootEnv",
		.size = 0x00040000,
		.offset = 0x03EC0000,
		.mask_flags = 0,
	}, {
		.name = "bootROM",
		.size = 0x00100000,
		.offset = 0x03F00000,
		.mask_flags = 0,
	}
};

static int __init hdpu_setup_mtd(void)
{
	physmap_set_partitions(hdpu_partitions, ARRAY_SIZE(hdpu_partitions));
	return 0;
}

arch_initcall(hdpu_setup_mtd);
#endif

#ifdef CONFIG_HDPU_FEATURES
static struct resource hdpu_cpustate_resources[] = {
	[0] = {
		.name = "addr base",
		.start = MV64x60_GPP_VALUE_SET,
		.end = MV64x60_GPP_VALUE_CLR + 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct resource hdpu_nexus_resources[] = {
	[0] = {
		.name = "nexus register",
		.start = HDPU_NEXUS_ID_BASE,
		.end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device hdpu_cpustate_device = {
	.name = HDPU_CPUSTATE_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_cpustate_resources),
	.resource = hdpu_cpustate_resources,
};

static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};

static int __init hdpu_add_pds(void)
{
	platform_device_register(&hdpu_cpustate_device);
	platform_device_register(&hdpu_nexus_device);
	return 0;
}

arch_initcall(hdpu_add_pds);
#endif