/* hdpu.c (26 KB) — line-number gutter residue from extraction removed. */
  1. /*
  2. * arch/ppc/platforms/hdpu_setup.c
  3. *
  4. * Board setup routines for the Sky Computers HDPU Compute Blade.
  5. *
  6. * Written by Brian Waite <waite@skycomputers.com>
  7. *
  8. * Based on code done by - Mark A. Greer <mgreer@mvista.com>
  9. * Rabeeh Khoury - rabeeh@galileo.co.il
  10. *
  11. * This program is free software; you can redistribute it and/or modify it
  12. * under the terms of the GNU General Public License as published by the
  13. * Free Software Foundation; either version 2 of the License, or (at your
  14. * option) any later version.
  15. */
  16. #include <linux/config.h>
  17. #include <linux/pci.h>
  18. #include <linux/delay.h>
  19. #include <linux/irq.h>
  20. #include <linux/ide.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/initrd.h>
  23. #include <linux/root_dev.h>
  24. #include <linux/smp.h>
  25. #include <asm/time.h>
  26. #include <asm/machdep.h>
  27. #include <asm/todc.h>
  28. #include <asm/mv64x60.h>
  29. #include <asm/ppcboot.h>
  30. #include <platforms/hdpu.h>
  31. #include <linux/mv643xx.h>
  32. #include <linux/hdpu_features.h>
  33. #include <linux/device.h>
  34. #include <linux/mtd/physmap.h>
#define BOARD_VENDOR "Sky Computers"
#define BOARD_MACHINE "HDPU-CB-A"

/* Board-information block copied verbatim from the ppcboot/U-Boot bootloader. */
bd_t ppcboot_bd;
int ppcboot_bd_valid = 0;	/* nonzero once parse_bootinfo() has filled ppcboot_bd */

/* Handle for the Marvell MV64x60 system-controller bridge. */
static mv64x60_handle_t bh;

extern char cmd_line[];		/* kernel command line, filled from bootloader data */

unsigned long hdpu_find_end_of_memory(void);
void hdpu_mpsc_progress(char *s, unsigned short hex);
void hdpu_heartbeat(void);

static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7);
static void hdpu_set_l1pe(void);
static void hdpu_cpustate_set(unsigned char new_state);

#ifdef CONFIG_SMP
/* Protects the timebase hand-off values exchanged between the two CPUs. */
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned int timebase_upper = 0, timebase_lower = 0;
extern int smp_tb_synchronized;
void __devinit hdpu_tben_give(void);
void __devinit hdpu_tben_take(void);
#endif
/*
 * ppc_md.pci_map_irq: route PCI interrupts for both hoses.  All slots on
 * a hose share one board interrupt line.  PCI_IRQ_TABLE_LOOKUP expands
 * in terms of the local identifiers pci_irq_table, min_idsel, max_idsel
 * and irqs_per_slot, so those names must be kept exactly as-is.
 */
static int __init
hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);

	if (hose->index == 0) {
		/* Hose 0: idsels 1-2, both wired to HDPU_PCI_0_IRQ. */
		static char pci_irq_table[][4] = {
			{HDPU_PCI_0_IRQ, 0, 0, 0},
			{HDPU_PCI_0_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	} else {
		/* Hose 1: single idsel, wired to HDPU_PCI_1_IRQ. */
		static char pci_irq_table[][4] = {
			{HDPU_PCI_1_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	}
}
/*
 * Configure the MV64x60 GPP pins and unmask the board interrupt inputs
 * (GPP 8 and 13) for CPU 0.
 */
static void __init hdpu_intr_setup(void)
{
	/* Set the listed GPP pins to output mode (LEDs, reset line, state code). */
	mv64x60_write(&bh, MV64x60_GPP_IO_CNTL,
		      (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
		       (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) |
		       (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) |
		       (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) |
		       (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29)));

	/* XXXX Erratum FEr PCI-#8: keep these PCI command bits cleared. */
	mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9));

	/*
	 * Dismiss and then enable interrupt on GPP interrupt cause
	 * for CPU #0
	 */
	mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13)));
	mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13));

	/*
	 * Dismiss and then enable interrupt on CPU #0 high cause reg
	 * BIT25 summarizes GPP interrupts 8-15
	 */
	mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25));
}
/*
 * Program the MV64x60 CPU-to-device address windows and apply chip
 * tuning: pipelining, snoop settings, DRAM read-buffer assignment and
 * internal-SRAM configuration.  Finishes by wiring up the interrupts.
 */
static void __init hdpu_setup_peripherals(void)
{
	unsigned int val;

	/* CPU windows: boot flash, TBEN device, nexus-ID CPLD, internal SRAM. */
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
				 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
				 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Unused ethernet-to-memory window 4: disable and zero it. */
	bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0);

	mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3));

	/* Clear control bit 0 of each of the four timer/counters
	 * (presumably the enable bits — confirm against the MV64360 spec). */
	mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL,
			 ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24)));

	/* Enable pipelining */
	mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13));

	/* Enable Snoop Pipelining */
	mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24));

	/*
	 * Change DRAM read buffer assignment.
	 * Assign read buffer 0 dedicated only for CPU,
	 * and the rest read buffer 1.
	 */
	val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG);
	val = val & 0x03ffffff;
	val = val | 0xf8000000;
	mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val);

	/*
	 * Configure internal SRAM -
	 * Cache coherent write back, if CONFIG_MV64360_SRAM_CACHE_COHERENT set
	 * Parity enabled.
	 * Parity error propagation
	 * Arbitration not parked for CPU only
	 * Other bits are reserved.
	 */
#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2);
#else
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0);
#endif

	hdpu_intr_setup();
}
/*
 * Describe both PCI buses and the per-window access/snoop options to the
 * MV64x60 common code, initialize the bridge, and scan both hoses.
 */
static void __init hdpu_setup_bridge(void)
{
	struct mv64x60_setup_info si;
	int i;

	memset(&si, 0, sizeof(si));

	si.phys_reg_base = HDPU_BRIDGE_REG_BASE;

	/* PCI bus 0: one I/O window and one memory window, no byte swap. */
	si.pci_0.enable_bus = 1;
	si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR;
	si.pci_0.pci_io.pci_base_hi = 0;
	si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR;
	si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE;
	si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR;
	si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR;
	si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR;
	si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE;
	si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_cmd_bits = 0;
	si.pci_0.latency_timer = 0x80;

	/* PCI bus 1: identical layout with its own windows. */
	si.pci_1.enable_bus = 1;
	si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR;
	si.pci_1.pci_io.pci_base_hi = 0;
	si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR;
	si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE;
	si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR;
	si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR;
	si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR;
	si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE;
	si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_cmd_bits = 0;
	si.pci_1.latency_timer = 0x80;

	/* Per CPU-to-memory window snoop/burst options; snoop choice
	 * depends on whether the cache is coherent (see "errata" notes). */
	for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) {
#if defined(CONFIG_NOT_COHERENT_CACHE)
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE;
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE;
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#else
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB;	/* errata */
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB;	/* errata */
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB;	/* errata */

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#endif
	}

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI);

	/* Lookup PCI host bridges */
	mv64x60_init(&bh, &si);
	pci_dram_offset = 0;	/* System mem at same addr on PCI & cpu bus */
	ppc_md.pci_swizzle = common_swizzle;
	ppc_md.pci_map_irq = hdpu_map_irq;

	/* Assign bus numbers: hose A starts at 0, hose B follows it. */
	mv64x60_set_bus(&bh, 0, 0);
	bh.hose_a->first_busno = 0;
	bh.hose_a->last_busno = 0xff;
	bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0);

	bh.hose_b->first_busno = bh.hose_a->last_busno + 1;
	mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno);
	bh.hose_b->last_busno = 0xff;
	bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b,
						 bh.hose_b->first_busno);

	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG);

	/*
	 * Enabling of PCI internal-vs-external arbitration
	 * is a platform- and errata-dependent decision.
	 */
	return;
}
#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * Early (pre-console) MPSC setup.  Only does real work when KGDB is
 * configured: the chosen MPSC port is brought up once, at 9600 8N1, so
 * the debugger can attach before the normal serial driver is loaded.
 */
static void __init hdpu_early_serial_map(void)
{
#ifdef CONFIG_KGDB
	static char first_time = 1;

#if defined(CONFIG_KGDB_TTYS0)
#define KGDB_PORT 0
#elif defined(CONFIG_KGDB_TTYS1)
#define KGDB_PORT 1
#else
#error "Invalid kgdb_tty port"
#endif

	if (first_time) {
		gt_early_mpsc_init(KGDB_PORT,
				   B9600 | CS8 | CREAD | HUPCL | CLOCAL);
		first_time = 0;
	}

	return;
#endif
}
#endif
  257. static void hdpu_init2(void)
  258. {
  259. return;
  260. }
  261. #if defined(CONFIG_MV643XX_ETH)
  262. static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
  263. {
  264. struct mv643xx_eth_platform_data *eth_pd;
  265. eth_pd = pd->dev.platform_data;
  266. eth_pd->port_serial_control =
  267. mv64x60_read(&bh, MV643XX_ETH_PORT_SERIAL_CONTROL_REG(pd->id) & ~1);
  268. eth_pd->force_phy_addr = 1;
  269. eth_pd->phy_addr = pd->id;
  270. eth_pd->tx_queue_size = 400;
  271. eth_pd->rx_queue_size = 800;
  272. }
  273. #endif
  274. static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd)
  275. {
  276. struct mpsc_pdata *pdata;
  277. pdata = (struct mpsc_pdata *)pd->dev.platform_data;
  278. pdata->max_idle = 40;
  279. if (ppcboot_bd_valid)
  280. pdata->default_baud = ppcboot_bd.bi_baudrate;
  281. else
  282. pdata->default_baud = HDPU_DEFAULT_BAUD;
  283. pdata->brg_clk_src = HDPU_MPSC_CLK_SRC;
  284. pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ;
  285. }
#if defined(CONFIG_HDPU_FEATURES)
/* Attach the MV64x60 bridge info to the cpustate platform device. */
static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd)
{
	struct platform_device *pds[1];

	pds[0] = pd;
	mv64x60_pd_fixup(&bh, pds, 1);
}
#endif
/*
 * Driver-model notify hook: when one of the known platform devices is
 * added, run its board-specific fixup to populate the platform data.
 */
static int __init hdpu_platform_notify(struct device *dev)
{
	/* Map from bus_id to the matching fixup routine. */
	static struct {
		char *bus_id;
		void ((*rtn) (struct platform_device * pdev));
	} dev_map[] = {
		{
		MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata},
#if defined(CONFIG_MV643XX_ETH)
		{
		MV643XX_ETH_NAME ".0", hdpu_fixup_eth_pdata},
#endif
#if defined(CONFIG_HDPU_FEATURES)
		{
		HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata},
#endif
	};
	struct platform_device *pdev;
	int i;

	if (dev && dev->bus_id)
		for (i = 0; i < ARRAY_SIZE(dev_map); i++)
			if (!strncmp(dev->bus_id, dev_map[i].bus_id,
				     BUS_ID_SIZE)) {
				pdev = container_of(dev,
						    struct platform_device,
						    dev);
				dev_map[i].rtn(pdev);
			}

	return 0;
}
/*
 * ppc_md.setup_arch: choose the root device, enable L1 parity and the
 * L2 cache, then bring up the MV64x60 bridge and on-chip peripherals.
 */
static void __init hdpu_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);

	/* Root device priority: initrd, then NFS, then /dev/sda2.
	 * (The #ifdef blocks chain onto the else of the initrd test.) */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
#ifdef CONFIG_ROOT_NFS
		ROOT_DEV = Root_NFS;
#else
		ROOT_DEV = Root_SDA2;
#endif

	ppc_md.heartbeat = hdpu_heartbeat;
	ppc_md.heartbeat_reset = HZ;
	ppc_md.heartbeat_count = 1;

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: Enabling L2 cache", 0);

	/* Enable L1 Parity Bits */
	hdpu_set_l1pe();

	/* Enable L2 and L3 caches (if 745x) */
	_set_L2CR(0x80080000);

	/* NOTE(review): this progress string repeats "enter"; it likely
	 * was meant to mark a later stage — confirm before relying on it. */
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);

	hdpu_setup_bridge();

	hdpu_setup_peripherals();

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
	hdpu_early_serial_map();
#endif

	printk("SKY HDPU Compute Blade \n");

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: exit", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK);
	return;
}
/* ppc_md.init_IRQ: the MV64360 bridge owns the interrupt controller. */
static void __init hdpu_init_irq(void)
{
	mv64360_init_irq();
}
  364. static void __init hdpu_set_l1pe()
  365. {
  366. unsigned long ictrl;
  367. asm volatile ("mfspr %0, 1011":"=r" (ictrl):);
  368. ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP;
  369. asm volatile ("mtspr 1011, %0"::"r" (ictrl));
  370. }
/*
 * Set BAT 1 to map 0xf1000000 to end of physical memory space.
 * (DBAT1U/DBAT1L are loaded directly; presumably this covers the bridge
 * register region for early access — confirm against the memory map.)
 */
static __inline__ void hdpu_set_bat(void)
{
	mb();
	mtspr(SPRN_DBAT1U, 0xf10001fe);
	mtspr(SPRN_DBAT1L, 0xf100002a);
	mb();

	return;
}
  382. unsigned long __init hdpu_find_end_of_memory(void)
  383. {
  384. return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
  385. MV64x60_TYPE_MV64360);
  386. }
/*
 * Hard-reset the board: clear the status LEDs (GPP 4-6), disable the L2
 * and L1 caches, clear GPP pin 3 ("the reset bit") and spin forever.
 */
static void hdpu_reset_board(void)
{
	volatile int infinite = 1;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET);

	local_irq_disable();

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) |
						   (1 << 5) | (1 << 6)));

	/* disable and invalidate the L2 cache */
	_set_L2CR(0);
	_set_L2CR(0x200000);

	/* flush and disable L1 I/D cache (HID0 is SPR 1008).
	 * NOTE(review): "ori 5,5,0xcc00" reads r5 before it is loaded, so
	 * the andc mask depends on whatever r5 happened to contain —
	 * the intended sequence probably needs "li 5,0" first; confirm. */
	__asm__ __volatile__
	    ("\n"
	     "mfspr 3,1008\n"
	     "ori 5,5,0xcc00\n"
	     "ori 4,3,0xc00\n"
	     "andc 5,3,5\n"
	     "sync\n"
	     "mtspr 1008,4\n"
	     "isync\n" "sync\n" "mtspr 1008,5\n" "isync\n" "sync\n");

	/* Hit the reset bit */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3));

	while (infinite)
		infinite = infinite;

	return;
}
  414. static void hdpu_restart(char *cmd)
  415. {
  416. volatile ulong i = 10000000;
  417. hdpu_reset_board();
  418. while (i-- > 0) ;
  419. panic("restart failed\n");
  420. }
/* ppc_md.halt: report HALT state, clear the LEDs, and park the CPU. */
static void hdpu_halt(void)
{
	local_irq_disable();
	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT);

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) |
						   (1 << 6)));
	while (1) ;
	/* NOTREACHED */
}
/* ppc_md.power_off: no software power control on this blade; just halt. */
static void hdpu_power_off(void)
{
	hdpu_halt();
	/* NOTREACHED */
}
  436. static int hdpu_show_cpuinfo(struct seq_file *m)
  437. {
  438. uint pvid;
  439. pvid = mfspr(SPRN_PVR);
  440. seq_printf(m, "vendor\t\t: Sky Computers\n");
  441. seq_printf(m, "machine\t\t: HDPU Compute Blade\n");
  442. seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
  443. pvid, (pvid & (1 << 15) ? "IBM" : "Motorola"));
  444. return 0;
  445. }
  446. static void __init hdpu_calibrate_decr(void)
  447. {
  448. ulong freq;
  449. if (ppcboot_bd_valid)
  450. freq = ppcboot_bd.bi_busfreq / 4;
  451. else
  452. freq = 133000000;
  453. printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
  454. freq / 1000000, freq % 1000000);
  455. tb_ticks_per_jiffy = freq / HZ;
  456. tb_to_us = mulhwu_scale_factor(freq, 1000000);
  457. return;
  458. }
/*
 * Decode the register-passed boot information (ppcboot/U-Boot hand-off):
 *   r3    = board-info (bd_t) pointer
 *   r4/r5 = initrd start/end
 *   r6/r7 = command-line start/end
 * Physical addresses (high nibble 0) are relocated into the kernel's
 * virtual window by adding KERNELBASE before being dereferenced.
 */
static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7)
{
	bd_t *bd = NULL;
	char *cmdline_start = NULL;
	int cmdline_len = 0;

	if (r3) {
		if ((r3 & 0xf0000000) == 0)
			r3 += KERNELBASE;
		if ((r3 & 0xf0000000) == KERNELBASE) {
			bd = (void *)r3;
			memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd));
			ppcboot_bd_valid = 1;
		}
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (r4 && r5 && r5 > r4) {
		if ((r4 & 0xf0000000) == 0)
			r4 += KERNELBASE;
		if ((r5 & 0xf0000000) == 0)
			r5 += KERNELBASE;
		if ((r4 & 0xf0000000) == KERNELBASE) {
			initrd_start = r4;
			initrd_end = r5;
			initrd_below_start_ok = 1;
		}
	}
#endif				/* CONFIG_BLK_DEV_INITRD */

	if (r6 && r7 && r7 > r6) {
		if ((r6 & 0xf0000000) == 0)
			r6 += KERNELBASE;
		if ((r7 & 0xf0000000) == 0)
			r7 += KERNELBASE;
		if ((r6 & 0xf0000000) == KERNELBASE) {
			cmdline_start = (void *)r6;
			cmdline_len = (r7 - r6);
			/* NOTE(review): strncpy will not NUL-terminate if
			 * the source fills cmdline_len exactly; assumes the
			 * bootloader includes the terminator — confirm. */
			strncpy(cmd_line, cmdline_start, cmdline_len);
		}
	}
}
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
/* IDE hook: probe an I/O region via the legacy check_region() interface. */
static int hdpu_ide_check_region(ide_ioreg_t from, unsigned int extent)
{
	return check_region(from, extent);
}
/* IDE hook: claim an I/O region (return value deliberately ignored). */
static void
hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
{
	request_region(from, extent, name);
	return;
}
/* IDE hook: release a previously claimed I/O region. */
static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent)
{
	release_region(from, extent);
	return;
}
/*
 * IDE hook: assign the IRQ of an IDE/RAID-class PCI device to the hwif.
 * NOTE(review): the loop does not break on the first match, so the last
 * matching device wins — confirm that is intended.
 */
static void __init
hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
			     ide_ioreg_t ctrl_port, int *irq)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
		    ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
			hw->irq = dev->irq;

			if (irq != NULL) {
				*irq = dev->irq;
			}
		}
	}

	return;
}
#endif
  533. void hdpu_heartbeat(void)
  534. {
  535. if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
  536. mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5));
  537. else
  538. mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5));
  539. ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
  540. }
/* ppc_md.setup_io_mappings: 1:1 map 128 KB at 0xf1000000 as I/O space. */
static void __init hdpu_map_io(void)
{
	io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO);
}
#ifdef CONFIG_SMP
/* Names passed to request_irq() for the per-CPU doorbell interrupts. */
char hdpu_smp0[] = "SMP Cpu #0";
char hdpu_smp1[] = "SMP Cpu #1";
  548. static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id,
  549. struct pt_regs *regs)
  550. {
  551. volatile unsigned int doorbell;
  552. doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL);
  553. /* Ack the doorbell interrupts */
  554. mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell);
  555. if (doorbell & 1) {
  556. smp_message_recv(0, regs);
  557. }
  558. if (doorbell & 2) {
  559. smp_message_recv(1, regs);
  560. }
  561. if (doorbell & 4) {
  562. smp_message_recv(2, regs);
  563. }
  564. if (doorbell & 8) {
  565. smp_message_recv(3, regs);
  566. }
  567. return IRQ_HANDLED;
  568. }
  569. static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id,
  570. struct pt_regs *regs)
  571. {
  572. volatile unsigned int doorbell;
  573. doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL);
  574. /* Ack the doorbell interrupts */
  575. mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell);
  576. if (doorbell & 1) {
  577. smp_message_recv(0, regs);
  578. }
  579. if (doorbell & 2) {
  580. smp_message_recv(1, regs);
  581. }
  582. if (doorbell & 4) {
  583. smp_message_recv(2, regs);
  584. }
  585. if (doorbell & 8) {
  586. smp_message_recv(3, regs);
  587. }
  588. return IRQ_HANDLED;
  589. }
/*
 * Entry stub copied into internal SRAM for the secondary CPU: loads
 * SRR0 = 0x00c0 and SRR1 = 0 (SPRs 26/27), then does an rfi to jump
 * there with a cleared MSR.
 */
static void smp_hdpu_CPU_two(void)
{
	__asm__ __volatile__
	    ("\n"
	     "lis 3,0x0000\n"
	     "ori 3,3,0x00c0\n"
	     "mtspr 26, 3\n" "li 4,0\n" "mtspr 27,4\n" "rfi");
}
  598. static int smp_hdpu_probe(void)
  599. {
  600. int *cpu_count_reg;
  601. int num_cpus = 0;
  602. cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE);
  603. if (cpu_count_reg) {
  604. num_cpus = (*cpu_count_reg >> 20) & 0x3;
  605. iounmap(cpu_count_reg);
  606. }
  607. /* Validate the bits in the CPLD. If we could not map the reg, return 2.
  608. * If the register reported 0 or 3, return 2.
  609. * Older CPLD revisions set these bits to all ones (val = 3).
  610. */
  611. if ((num_cpus < 1) || (num_cpus > 2)) {
  612. printk
  613. ("Unable to determine the number of processors %d . deafulting to 2.\n",
  614. num_cpus);
  615. num_cpus = 2;
  616. }
  617. return num_cpus;
  618. }
/*
 * Send an IPI by writing the message number (0-3) as a single bit into
 * the target CPU's doorbell register.  `data' and `wait' are unused.
 */
static void
smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait)
{
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

	switch (target) {
	case MSG_ALL:
		/* Ring both doorbells. */
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	case MSG_ALL_BUT_SELF:
		/* Ring only the other CPU's doorbell. */
		if (smp_processor_id())
			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		else
			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	default:
		/* Explicit target CPU number (0 or 1). */
		if (target == 0)
			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
		else
			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
		break;
	}
}
/*
 * Start secondary CPU `nr': copy the smp_hdpu_CPU_two() stub into
 * internal SRAM, temporarily map that SRAM at 0xfff00000 (so the
 * secondary's reset fetch hits the stub), let CPU1 arbitrate, then
 * restore the normal SRAM and boot-flash windows.
 */
static void smp_hdpu_kick_cpu(int nr)
{
	volatile unsigned int *bootaddr;

	if (ppc_md.progress)
		ppc_md.progress("smp_hdpu_kick_cpu", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK);

	/* Disable BootCS. Must also reduce the window size to zero. */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0);

	bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE);
	if (!bootaddr) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0);
		return;
	}

	/* bootaddr is an int pointer, so +0x40 is byte offset 0x100. */
	memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20);

	/* map SRAM to 0xfff00000 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Enable CPU1 arbitration */
	mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9));

	/*
	 * Wait 100 ms until the other CPU has reached __secondary_start;
	 * after that it is safe to revert the SRAM mapping etc.
	 */
	mdelay(100);

	/* Publish our CPU number at KERNELBASE and flush the cache line
	 * to memory so the secondary can read it. */
	*(unsigned long *)KERNELBASE = nr;
	asm volatile ("dcbf 0,%0"::"r" (KERNELBASE):"memory");

	iounmap(bootaddr);

	/* Set up window for internal sram (256KByte insize) */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/*
	 * Set up windows for embedded FLASH (using boot CS window).
	 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
}
/*
 * Per-CPU bring-up: clear/unmask the CPU's doorbell and install its IPI
 * handler.  CPU 1 additionally enables its L1 parity and L2 cache.
 */
static void smp_hdpu_setup_cpu(int cpu_nr)
{
	if (cpu_nr == 0) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 0", 0);

		/* NOTE(review): CPU0 clears its doorbell with 0xff while
		 * CPU1 below writes 0x0 — confirm the asymmetry is intended. */
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
		/* IRQ 60: CPU0 doorbell (dev_id unused, passed as 0). */
		request_irq(60, hdpu_smp_cpu0_int_handler,
			    SA_INTERRUPT, hdpu_smp0, 0);
	}

	if (cpu_nr == 1) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 1", 0);

		hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR |
				  CPUSTATE_KERNEL_CPU1_OK);

		/* Enable L1 Parity Bits */
		hdpu_set_l1pe();

		/* Enable L2 cache */
		_set_L2CR(0);
		_set_L2CR(0x80080000);

		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0);
		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
		/* IRQ 28: CPU1 doorbell. */
		request_irq(28, hdpu_smp_cpu1_int_handler,
			    SA_INTERRUPT, hdpu_smp1, 0);
	}
}
/*
 * Timebase hand-off, giver side: freeze the shared timebase via the
 * TBEN register, publish TBU/TBL under timebase_lock, wait for the
 * taker to consume (zero) them, then unfreeze.
 * NOTE(review): the ioremap() return value is not checked; a mapping
 * failure here would dereference NULL — confirm this can't happen at
 * this stage of boot.
 */
void __devinit hdpu_tben_give()
{
	volatile unsigned long *val = 0;

	/* By writing 0 to the TBEN_BASE, the timebases is frozen */
	val = ioremap(HDPU_TBEN_BASE, 4);
	*val = 0;
	mb();

	spin_lock(&timebase_lock);
	timebase_upper = get_tbu();
	timebase_lower = get_tbl();
	spin_unlock(&timebase_lock);

	/* Wait until the taker has zeroed the published values. */
	while (timebase_upper || timebase_lower)
		barrier();

	/* By writing 1 to the TBEN_BASE, the timebases is thawed */
	*val = 1;
	mb();

	iounmap(val);
}
/*
 * Timebase hand-off, taker side: wait for the giver to publish TBU/TBL,
 * load them into this CPU's timebase, then zero the shared values to
 * signal completion.
 */
void __devinit hdpu_tben_take()
{
	while (!(timebase_upper || timebase_lower))
		barrier();

	spin_lock(&timebase_lock);
	set_tb(timebase_upper, timebase_lower);
	timebase_upper = 0;
	timebase_lower = 0;
	spin_unlock(&timebase_lock);
}
/* SMP operations vector, installed into ppc_md.smp_ops by platform_init(). */
static struct smp_ops_t hdpu_smp_ops = {
	.message_pass = smp_hdpu_message_pass,
	.probe = smp_hdpu_probe,
	.kick_cpu = smp_hdpu_kick_cpu,
	.setup_cpu = smp_hdpu_setup_cpu,
	.give_timebase = hdpu_tben_give,
	.take_timebase = hdpu_tben_take,
};
#endif				/* CONFIG_SMP */
/*
 * Kernel platform entry point: record the bootloader hand-off values
 * (r3-r7, see parse_bootinfo()) and install all of the board's ppc_md
 * callbacks and bridge base addresses.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	parse_bootinfo(r3, r4, r5, r6, r7);

	isa_mem_base = 0;

	ppc_md.setup_arch = hdpu_setup_arch;
	ppc_md.init = hdpu_init2;
	ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
	ppc_md.init_IRQ = hdpu_init_irq;
	ppc_md.get_irq = mv64360_get_irq;
	ppc_md.restart = hdpu_restart;
	ppc_md.power_off = hdpu_power_off;
	ppc_md.halt = hdpu_halt;
	ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
	ppc_md.calibrate_decr = hdpu_calibrate_decr;
	ppc_md.setup_io_mappings = hdpu_map_io;

	bh.p_base = CONFIG_MV64X60_NEW_BASE;
	bh.v_base = (unsigned long *)bh.p_base;

	/* Install the BAT1 mapping (presumably needed before the early
	 * progress/serial code touches the bridge — confirm). */
	hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = hdpu_mpsc_progress;	/* embedded UART */
	mv64x60_progress_init(bh.p_base);
#endif				/* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
	ppc_md.smp_ops = &hdpu_smp_ops;
#endif				/* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
	platform_notify = hdpu_platform_notify;
#endif
	return;
}
#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/* SMP safe version of the serial text debug routine. Uses Semaphore 0 */
void hdpu_mpsc_progress(char *s, unsigned short hex)
{
	/* Spin until this CPU owns semaphore 0 (WHO_AM_I matches the
	 * semaphore register's owner value — presumably the MV64360's
	 * hardware-semaphore protocol), then print and release it. */
	while (mv64x60_read(&bh, MV64360_WHO_AM_I) !=
	       mv64x60_read(&bh, MV64360_SEMAPHORE_0)) {
	}
	mv64x60_mpsc_progress(s, hex);
	mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff);
}
#endif
  797. static void hdpu_cpustate_set(unsigned char new_state)
  798. {
  799. unsigned int state = (new_state << 21);
  800. mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21));
  801. mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state);
  802. }
#ifdef CONFIG_MTD_PHYSMAP
/*
 * Embedded flash layout: five contiguous partitions covering
 * 0x00000000-0x04000000 (64 MB total).
 */
static struct mtd_partition hdpu_partitions[] = {
	{
	 .name = "Root FS",
	 .size = 0x03400000,
	 .offset = 0,
	 .mask_flags = 0,
	 },{
	 .name = "User FS",
	 .size = 0x00800000,
	 .offset = 0x03400000,
	 .mask_flags = 0,
	 },{
	 .name = "Kernel Image",
	 .size = 0x002C0000,
	 .offset = 0x03C00000,
	 .mask_flags = 0,
	 },{
	 .name = "bootEnv",
	 .size = 0x00040000,
	 .offset = 0x03EC0000,
	 .mask_flags = 0,
	 },{
	 .name = "bootROM",
	 .size = 0x00100000,
	 .offset = 0x03F00000,
	 .mask_flags = 0,
	 }
};
  832. static int __init hdpu_setup_mtd(void)
  833. {
  834. physmap_set_partitions(hdpu_partitions, 5);
  835. return 0;
  836. }
  837. arch_initcall(hdpu_setup_mtd);
  838. #endif
#ifdef CONFIG_HDPU_FEATURES
/*
 * Register range handed to the cpustate driver: the GPP value set/clear
 * registers.  NOTE(review): struct resource .end is conventionally the
 * address of the LAST byte (inclusive); ".end = ... + 1" here and
 * ".end = BASE + SIZE" below each look one byte too large — confirm
 * against the hdpu_features driver, which may compute size as
 * end - start.
 */
static struct resource hdpu_cpustate_resources[] = {
	[0] = {
	       .name = "addr base",
	       .start = MV64x60_GPP_VALUE_SET,
	       .end = MV64x60_GPP_VALUE_CLR + 1,
	       .flags = IORESOURCE_MEM,
	       },
};

/* Nexus-ID CPLD register range (holds the CPU-count register). */
static struct resource hdpu_nexus_resources[] = {
	[0] = {
	       .name = "nexus register",
	       .start = HDPU_NEXUS_ID_BASE,
	       .end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE,
	       .flags = IORESOURCE_MEM,
	       },
};
/* Platform device exposing the CPU-state GPP registers to the driver. */
static struct platform_device hdpu_cpustate_device = {
	.name = HDPU_CPUSTATE_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_cpustate_resources),
	.resource = hdpu_cpustate_resources,
};

/* Platform device exposing the nexus-ID CPLD register. */
static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};
  868. static int __init hdpu_add_pds(void)
  869. {
  870. platform_device_register(&hdpu_cpustate_device);
  871. platform_device_register(&hdpu_nexus_device);
  872. return 0;
  873. }
  874. arch_initcall(hdpu_add_pds);
  875. #endif