hdpu.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058
  1. /*
  2. * arch/ppc/platforms/hdpu_setup.c
  3. *
  4. * Board setup routines for the Sky Computers HDPU Compute Blade.
  5. *
  6. * Written by Brian Waite <waite@skycomputers.com>
  7. *
  8. * Based on code done by - Mark A. Greer <mgreer@mvista.com>
  9. * Rabeeh Khoury - rabeeh@galileo.co.il
  10. *
  11. * This program is free software; you can redistribute it and/or modify it
  12. * under the terms of the GNU General Public License as published by the
  13. * Free Software Foundation; either version 2 of the License, or (at your
  14. * option) any later version.
  15. */
  16. #include <linux/config.h>
  17. #include <linux/pci.h>
  18. #include <linux/delay.h>
  19. #include <linux/irq.h>
  20. #include <linux/ide.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/initrd.h>
  24. #include <linux/root_dev.h>
  25. #include <linux/smp.h>
  26. #include <asm/time.h>
  27. #include <asm/machdep.h>
  28. #include <asm/todc.h>
  29. #include <asm/mv64x60.h>
  30. #include <asm/ppcboot.h>
  31. #include <platforms/hdpu.h>
  32. #include <linux/mv643xx.h>
  33. #include <linux/hdpu_features.h>
  34. #include <linux/device.h>
  35. #include <linux/mtd/physmap.h>
  36. #define BOARD_VENDOR "Sky Computers"
  37. #define BOARD_MACHINE "HDPU-CB-A"
/* Board-info block copied in from the bootloader by parse_bootinfo(),
 * and a flag recording whether one was actually supplied in r3. */
bd_t ppcboot_bd;
int ppcboot_bd_valid = 0;

/* Handle for the MV64360 system controller; every MMIO access below
 * goes through this. */
static mv64x60_handle_t bh;

/* Kernel command-line buffer, filled by parse_bootinfo(). */
extern char cmd_line[];

unsigned long hdpu_find_end_of_memory(void);
void hdpu_mpsc_progress(char *s, unsigned short hex);
void hdpu_heartbeat(void);

static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7);
static void hdpu_set_l1pe(void);
static void hdpu_cpustate_set(unsigned char new_state);

#ifdef CONFIG_SMP
/* Mailbox used to hand the timebase value from CPU0 to CPU1 during
 * SMP bring-up (see hdpu_tben_give()/hdpu_tben_take()). */
static DEFINE_SPINLOCK(timebase_lock);
static unsigned int timebase_upper = 0, timebase_lower = 0;
extern int smp_tb_synchronized;
void __devinit hdpu_tben_give(void);
void __devinit hdpu_tben_take(void);
#endif
/*
 * PCI interrupt routing: map a device's IDSEL/pin to a board IRQ.
 * Hose 0 routes IDSEL 1-2 to HDPU_PCI_0_IRQ; hose 1 routes IDSEL 1 to
 * HDPU_PCI_1_IRQ.  The local names min_idsel/max_idsel/irqs_per_slot
 * and pci_irq_table are required verbatim by PCI_IRQ_TABLE_LOOKUP.
 */
static int __init
hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);

	if (hose->index == 0) {
		static char pci_irq_table[][4] = {
			{HDPU_PCI_0_IRQ, 0, 0, 0},
			{HDPU_PCI_0_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	} else {
		static char pci_irq_table[][4] = {
			{HDPU_PCI_1_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	}
}
/*
 * Configure the MV64360 GPP pins and unmask the GPP interrupts this
 * board uses.  Pins driven as outputs include the LED bits (4-6) and
 * the board-reset bit (3) used later by hdpu_reset_board()/hdpu_halt().
 */
static void __init hdpu_intr_setup(void)
{
	mv64x60_write(&bh, MV64x60_GPP_IO_CNTL,
		      (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
		       (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) |
		       (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) |
		       (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) |
		       (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29)));

	/* XXXX Erranum FEr PCI-#8 */
	mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9));

	/*
	 * Dismiss and then enable interrupt on GPP interrupt cause
	 * for CPU #0
	 */
	mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13)));
	mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13));

	/*
	 * Dismiss and then enable interrupt on CPU #0 high cause reg
	 * BIT25 summarizes GPP interrupts 8-15
	 */
	mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25));
}
/*
 * Program the bridge's CPU-to-device address windows (boot flash, TBEN,
 * Nexus ID CPLD, internal SRAM), apply chip workarounds, and tune SDRAM
 * and SRAM controller settings.  The write sequence is order-sensitive:
 * each window is programmed before it is enabled.
 */
static void __init hdpu_setup_peripherals(void)
{
	unsigned int val;

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
				 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
				 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Ethernet-to-memory window 4 is unused: disable and zero it. */
	bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0);

	mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL,
			 ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24)));

	/* Enable pipelining */
	mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13));
	/* Enable Snoop Pipelineing */
	mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24));

	/*
	 * Change DRAM read buffer assignment.
	 * Assign read buffer 0 dedicated only for CPU,
	 * and the rest read buffer 1.
	 */
	val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG);
	val = val & 0x03ffffff;
	val = val | 0xf8000000;
	mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val);

	/*
	 * Configure internal SRAM -
	 * Cache coherent write back, if CONFIG_MV64360_SRAM_CACHE_COHERENT set
	 * Parity enabled.
	 * Parity error propagation
	 * Arbitration not parked for CPU only
	 * Other bits are reserved.
	 */
#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2);
#else
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0);
#endif

	hdpu_intr_setup();
}
/*
 * Initialize the MV64360 host bridge: describe both PCI buses' I/O and
 * memory windows, pick snoop/burst options per cache-coherency config,
 * then probe and number both PCI hoses.  Called from hdpu_setup_arch().
 */
static void __init hdpu_setup_bridge(void)
{
	struct mv64x60_setup_info si;
	int i;

	memset(&si, 0, sizeof(si));

	si.phys_reg_base = HDPU_BRIDGE_REG_BASE;

	/* PCI bus 0 window layout. */
	si.pci_0.enable_bus = 1;
	si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR;
	si.pci_0.pci_io.pci_base_hi = 0;
	si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR;
	si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE;
	si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR;
	si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR;
	si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR;
	si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE;
	si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_cmd_bits = 0;
	si.pci_0.latency_timer = 0x80;

	/* PCI bus 1 window layout. */
	si.pci_1.enable_bus = 1;
	si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR;
	si.pci_1.pci_io.pci_base_hi = 0;
	si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR;
	si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE;
	si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR;
	si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR;
	si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR;
	si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE;
	si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_cmd_bits = 0;
	si.pci_1.latency_timer = 0x80;

	for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) {
#if defined(CONFIG_NOT_COHERENT_CACHE)
		/* Caches are not coherent: no snooping anywhere. */
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE;
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE;
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#else
		/* Coherent config: write-back snooping (chip errata). */
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB;	/* errata */
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB;	/* errata */
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB;	/* errata */

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#endif
	}

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI);

	/* Lookup PCI host bridges */
	mv64x60_init(&bh, &si);
	pci_dram_offset = 0;	/* System mem at same addr on PCI & cpu bus */
	ppc_md.pci_swizzle = common_swizzle;
	ppc_md.pci_map_irq = hdpu_map_irq;

	/* Number hose A first, then start hose B right after it. */
	mv64x60_set_bus(&bh, 0, 0);
	bh.hose_a->first_busno = 0;
	bh.hose_a->last_busno = 0xff;
	bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0);

	bh.hose_b->first_busno = bh.hose_a->last_busno + 1;
	mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno);
	bh.hose_b->last_busno = 0xff;
	bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b,
						bh.hose_b->first_busno);

	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG);

	/*
	 * Enabling of PCI internal-vs-external arbitration
	 * is a platform- and errata-dependent decision.
	 */
	return;
}
#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * Early MPSC console/KGDB hookup.  Only does work when KGDB is
 * configured: on the first call it initializes the chosen MPSC port
 * (9600 8N1) for the debugger; otherwise this is a no-op.
 */
static void __init hdpu_early_serial_map(void)
{
#ifdef CONFIG_KGDB
	static char first_time = 1;

#if defined(CONFIG_KGDB_TTYS0)
#define KGDB_PORT 0
#elif defined(CONFIG_KGDB_TTYS1)
#define KGDB_PORT 1
#else
#error "Invalid kgdb_tty port"
#endif

	if (first_time) {
		gt_early_mpsc_init(KGDB_PORT,
				   B9600 | CS8 | CREAD | HUPCL | CLOCAL);
		first_time = 0;
	}

	return;
#endif
}
#endif
  258. static void hdpu_init2(void)
  259. {
  260. return;
  261. }
  262. #if defined(CONFIG_MV643XX_ETH)
  263. static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
  264. {
  265. struct mv643xx_eth_platform_data *eth_pd;
  266. eth_pd = pd->dev.platform_data;
  267. eth_pd->port_serial_control =
  268. mv64x60_read(&bh, MV643XX_ETH_PORT_SERIAL_CONTROL_REG(pd->id) & ~1);
  269. eth_pd->force_phy_addr = 1;
  270. eth_pd->phy_addr = pd->id;
  271. eth_pd->tx_queue_size = 400;
  272. eth_pd->rx_queue_size = 800;
  273. }
  274. #endif
/*
 * Patch the MPSC (serial) platform data: set the idle threshold, the
 * baud rate (taken from the bootloader's board info when available,
 * else the board default), and the baud-rate-generator clock settings.
 */
static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd)
{
	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;

	pdata->max_idle = 40;
	if (ppcboot_bd_valid)
		pdata->default_baud = ppcboot_bd.bi_baudrate;
	else
		pdata->default_baud = HDPU_DEFAULT_BAUD;
	pdata->brg_clk_src = HDPU_MPSC_CLK_SRC;
	pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ;
}
#if defined(CONFIG_HDPU_FEATURES)
/*
 * Patch the cpustate platform device: let the generic mv64x60 fixup
 * translate its resources against the bridge handle.
 */
static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd)
{
	struct platform_device *pds[1];

	pds[0] = pd;
	mv64x60_pd_fixup(&bh, pds, 1);
}
#endif
/*
 * Bus notifier installed as platform_notify: when a platform device
 * whose bus_id matches one of the entries below is added, run the
 * corresponding platform-data fixup before its driver can bind.
 * Always returns 0 (no veto).
 */
static int __init hdpu_platform_notify(struct device *dev)
{
	/* bus_id -> fixup routine dispatch table. */
	static struct {
		char *bus_id;
		void ((*rtn) (struct platform_device * pdev));
	} dev_map[] = {
		{
		MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata},
#if defined(CONFIG_MV643XX_ETH)
		{
		MV643XX_ETH_NAME ".0", hdpu_fixup_eth_pdata},
#endif
#if defined(CONFIG_HDPU_FEATURES)
		{
		HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata},
#endif
	};
	struct platform_device *pdev;
	int i;

	if (dev && dev->bus_id)
		for (i = 0; i < ARRAY_SIZE(dev_map); i++)
			if (!strncmp(dev->bus_id, dev_map[i].bus_id,
				     BUS_ID_SIZE)) {

				pdev = container_of(dev,
						    struct platform_device,
						    dev);
				dev_map[i].rtn(pdev);
			}

	return 0;
}
/*
 * Main board setup (ppc_md.setup_arch): choose the root device, install
 * the heartbeat, enable L1 parity and L2 cache, and bring up the bridge
 * and on-chip peripherals.
 */
static void __init hdpu_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);

	/* Root device: initrd if one was passed in, else NFS or /dev/sda2. */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
#ifdef CONFIG_ROOT_NFS
		ROOT_DEV = Root_NFS;
#else
		ROOT_DEV = Root_SDA2;
#endif

	ppc_md.heartbeat = hdpu_heartbeat;
	ppc_md.heartbeat_reset = HZ;
	ppc_md.heartbeat_count = 1;

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: Enabling L2 cache", 0);

	/* Enable L1 Parity Bits */
	hdpu_set_l1pe();

	/* Enable L2 and L3 caches (if 745x) */
	_set_L2CR(0x80080000);

	/* NOTE(review): this progress string duplicates the "enter" one
	 * above; probably meant to describe the bridge setup. */
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);

	hdpu_setup_bridge();

	hdpu_setup_peripherals();

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
	hdpu_early_serial_map();
#endif

	printk("SKY HDPU Compute Blade \n");

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: exit", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK);
	return;
}
/* ppc_md.init_IRQ hook: all interrupt setup is handled by the generic
 * MV64360 interrupt-controller code. */
static void __init hdpu_init_irq(void)
{
	mv64360_init_irq();
}
  365. static void __init hdpu_set_l1pe()
  366. {
  367. unsigned long ictrl;
  368. asm volatile ("mfspr %0, 1011":"=r" (ictrl):);
  369. ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP;
  370. asm volatile ("mtspr 1011, %0"::"r" (ictrl));
  371. }
/*
 * Set BAT 1 to map 0xf1000000 to end of physical memory space.
 */
static __inline__ void hdpu_set_bat(void)
{
	mb();
	/* DBAT1U/DBAT1L values cover the bridge register space at
	 * 0xf1000000 as guarded I/O; exact BL/WIMG fields come from the
	 * constants below. */
	mtspr(SPRN_DBAT1U, 0xf10001fe);
	mtspr(SPRN_DBAT1L, 0xf100002a);
	mb();

	return;
}
/* ppc_md.find_end_of_memory hook: ask the MV64360 SDRAM controller how
 * much memory is populated. */
unsigned long __init hdpu_find_end_of_memory(void)
{
	return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
				    MV64x60_TYPE_MV64360);
}
/*
 * Hard-reset the board: turn off the LEDs, disable/invalidate the L2,
 * flush and disable the L1 caches via HID0, then pull the GPP reset
 * line (bit 3) low and spin forever waiting for the reset to take.
 */
static void hdpu_reset_board(void)
{
	volatile int infinite = 1;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET);

	local_irq_disable();

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) |
						   (1 << 5) | (1 << 6)));

	/* disable and invalidate the L2 cache */
	_set_L2CR(0);
	_set_L2CR(0x200000);

	/* flush and disable L1 I/D cache */
	/* NOTE(review): the first "ori 5,5,0xcc00" reads r5 before this
	 * sequence has written it, so r5's stale upper bits also get
	 * cleared from HID0 by the later "andc 5,3,5" — looks like it
	 * was meant to be "ori 5,3,..."-style; confirm against the 74xx
	 * cache-disable recipe before changing. */
	__asm__ __volatile__
	    ("\n"
	     "mfspr   3,1008\n"
	     "ori	5,5,0xcc00\n"
	     "ori	4,3,0xc00\n"
	     "andc	5,3,5\n"
	     "sync\n"
	     "mtspr	1008,4\n"
	     "isync\n" "sync\n" "mtspr	1008,5\n" "isync\n" "sync\n");

	/* Hit the reset bit */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3));

	while (infinite)
		infinite = infinite;

	return;
}
  415. static void hdpu_restart(char *cmd)
  416. {
  417. volatile ulong i = 10000000;
  418. hdpu_reset_board();
  419. while (i-- > 0) ;
  420. panic("restart failed\n");
  421. }
/* ppc_md.halt hook: mark the halt state, blank the LEDs, and spin with
 * interrupts off.  Never returns. */
static void hdpu_halt(void)
{
	local_irq_disable();

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT);

	/* Clear all the LEDs */
	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) |
						   (1 << 6)));
	while (1) ;
	/* NOTREACHED */
}
/* ppc_md.power_off hook: the blade has no software power control, so
 * powering off is the same as halting. */
static void hdpu_power_off(void)
{
	hdpu_halt();
	/* NOTREACHED */
}
  437. static int hdpu_show_cpuinfo(struct seq_file *m)
  438. {
  439. uint pvid;
  440. pvid = mfspr(SPRN_PVR);
  441. seq_printf(m, "vendor\t\t: Sky Computers\n");
  442. seq_printf(m, "machine\t\t: HDPU Compute Blade\n");
  443. seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
  444. pvid, (pvid & (1 << 15) ? "IBM" : "Motorola"));
  445. return 0;
  446. }
/*
 * ppc_md.calibrate_decr hook: derive the decrementer frequency from the
 * bootloader-reported bus frequency (timebase = bus clock / 4), falling
 * back to a 133 MHz bus when no board info was passed in.
 */
static void __init hdpu_calibrate_decr(void)
{
	ulong freq;

	if (ppcboot_bd_valid)
		freq = ppcboot_bd.bi_busfreq / 4;
	else
		freq = 133000000;

	printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       freq / 1000000, freq % 1000000);

	tb_ticks_per_jiffy = freq / HZ;
	tb_to_us = mulhwu_scale_factor(freq, 1000000);

	return;
}
/*
 * Decode the register values handed over by the bootloader:
 *   r3       - physical/virtual address of a bd_t board-info block
 *   r4/r5    - initrd start/end
 *   r6/r7    - kernel command line start/end
 * Addresses with a zero top nibble are physical and are rebased into
 * the kernel's virtual mapping by adding KERNELBASE.
 */
static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7)
{
	bd_t *bd = NULL;
	char *cmdline_start = NULL;
	int cmdline_len = 0;

	if (r3) {
		if ((r3 & 0xf0000000) == 0)
			r3 += KERNELBASE;
		if ((r3 & 0xf0000000) == KERNELBASE) {
			bd = (void *)r3;

			memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd));
			ppcboot_bd_valid = 1;
		}
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (r4 && r5 && r5 > r4) {
		if ((r4 & 0xf0000000) == 0)
			r4 += KERNELBASE;
		if ((r5 & 0xf0000000) == 0)
			r5 += KERNELBASE;
		if ((r4 & 0xf0000000) == KERNELBASE) {
			initrd_start = r4;
			initrd_end = r5;
			initrd_below_start_ok = 1;
		}
	}
#endif				/* CONFIG_BLK_DEV_INITRD */

	if (r6 && r7 && r7 > r6) {
		if ((r6 & 0xf0000000) == 0)
			r6 += KERNELBASE;
		if ((r7 & 0xf0000000) == 0)
			r7 += KERNELBASE;
		if ((r6 & 0xf0000000) == KERNELBASE) {
			cmdline_start = (void *)r6;
			cmdline_len = (r7 - r6);
			/* NOTE(review): strncpy() does not NUL-terminate
			 * and cmdline_len is not checked against the size
			 * of cmd_line[]; this relies on cmd_line being
			 * zero-filled and large enough — confirm. */
			strncpy(cmd_line, cmdline_start, cmdline_len);
		}
	}
}
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
/* IDE port-region hooks: thin wrappers over the generic region API. */
static void
hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
{
	request_region(from, extent, name);
	return;
}

static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent)
{
	release_region(from, extent);
	return;
}

/*
 * Point the IDE hardware interface at the IRQ of a PCI IDE/RAID
 * controller.  NOTE(review): if several such devices exist, the last
 * one scanned wins — confirm that is acceptable for this board.
 */
static void __init
hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
			     ide_ioreg_t ctrl_port, int *irq)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
		    ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
			hw->irq = dev->irq;

			if (irq != NULL) {
				*irq = dev->irq;
			}
		}
	}

	return;
}
#endif
/* Heartbeat hook: toggle the LED on GPP bit 5 each period and re-arm
 * the countdown. */
void hdpu_heartbeat(void)
{
	if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
		mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5));
	else
		mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5));

	ppc_md.heartbeat_count = ppc_md.heartbeat_reset;

}
/* ppc_md.setup_io_mappings hook: map the bridge register block at
 * 0xf1000000 (128 KB) one-to-one as I/O pages. */
static void __init hdpu_map_io(void)
{
	io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO);
}
#ifdef CONFIG_SMP
/* Names passed to request_irq() for the per-CPU doorbell handlers. */
char hdpu_smp0[] = "SMP Cpu #0";
char hdpu_smp1[] = "SMP Cpu #1";
  545. static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id,
  546. struct pt_regs *regs)
  547. {
  548. volatile unsigned int doorbell;
  549. doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL);
  550. /* Ack the doorbell interrupts */
  551. mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell);
  552. if (doorbell & 1) {
  553. smp_message_recv(0, regs);
  554. }
  555. if (doorbell & 2) {
  556. smp_message_recv(1, regs);
  557. }
  558. if (doorbell & 4) {
  559. smp_message_recv(2, regs);
  560. }
  561. if (doorbell & 8) {
  562. smp_message_recv(3, regs);
  563. }
  564. return IRQ_HANDLED;
  565. }
  566. static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id,
  567. struct pt_regs *regs)
  568. {
  569. volatile unsigned int doorbell;
  570. doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL);
  571. /* Ack the doorbell interrupts */
  572. mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell);
  573. if (doorbell & 1) {
  574. smp_message_recv(0, regs);
  575. }
  576. if (doorbell & 2) {
  577. smp_message_recv(1, regs);
  578. }
  579. if (doorbell & 4) {
  580. smp_message_recv(2, regs);
  581. }
  582. if (doorbell & 8) {
  583. smp_message_recv(3, regs);
  584. }
  585. return IRQ_HANDLED;
  586. }
/*
 * Boot stub copied into internal SRAM for the second CPU (see
 * smp_hdpu_kick_cpu()): loads 0xc0 into SRR0 (SPR 26), clears SRR1
 * (SPR 27), and rfi's — presumably jumping to the kernel's secondary
 * entry at physical 0xc0 with MSR = 0.
 */
static void smp_hdpu_CPU_two(void)
{
	__asm__ __volatile__
	    ("\n"
	     "lis     3,0x0000\n"
	     "ori     3,3,0x00c0\n"
	     "mtspr   26, 3\n" "li      4,0\n" "mtspr   27,4\n" "rfi");

}
  595. static int smp_hdpu_probe(void)
  596. {
  597. int *cpu_count_reg;
  598. int num_cpus = 0;
  599. cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE);
  600. if (cpu_count_reg) {
  601. num_cpus = (*cpu_count_reg >> 20) & 0x3;
  602. iounmap(cpu_count_reg);
  603. }
  604. /* Validate the bits in the CPLD. If we could not map the reg, return 2.
  605. * If the register reported 0 or 3, return 2.
  606. * Older CPLD revisions set these bits to all ones (val = 3).
  607. */
  608. if ((num_cpus < 1) || (num_cpus > 2)) {
  609. printk
  610. ("Unable to determine the number of processors %d . deafulting to 2.\n",
  611. num_cpus);
  612. num_cpus = 2;
  613. }
  614. return num_cpus;
  615. }
  616. static void
  617. smp_hdpu_message_pass(int target, int msg)
  618. {
  619. if (msg > 0x3) {
  620. printk("SMP %d: smp_message_pass: unknown msg %d\n",
  621. smp_processor_id(), msg);
  622. return;
  623. }
  624. switch (target) {
  625. case MSG_ALL:
  626. mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
  627. mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
  628. break;
  629. case MSG_ALL_BUT_SELF:
  630. if (smp_processor_id())
  631. mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
  632. else
  633. mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
  634. break;
  635. default:
  636. if (target == 0)
  637. mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
  638. else
  639. mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
  640. break;
  641. }
  642. }
/*
 * Release the second CPU: copy the boot stub into internal SRAM,
 * temporarily remap the SRAM over the 7xx reset vector region at
 * 0xfff00000, let CPU1 arbitrate the bus, then restore the SRAM and
 * boot-flash windows.  The window enable/disable ordering here is
 * load-bearing; do not reorder.
 */
static void smp_hdpu_kick_cpu(int nr)
{
	volatile unsigned int *bootaddr;

	if (ppc_md.progress)
		ppc_md.progress("smp_hdpu_kick_cpu", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK);

	/* Disable BootCS. Must also reduce the windows size to zero. */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0);

	bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE);
	if (!bootaddr) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0);
		return;
	}

	/* bootaddr is an int pointer, so +0x40 is byte offset 0x100 —
	 * the system-reset vector offset within the remapped SRAM. */
	memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20);

	/* map SRAM to 0xfff00000 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Enable CPU1 arbitration */
	mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9));

	/*
	 * Wait 100mSecond until other CPU has reached __secondary_start.
	 * When it reaches, it is permittable to rever the SRAM mapping etc...
	 */
	mdelay(100);
	/* Publish the CPU number at KERNELBASE and flush it to memory so
	 * the secondary (running uncached) sees it. */
	*(unsigned long *)KERNELBASE = nr;
	asm volatile ("dcbf	0,%0"::"r" (KERNELBASE):"memory");

	iounmap(bootaddr);

	/* Set up window for internal sram (256KByte insize) */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	/*
	 * Set up windows for embedded FLASH (using boot CS window).
	 */

	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
}
/*
 * Per-CPU SMP setup: clear and unmask this CPU's doorbell, then
 * register its doorbell interrupt handler.  CPU1 additionally enables
 * its own L1 parity and L2 cache (CPU0 did this in hdpu_setup_arch()).
 */
static void smp_hdpu_setup_cpu(int cpu_nr)
{
	if (cpu_nr == 0) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 0", 0);
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
		request_irq(60, hdpu_smp_cpu0_int_handler,
			    SA_INTERRUPT, hdpu_smp0, 0);
	}

	if (cpu_nr == 1) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_setup_cpu 1", 0);

		hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR |
				  CPUSTATE_KERNEL_CPU1_OK);

		/* Enable L1 Parity Bits */
		hdpu_set_l1pe();

		/* Enable L2 cache */
		_set_L2CR(0);
		_set_L2CR(0x80080000);

		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0);
		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
		request_irq(28, hdpu_smp_cpu1_int_handler,
			    SA_INTERRUPT, hdpu_smp1, 0);
	}

}
/*
 * Timebase handoff, giver side (runs on CPU0): freeze both CPUs'
 * timebases via the board's TBEN register, publish the current TB value
 * through the shared timebase_upper/lower mailbox, wait for the taker
 * to consume it (it zeroes the mailbox), then unfreeze.
 * NOTE(review): the ioremap() return is not checked before *val = 0.
 */
void __devinit hdpu_tben_give()
{
	volatile unsigned long *val = 0;

	/* By writing 0 to the TBEN_BASE, the timebases is frozen */
	val = ioremap(HDPU_TBEN_BASE, 4);
	*val = 0;
	mb();

	spin_lock(&timebase_lock);
	timebase_upper = get_tbu();
	timebase_lower = get_tbl();
	spin_unlock(&timebase_lock);

	while (timebase_upper || timebase_lower)
		barrier();

	/* By writing 1 to the TBEN_BASE, the timebases is thawed */
	*val = 1;
	mb();

	iounmap(val);
}
/*
 * Timebase handoff, taker side (runs on CPU1): wait for the giver to
 * publish the timebase, load it into this CPU's TB registers, and zero
 * the mailbox to signal the giver that the value was consumed.
 */
void __devinit hdpu_tben_take()
{
	while (!(timebase_upper || timebase_lower))
		barrier();

	spin_lock(&timebase_lock);
	set_tb(timebase_upper, timebase_lower);
	timebase_upper = 0;
	timebase_lower = 0;
	spin_unlock(&timebase_lock);
}
/* SMP operations table installed into smp_ops by platform_init(). */
static struct smp_ops_t hdpu_smp_ops = {
	.message_pass = smp_hdpu_message_pass,
	.probe = smp_hdpu_probe,
	.kick_cpu = smp_hdpu_kick_cpu,
	.setup_cpu = smp_hdpu_setup_cpu,
	.give_timebase = hdpu_tben_give,
	.take_timebase = hdpu_tben_take,
};
#endif				/* CONFIG_SMP */
/*
 * Kernel entry for board setup: decode the bootloader registers, then
 * install all the ppc_md hooks and (when configured) early progress
 * output, SMP operations, and the platform-device notifier.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	parse_bootinfo(r3, r4, r5, r6, r7);

	isa_mem_base = 0;

	ppc_md.setup_arch = hdpu_setup_arch;
	ppc_md.init = hdpu_init2;
	ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
	ppc_md.init_IRQ = hdpu_init_irq;
	ppc_md.get_irq = mv64360_get_irq;
	ppc_md.restart = hdpu_restart;
	ppc_md.power_off = hdpu_power_off;
	ppc_md.halt = hdpu_halt;
	ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
	ppc_md.calibrate_decr = hdpu_calibrate_decr;
	ppc_md.setup_io_mappings = hdpu_map_io;

	/* Bridge registers are accessed 1:1 until the MMU mapping from
	 * hdpu_map_io()/hdpu_set_bat() takes over. */
	bh.p_base = CONFIG_MV64X60_NEW_BASE;
	bh.v_base = (unsigned long *)bh.p_base;

	hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = hdpu_mpsc_progress;	/* embedded UART */
	mv64x60_progress_init(bh.p_base);
#endif				/* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
	smp_ops = &hdpu_smp_ops;
#endif				/* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
	platform_notify = hdpu_platform_notify;
#endif
	return;
}
#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/* SMP safe version of the serial text debug routine. Uses Semaphore 0 */
void hdpu_mpsc_progress(char *s, unsigned short hex)
{
	/* Spin until this CPU owns hardware semaphore 0, print, then
	 * release the semaphore by writing 0xff back. */
	while (mv64x60_read(&bh, MV64360_WHO_AM_I) !=
	       mv64x60_read(&bh, MV64360_SEMAPHORE_0)) {
	}
	mv64x60_mpsc_progress(s, hex);
	mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff);
}
#endif
  794. static void hdpu_cpustate_set(unsigned char new_state)
  795. {
  796. unsigned int state = (new_state << 21);
  797. mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21));
  798. mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state);
  799. }
#ifdef CONFIG_MTD_PHYSMAP
/* Flash layout of the 64 MB (0x04000000) embedded flash: root FS,
 * user FS, kernel image, boot environment, and boot ROM, covering the
 * device contiguously from offset 0. */
static struct mtd_partition hdpu_partitions[] = {
	{
	 .name = "Root FS",
	 .size = 0x03400000,
	 .offset = 0,
	 .mask_flags = 0,
	 },{
	 .name = "User FS",
	 .size = 0x00800000,
	 .offset = 0x03400000,
	 .mask_flags = 0,
	 },{
	 .name = "Kernel Image",
	 .size = 0x002C0000,
	 .offset = 0x03C00000,
	 .mask_flags = 0,
	 },{
	 .name = "bootEnv",
	 .size = 0x00040000,
	 .offset = 0x03EC0000,
	 .mask_flags = 0,
	 },{
	 .name = "bootROM",
	 .size = 0x00100000,
	 .offset = 0x03F00000,
	 .mask_flags = 0,
	 }
};
  829. static int __init hdpu_setup_mtd(void)
  830. {
  831. physmap_set_partitions(hdpu_partitions, 5);
  832. return 0;
  833. }
  834. arch_initcall(hdpu_setup_mtd);
  835. #endif
#ifdef CONFIG_HDPU_FEATURES
/* MMIO resources handed to the hdpu_features drivers.
 * NOTE(review): struct resource .end is normally inclusive
 * (start + size - 1); both .end values below look suspicious
 * (CLR + 1, and BASE + SIZE) — confirm against the hdpu_features
 * driver's expectations before changing. */
static struct resource hdpu_cpustate_resources[] = {
	[0] = {
	       .name = "addr base",
	       .start = MV64x60_GPP_VALUE_SET,
	       .end = MV64x60_GPP_VALUE_CLR + 1,
	       .flags = IORESOURCE_MEM,
	       },
};

static struct resource hdpu_nexus_resources[] = {
	[0] = {
	       .name = "nexus register",
	       .start = HDPU_NEXUS_ID_BASE,
	       .end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE,
	       .flags = IORESOURCE_MEM,
	       },
};
/* Platform devices for the cpustate LED field and the Nexus ID CPLD,
 * registered by hdpu_add_pds() below. */
static struct platform_device hdpu_cpustate_device = {
	.name = HDPU_CPUSTATE_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_cpustate_resources),
	.resource = hdpu_cpustate_resources,
};

static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};
  865. static int __init hdpu_add_pds(void)
  866. {
  867. platform_device_register(&hdpu_cpustate_device);
  868. platform_device_register(&hdpu_nexus_device);
  869. return 0;
  870. }
  871. arch_initcall(hdpu_add_pds);
  872. #endif