setup.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 Cavium Networks
 * Copyright (C) 2008, 2009 Wind River Systems
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h>	/* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/kexec.h>

#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-mio-defs.h>

extern struct plat_smp_ops octeon_smp_ops;

#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif

static unsigned long long MAX_MEMORY = 512ull << 20;

struct octeon_boot_descriptor *octeon_boot_desc_ptr;

struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);

static unsigned long long RESERVE_LOW_MEM = 0ull;

#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
 * Wait until the relocation code is prepared, then make the
 * secondary CPUs spin until the kernel has been relocated.
 */
static void octeon_kexec_smp_down(void *ignored)
{
        int cpu = smp_processor_id();

        local_irq_disable();
        set_cpu_online(cpu, false);
        while (!atomic_read(&kexec_ready_to_reboot))
                cpu_relax();

        asm volatile (
        "       sync            \n"
        "       synci   ($0)    \n");

        relocated_kexec_smp_wait(NULL);
}
#endif

#define OCTEON_DDR0_BASE        (0x0ULL)
#define OCTEON_DDR0_SIZE        (0x010000000ULL)
#define OCTEON_DDR1_BASE        (0x410000000ULL)
#define OCTEON_DDR1_SIZE        (0x010000000ULL)
#define OCTEON_DDR2_BASE        (0x020000000ULL)
#define OCTEON_DDR2_SIZE        (0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)

static struct kimage *kimage_ptr;
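
/*
 * Reinitialize the cvmx bootmem free lists for the kexec'd kernel:
 * free all of DDR except the reserved low-memory area, capping the
 * requested size at the maximum supported physical memory.
 */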
static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
{
        int64_t addr;
        struct cvmx_bootmem_desc *bootmem_desc;

        bootmem_desc = cvmx_bootmem_get_desc();

        if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
                mem_size = OCTEON_MAX_PHY_MEM_SIZE;
                pr_err("Error: requested memory too large, truncating to maximum size\n");
        }

        bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
        bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;

        addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
        bootmem_desc->head_addr = 0;

        if (mem_size <= OCTEON_DDR0_SIZE) {
                __cvmx_bootmem_phy_free(addr,
                                mem_size - RESERVE_LOW_MEM -
                                low_reserved_bytes, 0);
                return;
        }

        __cvmx_bootmem_phy_free(addr,
                        OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
                        low_reserved_bytes, 0);

        mem_size -= OCTEON_DDR0_SIZE;

        if (mem_size > OCTEON_DDR1_SIZE) {
                __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
                __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
                                mem_size - OCTEON_DDR1_SIZE, 0);
        } else
                __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
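
/*
 * Called before loading a kexec image: split the "kexec" command-line
 * segment into the bootloader-style argv array and remember the image
 * so its segments can be protected during shutdown.
 */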
static int octeon_kexec_prepare(struct kimage *image)
{
        int i;
        char *bootloader = "kexec";

        octeon_boot_desc_ptr->argc = 0;
        for (i = 0; i < image->nr_segments; i++) {
                if (!strncmp(bootloader, (char *)image->segment[i].buf,
                                strlen(bootloader))) {
                        /*
                         * convert command line string to array
                         * of parameters (as bootloader does).
                         */
                        int argc = 0, offt;
                        char *str = (char *)image->segment[i].buf;
                        char *ptr = strchr(str, ' ');
                        while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
                                *ptr = '\0';
                                if (ptr[1] != ' ') {
                                        offt = (int)(ptr - str + 1);
                                        octeon_boot_desc_ptr->argv[argc] =
                                                image->segment[i].mem + offt;
                                        argc++;
                                }
                                ptr = strchr(ptr + 1, ' ');
                        }
                        octeon_boot_desc_ptr->argc = argc;
                        break;
                }
        }

        /*
         * Information about segments will be needed during pre-boot memory
         * initialization.
         */
        kimage_ptr = image;
        return 0;
}
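
/*
 * Common pre-kexec teardown: disable the per-core watchdogs, rebuild
 * the bootmem state for the next kernel (full reinit for a normal
 * kexec, named-block frees only for a crash kernel) and set up the
 * boot descriptor arguments passed to the new kernel.
 */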
static void octeon_generic_shutdown(void)
{
        int i;
#ifdef CONFIG_SMP
        int cpu;
#endif
        struct cvmx_bootmem_desc *bootmem_desc;
        void *named_block_array_ptr;

        bootmem_desc = cvmx_bootmem_get_desc();
        named_block_array_ptr =
                cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);

#ifdef CONFIG_SMP
        /* disable watchdogs */
        for_each_online_cpu(cpu)
                cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
        cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
        if (kimage_ptr != kexec_crash_image) {
                memset(named_block_array_ptr,
                        0x0,
                        CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
                        sizeof(struct cvmx_bootmem_named_block_desc));
                /*
                 * Mark all memory (except low 0x100000 bytes) as free.
                 * This is the same thing the bootloader does.
                 */
                kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
                                0x100000);
                /*
                 * Allocate all segments to avoid their corruption during boot.
                 */
                for (i = 0; i < kimage_ptr->nr_segments; i++)
                        cvmx_bootmem_alloc_address(
                                kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
                                kimage_ptr->segment[i].mem - PAGE_SIZE,
                                PAGE_SIZE);
        } else {
                /*
                 * Do not mark all memory as free. Free only named sections
                 * leaving the rest of memory unchanged.
                 */
                struct cvmx_bootmem_named_block_desc *ptr =
                        (struct cvmx_bootmem_named_block_desc *)
                        named_block_array_ptr;

                for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
                        if (ptr[i].size)
                                cvmx_bootmem_free_named(ptr[i].name);
        }
        kexec_args[2] = 1UL; /* running on octeon_main_processor */
        kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
        secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
        secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}
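
/*
 * Shutdown hook for a regular kexec: after the common teardown, park
 * the secondary CPUs until the relocation code takes over.
 */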
static void octeon_shutdown(void)
{
        octeon_generic_shutdown();
#ifdef CONFIG_SMP
        smp_call_function(octeon_kexec_smp_down, NULL, 0);
        smp_wmb();
        while (num_online_cpus() > 1) {
                cpu_relax();
                mdelay(1);
        }
#endif
}
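
/* Shutdown hook used when kexec'ing into a crash kernel. */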
static void octeon_crash_shutdown(struct pt_regs *regs)
{
        octeon_generic_shutdown();
        default_machine_crash_shutdown(regs);
}

#endif /* CONFIG_KEXEC */

#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif

#ifdef CONFIG_KEXEC
/* The crashkernel cmdline parameter is parsed _after_ memory setup,
 * so we also parse it here (workaround for EHB5200). */
static uint64_t crashk_size, crashk_base;
#endif

static int octeon_uart;

extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);

/**
 * Return non-zero if we are currently running in the Octeon simulator
 *
 * Returns non-zero when running in the simulator, zero on real hardware.
 */
int octeon_is_simulation(void)
{
        return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);

/**
 * Return true if Octeon is in PCI Host mode. This means
 * Linux can control the PCI bus.
 *
 * Returns non-zero if Octeon is in host mode.
 */
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
        return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
        return 0;
#endif
}

/**
 * Get the clock rate of Octeon
 *
 * Returns Clock rate in HZ
 */
uint64_t octeon_get_clock_rate(void)
{
        struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

        return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);

static u64 octeon_io_clock_rate;

u64 octeon_get_io_clock_rate(void)
{
        return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);

/**
 * Write to the LCD display connected to the bootbus. This display
 * exists on most Cavium evaluation boards. If it doesn't exist, then
 * this function doesn't do anything.
 *
 * @s: String to write
 */
void octeon_write_lcd(const char *s)
{
        if (octeon_bootinfo->led_display_base_addr) {
                void __iomem *lcd_address =
                        ioremap_nocache(octeon_bootinfo->led_display_base_addr,
                                        8);
                int i;
                for (i = 0; i < 8; i++, s++) {
                        if (*s)
                                iowrite8(*s, lcd_address + i);
                        else
                                iowrite8(' ', lcd_address + i);
                }
                iounmap(lcd_address);
        }
}

/**
 * Return the console uart passed by the bootloader
 *
 * Returns uart (0 or 1)
 */
int octeon_get_boot_uart(void)
{
        int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
        uart = 1;
#else
        uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
                1 : 0;
#endif
        return uart;
}

/**
 * Get the coremask Linux was booted on.
 *
 * Returns Core mask
 */
int octeon_get_boot_coremask(void)
{
        return octeon_boot_desc_ptr->core_mask;
}

/**
 * Check the hardware BIST results for a CPU
 */
void octeon_check_cpu_bist(void)
{
        const int coreid = cvmx_get_core_num();
        unsigned long long mask;
        unsigned long long bist_val;

        /* Check BIST results for COP0 registers */
        mask = 0x1f00000000ull;
        bist_val = read_octeon_c0_icacheerr();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
                       coreid, bist_val);

        bist_val = read_octeon_c0_dcacheerr();
        if (bist_val & 1)
                pr_err("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%llx\n",
                       coreid, bist_val);

        mask = 0xfc00000000000000ull;
        bist_val = read_c0_cvmmemctl();
        if (bist_val & mask)
                pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
                       coreid, bist_val);

        write_octeon_c0_dcacheerr(0);
}

/**
 * Reboot Octeon
 *
 * @command: Command to pass to the bootloader. Currently ignored.
 */
static void octeon_restart(char *command)
{
        /* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
        cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif

        mb();
        while (1)
                cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}

/**
 * Permanently stop a core.
 *
 * @arg: Ignored.
 */
static void octeon_kill_core(void *arg)
{
        if (octeon_is_simulation())
                /* A break instruction causes the simulator to stop a core */
                asm volatile ("break" ::: "memory");

        local_irq_disable();
        /* Disable watchdog on this core. */
        cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);

        /* Spin in a low power mode. */
        while (true)
                asm volatile ("wait" ::: "memory");
}

/**
 * Halt the system
 */
static void octeon_halt(void)
{
        smp_call_function(octeon_kill_core, NULL, 0);

        switch (octeon_bootinfo->board_type) {
        case CVMX_BOARD_TYPE_NAO38:
                /* Driving a 1 to GPIO 12 shuts off this board */
                cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
                cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
                break;
        default:
                octeon_write_lcd("PowerOff");
                break;
        }

        octeon_kill_core(NULL);
}

/**
 * Return a string representing the system type
 *
 * Returns the board name and Octeon model string.
 */
const char *octeon_board_type_string(void)
{
        static char name[80];
        sprintf(name, "%s (%s)",
                cvmx_board_type_to_string(octeon_bootinfo->board_type),
                octeon_model_get_string(read_c0_prid()));
        return name;
}

const char *get_system_type(void)
        __attribute__ ((alias("octeon_board_type_string")));
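
/*
 * Program the per-core CVMMEMCTL register (write buffer, XKPHYS access
 * and CVMSEG settings) and the FAU/POW hardware timeouts.
 */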
void octeon_user_io_init(void)
{
        union octeon_cvmemctl cvmmemctl;
        union cvmx_iob_fau_timeout fau_timeout;
        union cvmx_pow_nw_tim nm_tim;

        /* Get the current settings for CP0_CVMMEMCTL_REG */
        cvmmemctl.u64 = read_c0_cvmmemctl();
        /* R/W If set, marked write-buffer entries time out the same
         * as other entries; if clear, marked write-buffer entries
         * use the maximum timeout. */
        cvmmemctl.s.dismarkwblongto = 1;
        /* R/W If set, a merged store does not clear the write-buffer
         * entry timeout state. */
        cvmmemctl.s.dismrgclrwbto = 0;
        /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
         * word location for an IOBDMA. The other 8 bits come from the
         * SCRADDR field of the IOBDMA. */
        cvmmemctl.s.iobdmascrmsb = 0;
        /* R/W If set, SYNCWS and SYNCS only order marked stores; if
         * clear, SYNCWS and SYNCS only order unmarked
         * stores. SYNCWSMARKED has no effect when DISSYNCWS is
         * set. */
        cvmmemctl.s.syncwsmarked = 0;
        /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
        cvmmemctl.s.dissyncws = 0;
        /* R/W If set, no stall happens on write buffer full. */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
                cvmmemctl.s.diswbfst = 1;
        else
                cvmmemctl.s.diswbfst = 0;
        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with <48>==0 */
        cvmmemctl.s.xkmemenas = 0;
        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==0 */
        cvmmemctl.s.xkmemenau = 0;
        /* R/W If set (and SX set), supervisor-level loads/stores can
         * use XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenas = 0;
        /* R/W If set (and UX set), user-level loads/stores can use
         * XKPHYS addresses with VA<48>==1 */
        cvmmemctl.s.xkioenau = 0;
        /* R/W If set, all stores act as SYNCW (NOMERGE must be set
         * when this is set) RW, reset to 0. */
        cvmmemctl.s.allsyncw = 0;
        /* R/W If set, no stores merge, and all stores reach the
         * coherent bus in order. */
        cvmmemctl.s.nomerge = 0;
        /* R/W Selects the bit in the counter used for DID time-outs:
         * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
         * between 1x and 2x this interval. For example, with
         * DIDTTO=3, expiration interval is between 16K and 32K. */
        cvmmemctl.s.didtto = 0;
        /* R/W If set, the (mem) CSR clock never turns off. */
        cvmmemctl.s.csrckalwys = 0;
        /* R/W If set, mclk never turns off. */
        cvmmemctl.s.mclkalwys = 0;
        /* R/W Selects the bit in the counter used for write buffer
         * flush time-outs: (WBFLT+11) is the bit position in an
         * internal counter used to determine expiration. The write
         * buffer expires between 1x and 2x this interval. For
         * example, with WBFLT = 0, a write buffer expires between 2K
         * and 4K cycles after the write buffer entry is allocated. */
        cvmmemctl.s.wbfltime = 0;
        /* R/W If set, do not put Istream in the L2 cache. */
        cvmmemctl.s.istrnol2 = 0;

        /*
         * R/W The write buffer threshold. As per erratum Core-14752
         * for CN63XX, a sc/scd might fail if the write buffer is
         * full. Lowering WBTHRESH greatly lowers the chances of the
         * write buffer ever being full and triggering the erratum.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
                cvmmemctl.s.wbthresh = 4;
        else
                cvmmemctl.s.wbthresh = 10;

        /* R/W If set, CVMSEG is available for loads/stores in
         * kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        cvmmemctl.s.cvmsegenak = 1;
#else
        cvmmemctl.s.cvmsegenak = 0;
#endif
        /* R/W If set, CVMSEG is available for loads/stores in
         * supervisor mode. */
        cvmmemctl.s.cvmsegenas = 0;
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
        /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
         * is max legal value. */
        cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;

        write_c0_cvmmemctl(cvmmemctl.u64);

        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);

        /* Set a default for the hardware timeouts */
        fau_timeout.u64 = 0;
        fau_timeout.s.tout_val = 0xfff;
        /* Disable tagwait FAU timeout */
        fau_timeout.s.tout_enb = 0;
        cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);

        nm_tim.u64 = 0;
        /* 4096 cycles */
        nm_tim.s.nw_tim = 3;
        cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);

        write_octeon_c0_icacheerr(0);
        write_c0_derraddr1(0);
}

/**
 * Early entry point for arch setup
 */
void __init prom_init(void)
{
        struct cvmx_sysinfo *sysinfo;
        const char *arg;
        char *p;
        int i;
        int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
#endif

        /*
         * The bootloader passes a pointer to the boot descriptor in
         * $a3, this is available as fw_arg3.
         */
        octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
        octeon_bootinfo =
                cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
        cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));

        sysinfo = cvmx_sysinfo_get();
        memset(sysinfo, 0, sizeof(*sysinfo));
        sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
        sysinfo->phy_mem_desc_ptr =
                cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
        sysinfo->core_mask = octeon_bootinfo->core_mask;
        sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
        sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
        sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
        sysinfo->board_type = octeon_bootinfo->board_type;
        sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
        sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
        memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
               sizeof(sysinfo->mac_addr_base));
        sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
        memcpy(sysinfo->board_serial_number,
               octeon_bootinfo->board_serial_number,
               sizeof(sysinfo->board_serial_number));
        sysinfo->compact_flash_common_base_addr =
                octeon_bootinfo->compact_flash_common_base_addr;
        sysinfo->compact_flash_attribute_base_addr =
                octeon_bootinfo->compact_flash_attribute_base_addr;
        sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;

        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                /* I/O clock runs at a different rate than the CPU. */
                union cvmx_mio_rst_boot rst_boot;
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
        } else {
                octeon_io_clock_rate = sysinfo->cpu_clock_hz;
        }

        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
         */
        if (!octeon_is_simulation() &&
            octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
                cvmx_write_csr(CVMX_LED_EN, 0);
                cvmx_write_csr(CVMX_LED_PRT, 0);
                cvmx_write_csr(CVMX_LED_DBG, 0);
                cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
                cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
                cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
                cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
                cvmx_write_csr(CVMX_LED_EN, 1);
        }
#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * We need to temporarily allocate all memory in the reserve32
         * region. This makes sure the kernel doesn't allocate this
         * memory when it is getting memory from the
         * bootloader. Later, after the memory allocations are
         * complete, the reserve32 will be freed.
         *
         * Allocate memory for RESERVED32 aligned on 2MB boundary. This
         * is in case we later use hugetlb entries with it.
         */
        addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
                                                  0, 0, 2 << 20,
                                                  "CAVIUM_RESERVE32", 0);
        if (addr < 0)
                pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
        else
                octeon_reserve32_memory = addr;
#endif

#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
        if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
                pr_info("Skipping L2 locking due to reduced L2 cache size\n");
        } else {
                uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
                /* TLB refill */
                cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
                /* General exception */
                cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
                /* Interrupt handler */
                cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
                cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
                cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
                cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
        }
#endif

        octeon_check_cpu_bist();

        octeon_uart = octeon_get_boot_uart();

#ifdef CONFIG_SMP
        octeon_write_lcd("LinuxSMP");
#else
        octeon_write_lcd("Linux");
#endif

#ifdef CONFIG_CAVIUM_GDB
        /*
         * When debugging the linux kernel, force the cores to enter
         * the debug exception handler to break in.
         */
        if (octeon_get_boot_debug_flag()) {
                cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
                cvmx_read_csr(CVMX_CIU_DINT);
        }
#endif

        octeon_setup_delays();

        /*
         * BIST should always be enabled when doing a soft reset. L2
         * cache locking for instance is not cleared unless BIST is
         * enabled. Unfortunately, due to chip errata G-200 for
         * CN38XX and CN31XX, BIST must be disabled on these parts.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
            OCTEON_IS_MODEL(OCTEON_CN31XX))
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
        else
                cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);

        /* Default to 64MB in the simulator to speed things up */
        if (octeon_is_simulation())
                MAX_MEMORY = 64ull << 20;

        arg = strstr(arcs_cmdline, "mem=");
        if (arg) {
                MAX_MEMORY = memparse(arg + 4, &p);
                if (MAX_MEMORY == 0)
                        MAX_MEMORY = 32ull << 30;
                if (*p == '@')
                        RESERVE_LOW_MEM = memparse(p + 1, &p);
        }

        arcs_cmdline[0] = 0;
        argc = octeon_boot_desc_ptr->argc;
        for (i = 0; i < argc; i++) {
                const char *arg =
                        cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
                if ((strncmp(arg, "MEM=", 4) == 0) ||
                    (strncmp(arg, "mem=", 4) == 0)) {
                        MAX_MEMORY = memparse(arg + 4, &p);
                        if (MAX_MEMORY == 0)
                                MAX_MEMORY = 32ull << 30;
                        if (*p == '@')
                                RESERVE_LOW_MEM = memparse(p + 1, &p);
                } else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
                        __cvmx_interrupt_ecc_report_single_bit_errors = 1;
                        pr_notice("Reporting of single bit ECC errors is turned on\n");
#endif
#ifdef CONFIG_KEXEC
                } else if (strncmp(arg, "crashkernel=", 12) == 0) {
                        crashk_size = memparse(arg+12, &p);
                        if (*p == '@')
                                crashk_base = memparse(p+1, &p);
                        strcat(arcs_cmdline, " ");
                        strcat(arcs_cmdline, arg);
                        /*
                         * To do: switch parsing to new style, something like:
                         * parse_crashkernel(arg, sysinfo->system_dram_size,
                         *                   &crashk_size, &crashk_base);
                         */
#endif
                } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
                           sizeof(arcs_cmdline) - 1) {
                        strcat(arcs_cmdline, " ");
                        strcat(arcs_cmdline, arg);
                }
        }

        if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
                strcat(arcs_cmdline, " console=ttyS0,115200");
#else
                if (octeon_uart == 1)
                        strcat(arcs_cmdline, " console=ttyS1,115200");
                else
                        strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
        }

        if (octeon_is_simulation()) {
                /*
                 * The simulator uses an mtdram device pre-filled with
                 * the filesystem. Also specify the calibration delay
                 * to avoid calculating it every time.
                 */
                strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
        }

        mips_hpt_frequency = octeon_get_clock_rate();

        octeon_init_cvmcount();

        _machine_restart = octeon_restart;
        _machine_halt = octeon_halt;

#ifdef CONFIG_KEXEC
        _machine_kexec_shutdown = octeon_shutdown;
        _machine_crash_shutdown = octeon_crash_shutdown;
        _machine_kexec_prepare = octeon_kexec_prepare;
#endif

        octeon_user_io_init();
        register_smp_ops(&octeon_smp_ops);
}

/* Exclude a single page from the regions obtained in plat_mem_setup. */
#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
        if (addr > *mem && addr < *mem + *size) {
                u64 inc = addr - *mem;
                add_memory_region(*mem, inc, BOOT_MEM_RAM);
                *mem += inc;
                *size -= inc;
        }

        if (addr == *mem && *size > PAGE_SIZE) {
                *mem += PAGE_SIZE;
                *size -= PAGE_SIZE;
        }
}
#endif /* CONFIG_CRASH_DUMP */
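
/*
 * Build the boot memory map by carving free blocks out of the
 * bootloader's bootmem allocator, avoiding the pages at the edges of
 * the PCIe BAR1 window and, when configured, the crashkernel region.
 */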
void __init plat_mem_setup(void)
{
        uint64_t mem_alloc_size;
        uint64_t total;
        uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
        int64_t memory;
        uint64_t kernel_start;
        uint64_t kernel_size;
#endif

        total = 0;
        crashk_end = 0;

        /*
         * The Mips memory init uses the first memory location for
         * some memory vectors. When SPARSEMEM is in use, it doesn't
         * verify that the size is big enough for the final
         * vectors. Making the smallest chunk 4MB seems to be enough
         * to consistently work.
         */
        mem_alloc_size = 4 << 20;
        if (mem_alloc_size > MAX_MEMORY)
                mem_alloc_size = MAX_MEMORY;

        /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
        add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
        total += MAX_MEMORY;
#else
#ifdef CONFIG_KEXEC
        if (crashk_size > 0) {
                add_memory_region(crashk_base, crashk_size, BOOT_MEM_RAM);
                crashk_end = crashk_base + crashk_size;
        }
#endif
        /*
         * When allocating memory, we want incrementing addresses from
         * bootmem_alloc so the code in add_memory_region can merge
         * regions next to each other.
         */
        cvmx_bootmem_lock();
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
                && (total < MAX_MEMORY)) {
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
                                                __pa_symbol(&__init_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
                if (memory >= 0) {
                        u64 size = mem_alloc_size;
#ifdef CONFIG_KEXEC
                        uint64_t end;
#endif

                        /*
                         * exclude a page at the beginning and end of
                         * the 256MB PCIe 'hole' so the kernel will not
                         * try to allocate multi-page buffers that
                         * span the discontinuity.
                         */
                        memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
                                            &memory, &size);
                        memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
                                            CVMX_PCIE_BAR1_PHYS_SIZE,
                                            &memory, &size);
#ifdef CONFIG_KEXEC
                        end = memory + mem_alloc_size;

                        /*
                         * This function automatically merges address regions
                         * next to each other if they are received in
                         * incrementing order
                         */
                        if (memory < crashk_base && end > crashk_end) {
                                /*
                                 * The crash region lies entirely within this
                                 * allocation; add the pieces on either side.
                                 */
                                add_memory_region(memory,
                                                  crashk_base - memory,
                                                  BOOT_MEM_RAM);
                                total += crashk_base - memory;
                                add_memory_region(crashk_end,
                                                  end - crashk_end,
                                                  BOOT_MEM_RAM);
                                total += end - crashk_end;
                                continue;
                        }

                        if (memory >= crashk_base && end <= crashk_end)
                                /*
                                 * Entire memory region is within the new
                                 * kernel's memory, ignore it.
                                 */
                                continue;

                        if (memory > crashk_base && memory < crashk_end &&
                            end > crashk_end) {
                                /*
                                 * The allocation starts inside the crash
                                 * region; keep only the part past its end.
                                 */
                                mem_alloc_size -= crashk_end - memory;
                                memory = crashk_end;
                        } else if (memory < crashk_base && end > crashk_base &&
                                   end < crashk_end)
                                /*
                                 * The allocation ends inside the crash
                                 * region; chop off the overlapping tail.
                                 */
                                mem_alloc_size -= end - crashk_base;
#endif
                        add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
                        total += mem_alloc_size;
                        /* Recovering mem_alloc_size */
                        mem_alloc_size = 4 << 20;
                } else {
                        break;
                }
        }
        cvmx_bootmem_unlock();
        /* Add the memory region for the kernel. */
        kernel_start = (unsigned long) _text;
        kernel_size = ALIGN(_end - _text, 0x100000);

        /* Adjust for physical offset. */
        kernel_start &= ~0xffffffff80000000ULL;
        add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
#endif /* CONFIG_CRASH_DUMP */

#ifdef CONFIG_CAVIUM_RESERVE32
        /*
         * Now that we've allocated the kernel memory it is safe to
         * free the reserved region. We free it here so that builtin
         * drivers can use the memory.
         */
        if (octeon_reserve32_memory)
                cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */

        if (total == 0)
                panic("Unable to allocate memory from cvmx_bootmem_phy_alloc\n");
}

/*
 * Emit one character to the boot UART. Exported for use by the
 * watchdog timer.
 */
int prom_putchar(char c)
{
        uint64_t lsrval;

        /* Spin until there is room */
        do {
                lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
        } while ((lsrval & 0x20) == 0);

        /* Write the byte */
        cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
        return 1;
}
EXPORT_SYMBOL(prom_putchar);
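
/*
 * On CN63XX pass 1 parts, sanity-check that the kernel was built with
 * the Core-14449 workaround by inspecting the PREF instruction that
 * was emitted for the prefetch() call below.
 */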
void prom_free_prom_memory(void)
{
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
                /* Check for presence of Core-14449 fix. */
                u32 insn;
                u32 *foo;

                foo = &insn;

                asm volatile("# before" : : : "memory");
                prefetch(foo);
                asm volatile(
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        "bal 1f\n\t"
                        "nop\n"
                        "1:\tlw %0,-12($31)\n\t"
                        ".set pop\n\t"
                        : "=r" (insn) : : "$31", "memory");

                if ((insn >> 26) != 0x33)
                        panic("No PREF instruction at Core-14449 probe point.");

                if (((insn >> 16) & 0x1f) != 28)
                        panic("Core-14449 WAR not in place (%04x).\n"
                              "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
                              insn);
        }
}

int octeon_prune_device_tree(void);

extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_3xxx_end;
extern const char __dtb_octeon_68xx_begin;
extern const char __dtb_octeon_68xx_end;
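
/*
 * Pick the device tree to use: the one passed by the bootloader when
 * it is recent enough, otherwise a built-in tree that is pruned to
 * match the detected hardware.
 */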
void __init device_tree_init(void)
{
        int dt_size;
        struct boot_param_header *fdt;
        bool do_prune;

        if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
                fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
                if (fdt_check_header(fdt))
                        panic("Corrupt Device Tree passed to kernel.");
                dt_size = be32_to_cpu(fdt->totalsize);
                do_prune = false;
        } else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
                dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
                do_prune = true;
        } else {
                fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
                dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
                do_prune = true;
        }

        /* Copy the default tree from init memory. */
        initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
        if (initial_boot_params == NULL)
                panic("Could not allocate initial_boot_params\n");
        memcpy(initial_boot_params, fdt, dt_size);

        if (do_prune) {
                octeon_prune_device_tree();
                pr_info("Using internal Device Tree.\n");
        } else {
                pr_info("Using passed Device Tree.\n");
        }
        unflatten_device_tree();
}

static int __initdata disable_octeon_edac_p;

static int __init disable_octeon_edac(char *str)
{
        disable_octeon_edac_p = 1;
        return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);

static char *edac_device_names[] = {
        "octeon_l2c_edac",
        "octeon_pc_edac",
};
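
/*
 * Register the Octeon EDAC platform devices (L2C, PC and one LMC
 * instance per memory controller) unless disabled on the command line.
 */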
static int __init edac_devinit(void)
{
        struct platform_device *dev;
        int i, err = 0;
        int num_lmc;
        char *name;

        if (disable_octeon_edac_p)
                return 0;

        for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
                name = edac_device_names[i];
                dev = platform_device_register_simple(name, -1, NULL, 0);
                if (IS_ERR(dev)) {
                        pr_err("Registration of %s failed!\n", name);
                        err = PTR_ERR(dev);
                }
        }

        num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
                (OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
        for (i = 0; i < num_lmc; i++) {
                dev = platform_device_register_simple("octeon_lmc_edac",
                                                      i, NULL, 0);
                if (IS_ERR(dev)) {
                        pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
                        err = PTR_ERR(dev);
                }
        }

        return err;
}
device_initcall(edac_devinit);