/* head.S — MicroBlaze kernel entry code (scraped listing, ~11 KB).
 * NOTE(review): the concatenated line-number gutter from the original web
 * rendering was removed here; each remaining line still carries its "N."
 * listing prefix from the scrape. */
  1. /*
  2. * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
  3. * Copyright (C) 2007-2009 PetaLogix
  4. * Copyright (C) 2006 Atmark Techno, Inc.
  5. *
  6. * MMU code derived from arch/ppc/kernel/head_4xx.S:
  7. * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
  8. * Initial PowerPC version.
  9. * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
  10. * Rewritten for PReP
  11. * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
  12. * Low-level exception handers, MMU support, and rewrite.
  13. * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
  14. * PowerPC 8xx modifications.
  15. * Copyright (c) 1998-1999 TiVo, Inc.
  16. * PowerPC 403GCX modifications.
  17. * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
  18. * PowerPC 403GCX/405GP modifications.
  19. * Copyright 2000 MontaVista Software Inc.
  20. * PPC405 modifications
  21. * PowerPC 403GCX/405GP modifications.
  22. * Author: MontaVista Software, Inc.
  23. * frank_rowand@mvista.com or source@mvista.com
  24. * debbie_chu@mvista.com
  25. *
  26. * This file is subject to the terms and conditions of the GNU General Public
  27. * License. See the file "COPYING" in the main directory of this archive
  28. * for more details.
  29. */
  30. #include <linux/init.h>
  31. #include <linux/linkage.h>
  32. #include <asm/thread_info.h>
  33. #include <asm/page.h>
  34. #include <linux/of_fdt.h> /* for OF_DT_HEADER */
  35. #ifdef CONFIG_MMU
  36. #include <asm/setup.h> /* COMMAND_LINE_SIZE */
  37. #include <asm/mmu.h>
  38. #include <asm/processor.h>
  39. .section .data
/* empty_zero_page: one zero-filled page (.space emits zero bytes), aligned
 * on a 2^12 = 4096-byte boundary so it starts page-aligned. */
  40. .global empty_zero_page
  41. .align 12
  42. empty_zero_page:
  43. .space PAGE_SIZE
/* swapper_pg_dir: one page reserved immediately after empty_zero_page.
 * NOTE(review): presumably the initial kernel page directory consumed by
 * mmu_init — confirm against the MMU code; nothing in this file reads it. */
  44. .global swapper_pg_dir
  45. swapper_pg_dir:
  46. .space PAGE_SIZE
  47. #endif /* CONFIG_MMU */
  48. .section .rodata
  49. .align 4
/* endian_check: the word value 1, re-read as a single byte by the startup
 * code below (lbui at _start) — yields 0 on a big-endian platform and 1 on
 * a little-endian one. */
  50. endian_check:
  51. .word 1
  52. __HEAD
  53. ENTRY(_start)
/* When the kernel is linked at physical address 0, jump over the reserved
 * low region (.org 0x100) to real_start; TOPHYS yields the physical target
 * since the MMU is still off here. */
  54. #if CONFIG_KERNEL_BASE_ADDR == 0
  55. brai TOPHYS(real_start)
  56. .org 0x100
  57. real_start:
  58. #endif
/* Clear the Machine Status Register (r0 always reads as zero on MicroBlaze,
 * as its use for clearing registers throughout this file relies on). */
  59. mts rmsr, r0
  60. /*
  61. * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
  62. * if the msrclr instruction is not enabled. We use this to detect
  63. * if the opcode is available, by issuing msrclr and then testing the result.
  64. * r8 == 0 - msr instructions are implemented
  65. * r8 != 0 - msr instructions are not implemented
  66. */
  67. mfs r1, rmsr
  68. msrclr r8, 0 /* clear nothing - just read msr for test */
/* After this compare r8 is zero iff msrclr returned the same value mfs did,
 * i.e. iff the msr* opcodes are implemented. */
  69. cmpu r8, r8, r1 /* r1 must contain msr reg content */
  70. /* r7 may point to an FDT, or there may be one linked in.
  71. if it's in r7, we've got to save it away ASAP.
  72. We ensure r7 points to a valid FDT, just in case the bootloader
  73. is broken or non-existent */
  74. beqi r7, no_fdt_arg /* NULL pointer? don't copy */
  75. /* Does r7 point to a valid FDT? Load HEADER magic number */
  76. /* Run time Big/Little endian platform */
  77. /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
  78. lbui r11, r0, TOPHYS(endian_check)
/* beqid's delay slot always executes: the big-endian lw runs first, and on
 * a little-endian platform (r11 != 0) the branch falls through so the lwr
 * reload overrides it. */
  79. beqid r11, big_endian /* DO NOT break delay stop dependency */
  80. lw r11, r0, r7 /* Big endian load in delay slot */
  81. lwr r11, r0, r7 /* Little endian load */
  82. big_endian:
/* r11 = OF_DT_HEADER - magic; zero iff the buffer at r7 starts with the
 * FDT magic number. */
  83. rsubi r11, r11, OF_DT_HEADER /* Check FDT header */
  84. beqi r11, _prepare_copy_fdt
  85. or r7, r0, r0 /* clear R7 when not valid DTB */
  86. bnei r11, no_fdt_arg /* No - get out of here */
/* Copy 0x8000 bytes (32 KB) of the FDT word-by-word from the pointer in r7
 * to _fdt_start; the loop-counter decrement executes in bgtid's delay slot. */
  87. _prepare_copy_fdt:
  88. or r11, r0, r0 /* incremment */
  89. ori r4, r0, TOPHYS(_fdt_start)
  90. ori r3, r0, (0x8000 - 4)
  91. _copy_fdt:
  92. lw r12, r7, r11 /* r12 = r7 + r11 */
  93. sw r12, r4, r11 /* addr[r4 + r11] = r12 */
  94. addik r11, r11, 4 /* increment counting */
  95. bgtid r3, _copy_fdt /* loop for all entries */
  96. addik r3, r3, -4 /* descrement loop */
  97. no_fdt_arg:
  98. #ifdef CONFIG_MMU
  99. #ifndef CONFIG_CMDLINE_BOOL
  100. /*
  101. * handling command line
  102. * copy command line directly to cmd_line placed in data section.
  103. */
/* Copy at most COMMAND_LINE_SIZE bytes from the bootloader pointer in r5
 * into cmd_line (accessed via its physical address), stopping early at the
 * first NUL byte; on exit r5 is repointed at the kernel's own copy. */
  104. beqid r5, skip /* Skip if NULL pointer */
  105. or r6, r0, r0 /* incremment */
  106. ori r4, r0, cmd_line /* load address of command line */
  107. tophys(r4,r4) /* convert to phys address */
  108. ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
  109. _copy_command_line:
  110. /* r2=r5+r6 - r5 contain pointer to command line */
  111. lbu r2, r5, r6
/* beqid: the sb in the delay slot still stores the terminating byte before
 * the early exit is taken. */
  112. beqid r2, skip /* Skip if no data */
  113. sb r2, r4, r6 /* addr[r4+r6]= r2*/
  114. addik r6, r6, 1 /* increment counting */
  115. bgtid r3, _copy_command_line /* loop for all entries */
  116. addik r3, r3, -1 /* decrement loop */
  117. addik r5, r4, 0 /* add new space for command line */
  118. tovirt(r5,r5)
  119. skip:
  120. #endif /* CONFIG_CMDLINE_BOOL */
/* Dead code: NOT_COMPILE is not defined anywhere in this file, so this
 * BRAM save loop is compiled out; kept for reference only. */
  121. #ifdef NOT_COMPILE
  122. /* save bram context */
  123. or r6, r0, r0 /* incremment */
  124. ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
  125. ori r3, r0, (LMB_SIZE - 4)
  126. _copy_bram:
  127. lw r7, r0, r6 /* r7 = r0 + r6 */
  128. sw r7, r4, r6 /* addr[r4 + r6] = r7*/
  129. addik r6, r6, 4 /* increment counting */
  130. bgtid r3, _copy_bram /* loop for all entries */
  131. addik r3, r3, -4 /* descrement loop */
  132. #endif
  133. /* We have to turn on the MMU right away. */
  134. /*
  135. * Set up the initial MMU state so we can do the first level of
  136. * kernel initialization. This maps the first 16 MBytes of memory 1:1
  137. * virtual to physical.
  138. */
  139. nop
/* Walk TLB indices MICROBLAZE_TLB_SIZE-1 down to 0, clearing each entry's
 * tag (rtlbhi, which holds the valid bit) and data (rtlblo) words; the
 * index decrement executes in bgtid's delay slot. */
  140. addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
  141. _invalidate:
  142. mts rtlbx, r3
  143. mts rtlbhi, r0 /* flush: ensure V is clear */
  144. mts rtlblo, r0
  145. bgtid r3, _invalidate /* loop for all entries */
  146. addik r3, r3, -1
  147. /* sync */
  148. /* Setup the kernel PID */
  149. mts rpid,r0 /* Load the kernel PID */
  150. nop
  151. bri 4
  152. /*
  153. * We should still be executing code at physical address area
  154. * RAM_BASEADDR at this point. However, kernel code is at
  155. * a virtual address. So, set up a TLB mapping to cover this once
  156. * translation is enabled.
  157. */
  158. addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
  159. tophys(r4,r3) /* Load the kernel physical address */
/* r12 = kernel image size: (_end - CONFIG_KERNEL_START) + CONFIG_KERNEL_PAD.
 * The ladder below splits that size into two TLB mapping sizes: r9 (TLB0)
 * and r10 (TLB1), each 0, 1MB, 4MB or 16MB. Each bgei tests whether the
 * size exceeds the subtracted threshold. */
  160. /* start to do TLB calculation */
  161. addik r12, r0, _end
  162. rsub r12, r3, r12
  163. addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */
  164. or r9, r0, r0 /* TLB0 = 0 */
  165. or r10, r0, r0 /* TLB1 = 0 */
  166. addik r11, r12, -0x1000000
  167. bgei r11, GT16 /* size is greater than 16MB */
  168. addik r11, r12, -0x0800000
  169. bgei r11, GT8 /* size is greater than 8MB */
  170. addik r11, r12, -0x0400000
  171. bgei r11, GT4 /* size is greater than 4MB */
  172. /* size is less than 4MB */
  173. addik r11, r12, -0x0200000
  174. bgei r11, GT2 /* size is greater than 2MB */
  175. addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
  176. addik r11, r12, -0x0100000
  177. bgei r11, GT1 /* size is greater than 1MB */
  178. /* TLB1 is 0 which is setup above */
  179. bri tlb_end
  180. GT4: /* r11 contains the rest - will be either 1 or 4 */
  181. ori r9, r0, 0x400000 /* TLB0 is 4MB */
  182. bri TLB1
  183. GT16: /* TLB0 is 16MB */
  184. addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
  185. TLB1:
  186. /* must be used r2 because of substract if failed */
/* NOTE(review): r11 here holds (size - TLB0 size), i.e. the remainder still
 * to be mapped; the "greater than 16MB/17MB" comments below describe the
 * TOTAL size when TLB0 is 16MB (16+4=20MB, 16+1=17MB thresholds). */
  187. addik r2, r11, -0x0400000
  188. bgei r2, GT20 /* size is greater than 16MB */
  189. /* size is >16MB and <20MB */
  190. addik r11, r11, -0x0100000
  191. bgei r11, GT17 /* size is greater than 17MB */
  192. /* kernel is >16MB and < 17MB */
  193. GT1:
  194. addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
  195. bri tlb_end
  196. GT2: /* TLB0 is 0 and TLB1 will be 4MB */
  197. GT17: /* TLB1 is 4MB - kernel size <20MB */
  198. addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
  199. bri tlb_end
  200. GT8: /* TLB0 is still zero that's why I can use only TLB1 */
  201. GT20: /* TLB1 is 16MB - kernel size >20MB */
  202. addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
  203. tlb_end:
  204. /*
  205. * Configure and load two entries into TLB slots 0 and 1.
  206. * In case we are pinning TLBs, these are reserved in by the
  207. * other TLB functions. If not reserving, then it doesn't
  208. * matter where they are loaded.
  209. */
  210. andi r4,r4,0xfffffc00 /* Mask off the real page number */
  211. ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
  212. /*
  213. * TLB0 is always used - check if is not zero (r9 stores TLB0 value)
  214. * if is use TLB1 value and clear it (r10 stores TLB1 value)
  215. */
  216. bnei r9, tlb0_not_zero
  217. add r9, r10, r0
  218. add r10, r0, r0
  219. tlb0_not_zero:
/* Build the page-size field for the TLB tag in r30 from the mapping size
 * in r9. Each bneid's delay-slot addik executes whether or not the branch
 * is taken, so r30 ends at 0x280 for 1MB, 0x300 for 4MB, 0x380 for 16MB. */
  220. /* look at the code below */
  221. ori r30, r0, 0x200
  222. andi r29, r9, 0x100000
  223. bneid r29, 1f
  224. addik r30, r30, 0x80
  225. andi r29, r9, 0x400000
  226. bneid r29, 1f
  227. addik r30, r30, 0x80
  228. andi r29, r9, 0x1000000
  229. bneid r29, 1f
  230. addik r30, r30, 0x80
  231. 1:
  232. andi r3,r3,0xfffffc00 /* Mask off the effective page number */
  233. ori r3,r3,(TLB_VALID)
  234. or r3, r3, r30
  235. /* Load tlb_skip size value which is index to first unused TLB entry */
  236. lwi r11, r0, TOPHYS(tlb_skip)
  237. mts rtlbx,r11 /* TLB slow 0 */
  238. mts rtlblo,r4 /* Load the data portion of the entry */
  239. mts rtlbhi,r3 /* Load the tag portion of the entry */
  240. /* Increase tlb_skip size */
  241. addik r11, r11, 1
  242. swi r11, r0, TOPHYS(tlb_skip)
  243. /* TLB1 can be zeroes that's why we not setup it */
  244. beqi r10, jump_over2
/* Same page-size-field construction as above, this time for the TLB1
 * mapping size held in r10. */
  245. /* look at the code below */
  246. ori r30, r0, 0x200
  247. andi r29, r10, 0x100000
  248. bneid r29, 1f
  249. addik r30, r30, 0x80
  250. andi r29, r10, 0x400000
  251. bneid r29, 1f
  252. addik r30, r30, 0x80
  253. andi r29, r10, 0x1000000
  254. bneid r29, 1f
  255. addik r30, r30, 0x80
  256. 1:
/* Second mapping starts where the first one ended: advance both the
 * physical (r4) and virtual (r3) base by the TLB0 size before re-masking. */
  257. addk r4, r4, r9 /* previous addr + TLB0 size */
  258. addk r3, r3, r9
  259. andi r3,r3,0xfffffc00 /* Mask off the effective page number */
  260. ori r3,r3,(TLB_VALID)
  261. or r3, r3, r30
  262. lwi r11, r0, TOPHYS(tlb_skip)
  263. mts rtlbx, r11 /* r11 is used from TLB0 */
  264. mts rtlblo,r4 /* Load the data portion of the entry */
  265. mts rtlbhi,r3 /* Load the tag portion of the entry */
  266. /* Increase tlb_skip size */
  267. addik r11, r11, 1
  268. swi r11, r0, TOPHYS(tlb_skip)
  269. jump_over2:
  270. /*
  271. * Load a TLB entry for LMB, since we need access to
  272. * the exception vectors, using a 4k real==virtual mapping.
  273. */
/* Tag and data both have a zero page number: this maps virtual page 0 to
 * physical page 0 (4 KB, writable + executable). */
  274. /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
  275. ori r6, r0, MICROBLAZE_LMB_TLB_ID
  276. mts rtlbx,r6
  277. ori r4,r0,(TLB_WR | TLB_EX)
  278. ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
  279. mts rtlblo,r4 /* Load the data portion of the entry */
  280. mts rtlbhi,r3 /* Load the tag portion of the entry */
  281. /*
  282. * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
  283. * caches ready to work.
  284. */
/* Enable translation: load MSR_KERNEL_VMS, then rted jumps to start_here
 * (its virtual address in r15) with the new MSR taking effect. */
  285. turn_on_mmu:
  286. ori r15,r0,start_here
  287. ori r4,r0,MSR_KERNEL_VMS
  288. mts rmsr,r4
  289. nop
  290. rted r15,0 /* enables MMU */
  291. nop
  292. start_here:
  293. #endif /* CONFIG_MMU */
  294. /* Initialize small data anchors */
  295. addik r13, r0, _KERNEL_SDA_BASE_
  296. addik r2, r0, _KERNEL_SDA2_BASE_
/* r1 = top of the init task's stack (stack grows down from the end of
 * init_thread_union). */
  297. /* Initialize stack pointer */
  298. addik r1, r0, init_thread_union + THREAD_SIZE - 4
  299. /* Initialize r31 with current task address */
  300. addik r31, r0, init_task
  301. /*
  302. * Call platform dependent initialize function.
  303. * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
  304. * the function.
  305. */
  306. addik r11, r0, machine_early_init
  307. brald r15, r11
  308. nop
  309. #ifndef CONFIG_MMU
/* No-MMU path: set the return address to machine_halt so the CPU halts if
 * start_kernel ever returns, then branch to start_kernel. */
  310. addik r15, r0, machine_halt
  311. braid start_kernel
  312. nop
  313. #else
  314. /*
  315. * Initialize the MMU.
  316. */
  317. bralid r15, mmu_init
  318. nop
  319. /* Go back to running unmapped so we can load up new values
  320. * and change to using our exception vectors.
  321. * On the MicroBlaze, all we invalidate the used TLB entries to clear
  322. * the old 16M byte TLB mappings.
  323. */
/* Drop back to the non-VM kernel MSR and rted to the PHYSICAL address of
 * kernel_load_context, so the TLB can be edited while untranslated. */
  324. ori r15,r0,TOPHYS(kernel_load_context)
  325. ori r4,r0,MSR_KERNEL
  326. mts rmsr,r4
  327. nop
  328. bri 4
  329. rted r15,0
  330. nop
  331. /* Load up the kernel context */
/* Invalidate the temporary LMB 4K mapping by clearing the tag word of TLB
 * entry MICROBLAZE_LMB_TLB_ID, then re-enable VM and jump to start_kernel;
 * r15 = machine_halt acts as the halt-on-return address. */
  332. kernel_load_context:
  333. ori r5, r0, MICROBLAZE_LMB_TLB_ID
  334. mts rtlbx,r5
  335. nop
  336. mts rtlbhi,r0
  337. nop
  338. addi r15, r0, machine_halt
  339. ori r17, r0, start_kernel
  340. ori r4, r0, MSR_KERNEL_VMS
  341. mts rmsr, r4
  342. nop
  343. rted r17, 0 /* enable MMU and jump to start_kernel */
  344. nop
  345. #endif /* CONFIG_MMU */