/* head.S — linux/arch/arm26/boot/compressed (approx. 11 KB) */
/*
 * linux/arch/arm26/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */

		@ Emit the single character \val (debug builds only; calls
		@ putc, which is assembled under #ifdef DEBUG below).
		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		@ Emit \val as a \len-digit hex number via phex.
		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		@ Hooks around the kernel-relocation copy loop; empty unless
		@ a debug variant is substituted.
		.macro	debug_reloc_start
		.endm

		.macro	debug_reloc_end
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
start:
		.type	start,#function
		@ Eight NOPs: a tolerant landing pad for boot loaders that
		@ jump slightly past the image start.
		.rept	8
		mov	r0, r0
		.endr

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:		mov	r7, r1			@ save architecture ID
		mov	r8, #0			@ save r0
		@ 26-bit ARM keeps the PSR in the top/bottom bits of r15;
		@ teqp writes them directly.  0x0c000003 sets the I and F
		@ bits (IRQ/FIQ masked) and SVC mode.
		teqp	pc, #0x0c000003		@ turn off interrupts
		.text
/*
 * Load the link-time layout from LC0:
 *   r1 = link address of LC0       r2 = __bss_start
 *   r3 = _end                      r4 = final kernel address
 *   r5 = image start (_start)      r6 = _got_start
 *   ip = _got_end                  sp = top of static stack
 * r0 then becomes the load-vs-link delta.
 */
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}
		subs	r0, r0, r1		@ calculate the delta offset

		teq	r0, #0			@ if delta is zero, we're
		beq	not_relocated		@ running at the address we
						@ were linked at.

		add	r2, r2, r0		@ different address, so we
		add	r3, r3, r0		@ need to fix up various
		add	r5, r5, r0		@ pointers.
		add	r6, r6, r0
		add	ip, ip, r0
		add	sp, sp, r0

1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b

		@ 16 bytes are cleared per iteration, so the loop may run
		@ up to 12 bytes past _end; the linker script is assumed to
		@ leave slack there.
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2                    -> OK
 *   r4 + image length <= r5     -> OK
 */
		cmp	r4, r2
		bhs	wont_overwrite
		add	r0, r4, #4096*1024	@ 4MB largest kernel size
		cmp	r0, r5
		bls	wont_overwrite

		@ Decompression would clobber us: decompress above the
		@ malloc space instead, then relocate into place afterwards.
		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel	@ (r0=out, r1/r2=heap, r3=arch)
		add	r0, r0, #127
		bic	r0, r0, #127		@ align the kernel length
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8-r14 = unused
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
		@ Copy 48 bytes per iteration; may overrun reloc_end by up
		@ to 44 bytes, which is harmless scratch at this point.
1:		ldmia	r2!, {r8 - r13}		@ copy relocation code
		stmia	r1!, {r8 - r13}
		ldmia	r2!, {r8 - r13}
		stmia	r1!, {r8 - r13}
		cmp	r2, r3
		blo	1b

		bl	cache_clean_flush
		add	pc, r5, r0		@ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4 = kernel execution address
 * r7 = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel
		@ Link-time address table, loaded in one ldmia at entry to
		@ .text above; each word is fixed up by the run-time delta
		@ when the image is not executing at its link address.
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_load_addr		@ r4
		.word	_start			@ r5
		.word	_got_start		@ r6
		.word	_got_end		@ ip
		.word	user_stack+4096		@ sp
LC1:		.word	reloc_end - reloc_start	@ size of relocatable tail
		.size	LC0, . - LC0
/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = run-time address of "start"
 * On exit,
 *  r1, r2, r3, r8, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00		@ (16KB alignment for TTBR)
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r8, r0, lsr #18
		mov	r8, r8, lsl #18		@ start of RAM
		add	r9, r8, #0x10000000	@ a reasonable RAM size
		@ r1 doubles as the descriptor: low bits hold the section
		@ flags (0x12 = section type, 3<<10 = AP full access), the
		@ high bits hold the 1MB section base, bumped each pass.
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r8			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r9			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e		@ section, cacheable, not bufferable
		orr	r1, r1, #3 << 10
		mov	r2, pc, lsr #20		@ 1MB index of the executing code
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
__armv4_cache_on:
		mov	r12, lr			@ bl below clobbers lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		orr	r0, r0, #0x0030		@ NOTE(review): presumably the
						@ SBO/config bits 4-5 — confirm
		b	__common_cache_on

__arm6_cache_on:
		mov	r12, lr			@ bl below clobbers lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30		@ base control value (no mrc on v3)

		@ Common tail: program the MMU and enable it together with
		@ the cache/write buffer.  Returns via r12 (saved lr).
__common_cache_on:
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1			@ all domains: manager access
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mov	pc, r12
/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8-r14 = unused
 */
		.align	5
reloc_start:	add	r8, r5, r0		@ r8 = end of decompressed kernel
		debug_reloc_start
		mov	r1, r4
		@ Move the kernel down to its execution address, 128 bytes
		@ (4 x 8 registers) per iteration; may overshoot the end by
		@ up to 124 bytes.
1:
		.rept	4
		ldmia	r5!, {r0, r2, r3, r9 - r13}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r9 - r13}
		.endr

		cmp	r5, r8
		blo	1b
		debug_reloc_end

call_kernel:	bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0
		mov	r1, r7			@ restore architecture number
		mov	pc, r4			@ call kernel
/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 * r1  = corrupted
 * r2  = corrupted
 * r3  = block offset (8 = on, 12 = off, 16 = flush)
 * r6  = corrupted (receives the processor ID)
 * r12 = corrupted
 */
call_cache_fn:	adr	r12, proc_types
		mrc	p15, 0, r6, c0, c0	@ get processor ID
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r6		@ (real ^ match)
		tst	r1, r2			@       & mask
		addeq	pc, r12, r3		@ call cache function
		add	r12, r12, #4*5		@ 5 words per table entry
		b	1b			@ terminates: last entry has mask 0
/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction   (entry offset  8)
 *  - 'cache off' method instruction  (entry offset 12)
 *  - 'cache flush' method instruction (entry offset 16)
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		b	__arm6_cache_off	@ works, but slow
		b	__arm6_cache_off
		mov	pc, lr
@		b	__arm6_cache_on		@ untested
@		b	__arm6_cache_off
@		b	__armv3_cache_flush

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		b	__arm7_cache_off
		b	__arm7_cache_off
		mov	pc, lr

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		b	__armv4_cache_on
		b	__armv4_cache_off
		mov	pc, lr

		.word	0x41129200		@ ARM920T
		.word	0xff00fff0
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		.word	0x69050000		@ xscale
		.word	0xffff0000
		b	__armv4_cache_on
		b	__armv4_cache_off
		b	__armv4_cache_flush

		@ Catch-all: mask 0 always matches, so call_cache_fn's
		@ search loop is guaranteed to terminate here.
		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		.size	proc_types, . - proc_types
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry,  r6 = processor ID
 * On exit,   r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_cache_off:
		mrc	p15, 0, r0, c1, c0	@ read-modify-write control reg
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
		mov	pc, lr

		@ v3 control register is write-only, so each CPU writes its
		@ known base value rather than read-modify-write.
__arm6_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_cache_off

__arm7_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_cache_off

__armv3_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r6 = processor ID
 * On exit,
 *  r1, r2, r3, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
		.align	5
cache_clean_flush:
		mov	r3, #16			@ 'flush' slot in proc_types
		b	call_cache_fn

__armv4_cache_flush:
		@ Software clean: read 64KB of sequential addresses (2x the
		@ largest D-cache) starting at the cache-line-aligned PC so
		@ every dirty line is evicted, then invalidate I-cache.
		bic	r1, pc, #31
		add	r2, r1, #65536		@ 2x the largest dcache size
1:		ldr	r12, [r1], #32		@ s/w flush D cache
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c7, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
  388. __armv3_cache_flush:
  389. mov r1, #0
  390. mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
  391. mov pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 *
 * NOTE(review): the loadsp/writeb macros used below are not defined
 * in this file — presumably a per-board debug include supplies them
 * when DEBUG is enabled; verify before building with DEBUG.
 */
#ifdef DEBUG
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex: print r0 as an r1-digit hex number.  Builds the string
@ backwards in phexbuf, then tail-calls puts once r1 underflows.
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]		@ NUL-terminate at digit count
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts			@ all digits emitted: print buffer
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7		@ adjust 10-15 up to 'A'-'F'
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts: print the NUL-terminated string at r0; each '\n' is followed
@ by a '\r'.  The count-down at label 3 is a crude output delay.
puts:		loadsp	r3
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr

@ putc: print the single character in r0.  Jumps into the middle of
@ puts with r0 = 0 so the string loop terminates after one character.
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3
		b	2b

@ memdump: dump 64 words starting at r0, eight per line with an extra
@ space mid-row.  lr is parked in r10 across the putc/phex calls.
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex			@ print the address
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex			@ print the word
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '		@ extra gap after 4th word
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif
@ End of the relocatable tail copied by the loop above (LC1 holds
@ reloc_end - reloc_start).
reloc_end:

		.align
		@ 4KB static stack, placed in its own section so the linker
		@ script can position it; its top is loaded into sp via LC0.
		.section ".stack", "aw"
user_stack:	.space	4096