start.S

/*
 * Copyright (C) 1998	Dan Malek <dmalek@jlc.net>
 * Copyright (C) 1999	Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
 * Copyright (C) 2000, 2001, 2002 Wolfgang Denk <wd@denx.de>
 * Copyright Freescale Semiconductor, Inc. 2004, 2006. All rights reserved.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 * U-Boot - Startup Code for MPC83xx PowerPC based Embedded Boards
 */

#include <config.h>
#include <mpc83xx.h>
#include <version.h>

#define CONFIG_83XX	1	/* needed for Linux kernel header files	*/
#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file	*/

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#ifndef CONFIG_IDENT_STRING
#define CONFIG_IDENT_STRING "MPC83XX"
#endif
/* We don't want the MMU yet.
 */
#undef	MSR_KERNEL

/*
 * Floating Point enable, Machine Check and Recoverable Interr.
 */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
/*
 * Set up GOT: Global Offset Table
 *
 * Use r14 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)

	GOT_ENTRY(__init_end)
	GOT_ENTRY(_end)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * The Hard Reset Configuration Word (HRCW) table is in the first 64
 * (0x40) bytes of flash.  It has 8 bytes, but each byte is repeated 8
 * times so the processor can fetch it out of flash whether the flash
 * is 8, 16, 32, or 64 bits wide (hardware trickery).
 */
	.text
#define _HRCW_TABLE_ENTRY(w)	\
	.fill	8,1,(((w)>>24)&0xff);	\
	.fill	8,1,(((w)>>16)&0xff);	\
	.fill	8,1,(((w)>> 8)&0xff);	\
	.fill	8,1,(((w)    )&0xff)

	_HRCW_TABLE_ENTRY(CFG_HRCW_LOW)
	_HRCW_TABLE_ENTRY(CFG_HRCW_HIGH)
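
/*
 * Illustration (not part of the table itself): assuming CFG_HRCW_LOW
 * were 0x12345678, the _HRCW_TABLE_ENTRY macro above would emit 32
 * bytes - eight 0x12, eight 0x34, eight 0x56, eight 0x78 - so the
 * reset configuration logic reads the same HRCW byte stream no matter
 * whether the boot flash is 8, 16, 32, or 64 bits wide.
 */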
/*
 * Magic number and version string - put it after the HRCW since it
 * cannot be first in flash like it is in many other processors.
 */
	.long	0x27051956		/* U-Boot Magic Number	*/

	.globl	version_string
version_string:
	.ascii	U_BOOT_VERSION
	.ascii	" (", __DATE__, " - ", __TIME__, ")"
	.ascii	" ", CONFIG_IDENT_STRING, "\0"

#ifndef	CONFIG_DEFAULT_IMMR
#error CONFIG_DEFAULT_IMMR must be defined
#endif /* CONFIG_DEFAULT_IMMR */
#ifndef	CFG_IMMR
#define CFG_IMMR	CONFIG_DEFAULT_IMMR
#endif /* CFG_IMMR */
/*
 * After configuration, a system reset exception is executed using the
 * vector at offset 0x100 relative to the base set by MSR[IP].  If
 * MSR[IP] is 0, the base address is 0x00000000.  If MSR[IP] is 1, the
 * base address is 0xfff00000.  In the case of a Power On Reset or Hard
 * Reset, the value of MSR[IP] is determined by the CIP field in the
 * HRCW.
 *
 * Other bits in the HRCW set up the Base Address and Port Size in BR0.
 * This determines the location of the boot ROM (flash or EPROM) in the
 * processor's address space at boot time.  As long as the HRCW is set
 * up so that we eventually end up executing the code below when the
 * processor executes the reset exception, the actual values used
 * should not matter.
 *
 * Once we have got here, the address mask in OR0 is cleared so that
 * the bottom 32K of the boot ROM is effectively repeated all
 * throughout the processor's address space, after which we can jump to
 * the absolute address at which the boot ROM was linked at compile
 * time, and proceed to initialise the memory controller without
 * worrying if the rug will be pulled out from under us, so to speak
 * (it will be fine as long as we configure BR0 with the same boot ROM
 * link address).
 */
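
/*
 * Worked example (illustrative; assumes a link address of 0xFE000000):
 * with the OR0 address mask cleared, the bottom 32K of the boot image
 * answers at every 32K boundary, so the instruction stream we are
 * currently fetching near offset 0x100 is also visible at 0xFE000100.
 * Branching to the linked address therefore lands in the same code,
 * and BR0/OR0 can then be reprogrammed safely from there.
 */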
	. = EXC_OFF_SYS_RESET

	.globl	_start
_start: /* time t 0 */
	li	r21, BOOTFLAG_COLD	/* Normal Power-On: Boot from FLASH */
	nop
	b	boot_cold

	. = EXC_OFF_SYS_RESET + 0x10

	.globl	_start_warm
_start_warm:
	li	r21, BOOTFLAG_WARM	/* Software reboot		*/
	b	boot_warm

boot_cold: /* time t 3 */
	lis	r4, CONFIG_DEFAULT_IMMR@h
	nop
boot_warm: /* time t 5 */
	mfmsr	r5			/* save msr contents		*/
	lis	r3, CFG_IMMR@h
	ori	r3, r3, CFG_IMMR@l
	stw	r3, IMMRBAR(r4)

	/* Initialise the E300 processor core		*/
	/*------------------------------------------*/
	bl	init_e300_core

#ifndef CFG_RAMBOOT
	/* Inflate flash location so it appears everywhere, calculate	*/
	/* the absolute address in final location of the FLASH, jump	*/
	/* there and deflate the flash size back to minimal size	*/
	/*------------------------------------------------------------*/
	bl	map_flash_by_law1
	GET_GOT			/* initialize GOT access	*/
	lwz	r4, GOT(_start)
	addi	r4, r4, -EXC_OFF_SYS_RESET
	addi	r5, r4, in_flash - _start + EXC_OFF_SYS_RESET
	mtlr	r5
	blr

in_flash:
#if 1 /* Remapping flash with LAW0. */
	bl	remap_flash_by_law0
#endif
#endif	/* CFG_RAMBOOT */
	/* setup the bats */
	bl	setup_bats
	sync

	/*
	 * Cache must be enabled here for stack-in-cache trick.
	 * This means we need to enable the BATS.
	 * This means:
	 *   1) for the EVB, original gt regs need to be mapped
	 *   2) need to have an IBAT for the 0xf region,
	 *      we are running there!
	 * Cache should be turned on after BATs, since by default
	 * everything is write-through.
	 * The init-mem BAT can be reused after reloc. The old
	 * gt-regs BAT can be reused after board_init_f calls
	 * board_early_init_f (EVB only).
	 */
	/* enable address translation */
	bl	enable_addr_trans
	sync

	/* enable and invalidate the data cache */
	bl	dcache_enable
	sync
#ifdef CFG_INIT_RAM_LOCK
	bl	lock_ram_in_cache
	sync
#endif

	/* set up the stack pointer in our newly created
	 * cache-ram (r1) */
	lis	r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@h
	ori	r1, r1, (CFG_INIT_RAM_ADDR + CFG_GBL_DATA_OFFSET)@l

	li	r0, 0		/* Make room for stack frame header and */
	stwu	r0, -4(r1)	/* clear final stack frame so that	*/
	stwu	r0, -4(r1)	/* stack backtraces terminate cleanly	*/

	/* let the C-code set up the rest			*/
	/*							*/
	/* Be careful to keep code relocatable & stack humble	*/
	/*------------------------------------------------------*/
	GET_GOT			/* initialize GOT access	*/

	/* r3: IMMR */
	lis	r3, CFG_IMMR@h
	/* run low-level CPU init code (in Flash) */
	bl	cpu_init_f

	/* r3: BOOTFLAG */
	mr	r3, r21
	/* run 1st part of board init code (in Flash) */
	bl	board_init_f
/*
 * Vector Table
 */

	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
#ifndef FIXME
	STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)
#endif

/* Alignment exception. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR */
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR */
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl
.L_Alignment:
	.long	AlignmentException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r20,MSR_KERNEL
	rlwimi	r20,r23,0,16,16		/* copy EE bit from saved MSR */
	rlwimi	r20,r23,0,25,25		/* copy IP bit from saved MSR */
	lwz	r6,GOT(transfer_to_handler)
	mtlr	r6
	blrl
.L_ProgramCheck:
	.long	ProgramCheckException - _start + EXC_OFF_SYS_RESET
	.long	int_return - _start + EXC_OFF_SYS_RESET

	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

	/* I guess we could implement decrementer, and may have
	 * to someday for timekeeping.
	 */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

	STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
	STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)
	STD_EXCEPTION(0xd00, SingleStep, UnknownException)

	STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
	STD_EXCEPTION(0xf00, Trap_0f, UnknownException)

	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
	. = 0x1300
	/*
	 * This exception occurs when the program counter matches the
	 * Instruction Address Breakpoint Register (IABR).
	 *
	 * I want the cpu to halt if this occurs so I can hunt around
	 * with the debugger and look at things.
	 *
	 * When DEBUG is defined, both machine check enable (in the MSR)
	 * and checkstop reset enable (in the reset mode register) are
	 * turned off and so a checkstop condition will result in the cpu
	 * halting.
	 *
	 * I force the cpu into a checkstop condition by putting an illegal
	 * instruction here (at least this is the theory).
	 *
	 * Well - that didn't work, so just do an infinite loop!
	 */
1:	b	1b
#else
	STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
	STD_EXCEPTION(0x1400, SMI, UnknownException)

	STD_EXCEPTION(0x1500, Trap_15, UnknownException)
	STD_EXCEPTION(0x1600, Trap_16, UnknownException)
	STD_EXCEPTION(0x1700, Trap_17, UnknownException)
	STD_EXCEPTION(0x1800, Trap_18, UnknownException)
	STD_EXCEPTION(0x1900, Trap_19, UnknownException)
	STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
	STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
	STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
	STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
	STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
	STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
	STD_EXCEPTION(0x2000, Trap_20, UnknownException)
	STD_EXCEPTION(0x2100, Trap_21, UnknownException)
	STD_EXCEPTION(0x2200, Trap_22, UnknownException)
	STD_EXCEPTION(0x2300, Trap_23, UnknownException)
	STD_EXCEPTION(0x2400, Trap_24, UnknownException)
	STD_EXCEPTION(0x2500, Trap_25, UnknownException)
	STD_EXCEPTION(0x2600, Trap_26, UnknownException)
	STD_EXCEPTION(0x2700, Trap_27, UnknownException)
	STD_EXCEPTION(0x2800, Trap_28, UnknownException)
	STD_EXCEPTION(0x2900, Trap_29, UnknownException)
	STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
	STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
	STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
	STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
	STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
	STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)

	.globl	_end_of_vectors
_end_of_vectors:

	. = 0x3000
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi
/*
 * This code initialises the E300 processor core
 * (conforms to PowerPC 603e spec)
 * Note: expects original MSR contents to be in r5.
 */
	.globl	init_e300_core
init_e300_core: /* time t 10 */
	/* Initialize machine status; enable machine check interrupt	*/
	/*-----------------------------------------------------------*/
	li	r3, MSR_KERNEL		/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25	/* preserve IP bit set by HRCW */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22	/* debugger might set SE & BE bits */
#endif
	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3		/* Make SRR1 match MSR */

	lis	r3, CFG_IMMR@h
#if defined(CONFIG_WATCHDOG)
	/* Initialise the Watchdog values and reset it (if req) */
	/*------------------------------------------------------*/
	lis	r4, CFG_WATCHDOG_VALUE
	ori	r4, r4, (SWCRR_SWEN | SWCRR_SWRI | SWCRR_SWPR)
	stw	r4, SWCRR(r3)

	/* and reset it */
	li	r4, 0x556C		/* first half of the service sequence */
	sth	r4, SWSRR@l(r3)
	li	r4, -0x55C7		/* 0xAA39: second half of the service sequence */
	sth	r4, SWSRR@l(r3)
#else
	/* Disable Watchdog */
	/*-------------------*/
	lwz	r4, SWCRR(r3)
	/* Check whether it is enabled before disabling it;
	   once disabled by software it cannot be re-enabled */
	andi.	r4, r4, 0x4
	beq	1f
	xor	r4, r4, r4
	stw	r4, SWCRR(r3)
1:
#endif /* CONFIG_WATCHDOG */
	/* Initialize the Hardware Implementation-dependent Registers	*/
	/* HID0 also contains cache control				*/
	/*------------------------------------------------------*/
	lis	r3, CFG_HID0_INIT@h
	ori	r3, r3, CFG_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CFG_HID0_FINAL@h
	ori	r3, r3, CFG_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CFG_HID2@h
	ori	r3, r3, CFG_HID2@l
	SYNC
	mtspr	HID2, r3

	/* clear all BAT's			*/
	/*----------------------------------*/
	xor	r0, r0, r0
	mtspr	DBAT0U, r0
	mtspr	DBAT0L, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT1L, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT2L, r0
	mtspr	DBAT3U, r0
	mtspr	DBAT3L, r0
	mtspr	IBAT0U, r0
	mtspr	IBAT0L, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT1L, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT2L, r0
	mtspr	IBAT3U, r0
	mtspr	IBAT3L, r0
	SYNC

	/* invalidate all tlb's
	 *
	 * From the 603e User Manual: "The 603e provides the ability to
	 * invalidate a TLB entry. The TLB Invalidate Entry (tlbie)
	 * instruction invalidates the TLB entry indexed by the EA, and
	 * operates on both the instruction and data TLBs simultaneously
	 * invalidating four TLB entries (both sets in each TLB). The
	 * index corresponds to bits 15-19 of the EA. To invalidate all
	 * entries within both TLBs, 32 tlbie instructions should be
	 * issued, incrementing this field by one each time."
	 *
	 * "Note that the tlbia instruction is not implemented on the
	 * 603e."
	 *
	 * bits 15-19 correspond to addresses 0x00000000 to 0x0001F000
	 * incrementing by 0x1000 each time. The code below is sort of
	 * based on code in "flush_tlbs" from arch/ppc/kernel/head.S
	 */
	li	r3, 32
	mtctr	r3
	li	r3, 0
1:	tlbie	r3
	addi	r3, r3, 0x1000
	bdnz	1b
	SYNC
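
	/*
	 * The loop above is equivalent to this sketch (illustrative C,
	 * mirroring the pseudo-code used for setup_bats below):
	 *
	 *	for (ea = 0; ea < 0x20000; ea += 0x1000)
	 *		tlbie(ea);
	 *
	 * i.e. one tlbie per value of EA bits 15-19, covering every set
	 * in both the instruction and data TLBs.
	 */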
	/* Done!			*/
	/*------------------------------*/
	blr

	.globl	invalidate_bats
invalidate_bats:
	/* invalidate BATs */
	mtspr	IBAT0U, r0
	mtspr	IBAT1U, r0
	mtspr	IBAT2U, r0
	mtspr	IBAT3U, r0
#if (CFG_HID2 & HID2_HBE)
	mtspr	IBAT4U, r0
	mtspr	IBAT5U, r0
	mtspr	IBAT6U, r0
	mtspr	IBAT7U, r0
#endif
	isync
	mtspr	DBAT0U, r0
	mtspr	DBAT1U, r0
	mtspr	DBAT2U, r0
	mtspr	DBAT3U, r0
#if (CFG_HID2 & HID2_HBE)
	mtspr	DBAT4U, r0
	mtspr	DBAT5U, r0
	mtspr	DBAT6U, r0
	mtspr	DBAT7U, r0
#endif
	isync
	sync
	blr
/* setup_bats - set them up to some initial state */
	.globl	setup_bats
setup_bats:
	addis	r0, r0, 0x0000

	/* IBAT 0 */
	addis	r4, r0, CFG_IBAT0L@h
	ori	r4, r4, CFG_IBAT0L@l
	addis	r3, r0, CFG_IBAT0U@h
	ori	r3, r3, CFG_IBAT0U@l
	mtspr	IBAT0L, r4
	mtspr	IBAT0U, r3
	isync

	/* DBAT 0 */
	addis	r4, r0, CFG_DBAT0L@h
	ori	r4, r4, CFG_DBAT0L@l
	addis	r3, r0, CFG_DBAT0U@h
	ori	r3, r3, CFG_DBAT0U@l
	mtspr	DBAT0L, r4
	mtspr	DBAT0U, r3
	isync

	/* IBAT 1 */
	addis	r4, r0, CFG_IBAT1L@h
	ori	r4, r4, CFG_IBAT1L@l
	addis	r3, r0, CFG_IBAT1U@h
	ori	r3, r3, CFG_IBAT1U@l
	mtspr	IBAT1L, r4
	mtspr	IBAT1U, r3
	isync

	/* DBAT 1 */
	addis	r4, r0, CFG_DBAT1L@h
	ori	r4, r4, CFG_DBAT1L@l
	addis	r3, r0, CFG_DBAT1U@h
	ori	r3, r3, CFG_DBAT1U@l
	mtspr	DBAT1L, r4
	mtspr	DBAT1U, r3
	isync

	/* IBAT 2 */
	addis	r4, r0, CFG_IBAT2L@h
	ori	r4, r4, CFG_IBAT2L@l
	addis	r3, r0, CFG_IBAT2U@h
	ori	r3, r3, CFG_IBAT2U@l
	mtspr	IBAT2L, r4
	mtspr	IBAT2U, r3
	isync

	/* DBAT 2 */
	addis	r4, r0, CFG_DBAT2L@h
	ori	r4, r4, CFG_DBAT2L@l
	addis	r3, r0, CFG_DBAT2U@h
	ori	r3, r3, CFG_DBAT2U@l
	mtspr	DBAT2L, r4
	mtspr	DBAT2U, r3
	isync

	/* IBAT 3 */
	addis	r4, r0, CFG_IBAT3L@h
	ori	r4, r4, CFG_IBAT3L@l
	addis	r3, r0, CFG_IBAT3U@h
	ori	r3, r3, CFG_IBAT3U@l
	mtspr	IBAT3L, r4
	mtspr	IBAT3U, r3
	isync

	/* DBAT 3 */
	addis	r4, r0, CFG_DBAT3L@h
	ori	r4, r4, CFG_DBAT3L@l
	addis	r3, r0, CFG_DBAT3U@h
	ori	r3, r3, CFG_DBAT3U@l
	mtspr	DBAT3L, r4
	mtspr	DBAT3U, r3
	isync

#if (CFG_HID2 & HID2_HBE)
	/* IBAT 4 */
	addis	r4, r0, CFG_IBAT4L@h
	ori	r4, r4, CFG_IBAT4L@l
	addis	r3, r0, CFG_IBAT4U@h
	ori	r3, r3, CFG_IBAT4U@l
	mtspr	IBAT4L, r4
	mtspr	IBAT4U, r3
	isync

	/* DBAT 4 */
	addis	r4, r0, CFG_DBAT4L@h
	ori	r4, r4, CFG_DBAT4L@l
	addis	r3, r0, CFG_DBAT4U@h
	ori	r3, r3, CFG_DBAT4U@l
	mtspr	DBAT4L, r4
	mtspr	DBAT4U, r3
	isync

	/* IBAT 5 */
	addis	r4, r0, CFG_IBAT5L@h
	ori	r4, r4, CFG_IBAT5L@l
	addis	r3, r0, CFG_IBAT5U@h
	ori	r3, r3, CFG_IBAT5U@l
	mtspr	IBAT5L, r4
	mtspr	IBAT5U, r3
	isync

	/* DBAT 5 */
	addis	r4, r0, CFG_DBAT5L@h
	ori	r4, r4, CFG_DBAT5L@l
	addis	r3, r0, CFG_DBAT5U@h
	ori	r3, r3, CFG_DBAT5U@l
	mtspr	DBAT5L, r4
	mtspr	DBAT5U, r3
	isync

	/* IBAT 6 */
	addis	r4, r0, CFG_IBAT6L@h
	ori	r4, r4, CFG_IBAT6L@l
	addis	r3, r0, CFG_IBAT6U@h
	ori	r3, r3, CFG_IBAT6U@l
	mtspr	IBAT6L, r4
	mtspr	IBAT6U, r3
	isync

	/* DBAT 6 */
	addis	r4, r0, CFG_DBAT6L@h
	ori	r4, r4, CFG_DBAT6L@l
	addis	r3, r0, CFG_DBAT6U@h
	ori	r3, r3, CFG_DBAT6U@l
	mtspr	DBAT6L, r4
	mtspr	DBAT6U, r3
	isync

	/* IBAT 7 */
	addis	r4, r0, CFG_IBAT7L@h
	ori	r4, r4, CFG_IBAT7L@l
	addis	r3, r0, CFG_IBAT7U@h
	ori	r3, r3, CFG_IBAT7U@l
	mtspr	IBAT7L, r4
	mtspr	IBAT7U, r3
	isync

	/* DBAT 7 */
	addis	r4, r0, CFG_DBAT7L@h
	ori	r4, r4, CFG_DBAT7L@l
	addis	r3, r0, CFG_DBAT7U@h
	ori	r3, r3, CFG_DBAT7U@l
	mtspr	DBAT7L, r4
	mtspr	DBAT7U, r3
	isync
#endif

	/* Invalidate TLBs.
	 * -> for (val = 0; val < 0x20000; val+=0x1000)
	 * ->   tlbie(val);
	 */
	lis	r3, 0
	lis	r5, 2
1:
	tlbie	r3
	addi	r3, r3, 0x1000
	cmp	0, 0, r3, r5
	blt	1b

	blr
	.globl	enable_addr_trans
enable_addr_trans:
	/* enable address translation */
	mfmsr	r5
	ori	r5, r5, (MSR_IR | MSR_DR)
	mtmsr	r5
	isync
	blr

	.globl	disable_addr_trans
disable_addr_trans:
	/* disable address translation */
	mflr	r4
	mfmsr	r3
	andi.	r0, r3, (MSR_IR | MSR_DR)
	beqlr
	andc	r3, r3, r0
	mtspr	SRR0, r4
	mtspr	SRR1, r3
	rfi
/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */
	.globl	icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	icache_status
icache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_ICE_SHIFT + 1), 31, 31
	blr

	.globl	dcache_enable
dcache_enable:
	mfspr	r3, HID0
	li	r5, HID0_DCFI|HID0_DLOCK
	andc	r3, r3, r5
	mtspr	HID0, r3	/* no invalidate, unlock */
	ori	r3, r3, HID0_DCE
	ori	r5, r3, HID0_DCFI
	mtspr	HID0, r5	/* enable + invalidate */
	mtspr	HID0, r3	/* enable */
	sync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	dcache_status
dcache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_DCE_SHIFT + 1), 31, 31
	blr

	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr

	.globl	ppcDWstore
ppcDWstore:
	lfd	1, 0(r4)
	stfd	1, 0(r3)
	blr

	.globl	ppcDWload
ppcDWload:
	lfd	1, 0(r3)
	stfd	1, 0(r4)
	blr
/*-------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,  r3		/* Set new stack pointer		*/
	mr	r9,  r4		/* Save copy of Global Data pointer	*/
	mr	r10, r5		/* Save copy of Destination Address	*/

	mr	r3,  r5				/* Destination Address	*/
	lwz	r4, GOT(_start)
	addi	r4, r4, -EXC_OFF_SYS_RESET
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4
	li	r6, CFG_CACHELINE_SIZE		/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CFG_MONITOR_BASE)
	 *		 + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r14, r14, r15
	/* then the one used by the C code */
	add	r30, r30, r15
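
	/*
	 * In C-like terms (illustrative sketch only):
	 *
	 *	offset = dest_addr - link_addr;	   (r15 = r10 - r4)
	 *	r14 += offset;	  assembly GOT pointer
	 *	r30 += offset;	  GOT pointer used by compiled C code
	 *
	 * so every GOT-relative access keeps working once execution
	 * continues from the copy in RAM.
	 */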
	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* copy */
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b

	addi	r0,r5,3
	srwi.	r0,r0,2
	mtctr	r0
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* and compare */
20:	lwzu	r20,4(r8)
	lwzu	r21,4(r7)
	xor.	r22, r20, r21
	bne	30f
	bdnz	20b
	b	4f

	/* compare failed */
30:	li	r3, 0
	blr

2:	slwi	r0,r0,2		/* overlapping regions: copy backwards (count back to bytes) */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b
/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr
in_ram:
	/*
	 * Relocation Function, r14 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	add	r0,r0,r11
	stw	r0,0(r3)
	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	add	r0,r0,r11
	stw	r10,0(r3)
	stw	r0,0(r4)
	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
#if defined(CONFIG_HYMOD)
	/*
	 * For HYMOD - the environment is the very last item in flash.
	 * The real .bss stops just before environment starts, so only
	 * clear up to that point.
	 *
	 * taken from mods for FADS board
	 */
	lwz	r4,GOT(environment)
#else
	lwz	r4,GOT(_end)
#endif
	cmplw	0, r3, r4
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b

6:
	mr	r3, r9		/* Global Data pointer		*/
	mr	r4, r10		/* Destination Address		*/
	bl	board_init_r
/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 */
	.globl	trap_init
trap_init:
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100		/* reset vector always at 0x100 */

	cmplw	0, r7, r8
	bgelr				/* return if r7>=r8 - just in case */

	mflr	r4			/* save link register		*/
1:
	lwz	r0, 0(r7)
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector	*/
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have	*/
	lis	r7, MSR_IP@h		/* relocated into low memory	*/
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off	*/
	andc	r3, r3, r7		/* (if it was on)		*/
	SYNC				/* Some chip revs need this...	*/
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register	*/
	blr

/*
 * Function: relocate entries for one exception vector
 */
trap_reloc:
	lwz	r0, 0(r7)		/* hdlr ...			*/
	add	r0, r0, r3		/*  ... += dest_addr		*/
	stw	r0, 0(r7)

	lwz	r0, 4(r7)		/* int_return ...		*/
	add	r0, r0, r3		/*  ... += dest_addr		*/
	stw	r0, 4(r7)

	blr
#ifdef CFG_INIT_RAM_LOCK
lock_ram_in_cache:
	/* Allocate Initial RAM in data cache.
	 */
	lis	r3, (CFG_INIT_RAM_ADDR & ~31)@h
	ori	r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
	li	r2, ((CFG_INIT_RAM_END & ~31) + \
		     (CFG_INIT_RAM_ADDR & 31) + 31) / 32
	mtctr	r2
1:
	dcbz	r0, r3
	addi	r3, r3, 32
	bdnz	1b
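
	/*
	 * The counter above is the number of 32-byte cache lines needed
	 * to cover the init-RAM region. For example, assuming a
	 * cache-aligned CFG_INIT_RAM_ADDR and CFG_INIT_RAM_END = 0x1000
	 * (a 4 KB region), the expression evaluates to 128 lines. Each
	 * dcbz establishes one line in the data cache without needing
	 * backing memory to respond, which is what makes the
	 * stack-in-cache trick usable before SDRAM is set up.
	 */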
	/* Lock the data cache */
	mfspr	r0, HID0
	ori	r0, r0, 0x1000		/* HID0_DLOCK */
	sync
	mtspr	HID0, r0
	sync
	blr

	.globl	unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3, (CFG_INIT_RAM_ADDR & ~31)@h
	ori	r3, r3, (CFG_INIT_RAM_ADDR & ~31)@l
	li	r2, 512
	mtctr	r2
1:	icbi	r0, r3
	dcbi	r0, r3
	addi	r3, r3, 32
	bdnz	1b
	sync			/* Wait for all icbi to complete on bus	*/
	isync

	/* Unlock the data cache and invalidate it */
	mfspr	r3, HID0
	li	r5, HID0_DLOCK|HID0_DCFI
	andc	r3, r3, r5		/* no invalidate, unlock	*/
	ori	r5, r3, HID0_DCFI	/* invalidate, unlock		*/
	mtspr	HID0, r5		/* invalidate, unlock		*/
	mtspr	HID0, r3		/* no invalidate, unlock	*/
	sync
	blr
#endif
map_flash_by_law1:
	/* When booting from ROM (Flash or EPROM), clear the	*/
	/* Address Mask in OR0 so ROM appears everywhere	*/
	/*----------------------------------------------------*/
	lis	r3, (CFG_IMMR)@h	/* r3 <= CFG_IMMR		*/
	lwz	r4, OR0@l(r3)
	li	r5, 0x7fff		/* r5 <= 0x00007FFF		*/
	and	r4, r4, r5
	stw	r4, OR0@l(r3)		/* OR0 <= OR0 & 0x00007FFF	*/

	/* As described in the MPC8349E User's Manual, when RCW[BMS] is 0
	 * the system boots from 0x0000_0100 and the LBLAWBAR0[BASE_ADDR]
	 * reset value is 0x00000; when RCW[BMS] is 1 the system boots
	 * from 0xFFF0_0100 and the LBLAWBAR0[BASE_ADDR] reset value is
	 * 0xFF800. From hard reset up to this point the processor has
	 * fetched and executed instructions sequentially; no absolute
	 * jump has occurred yet. Shortly, the U-Boot code has to make an
	 * absolute jump to tell the CPU's instruction fetch unit what the
	 * U-Boot TEXT base address is. Because the TEXT base resides in
	 * the boot ROM memory space, to guarantee that the code runs
	 * smoothly after that jump we must map in the entire boot ROM
	 * with a Local Access Window. Sometimes we want a boot ROM
	 * starting address other than 0x00000 or 0xFF800, such as
	 * 0xFE000000. In that case the default LBIU Local Access Window 0
	 * will not cover this memory space, so we need another window to
	 * map it in.
	 */
	lis	r4, (CFG_FLASH_BASE)@h
	ori	r4, r4, (CFG_FLASH_BASE)@l
	stw	r4, LBLAWBAR1(r3)	/* LBLAWBAR1 <= CFG_FLASH_BASE	*/

	/* Store 0x80000012 + log2(CFG_FLASH_SIZE) into LBLAWAR1 */
	lis	r4, (0x80000012)@h
	ori	r4, r4, (0x80000012)@l
	li	r5, CFG_FLASH_SIZE
1:	srawi.	r5, r5, 1		/* r5 = r5 >> 1			*/
	addi	r4, r4, 1
	bne	1b
	stw	r4, LBLAWAR1(r3)	/* LBLAWAR1 <= 8MB Flash Size	*/
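
	/*
	 * Worked example (assuming CFG_FLASH_SIZE is given in MB, e.g. 8):
	 * the shift loop runs four times, leaving r4 = 0x80000016, i.e.
	 * the enable bit plus a size code of 22, which the LBIU decodes
	 * as a window of 2^(22+1) bytes = 8 MB. In general the loop
	 * yields 0x80000013 + log2(CFG_FLASH_SIZE), since it also counts
	 * the final shift that clears r5.
	 */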
	blr

	/* Though all the LBIU Local Access Windows and LBC Banks will be
	 * initialized in the C code, we'd better configure boot ROM's
	 * window 0 and bank 0 correctly here.
	 */
remap_flash_by_law0:
	/* Initialize the BR0 with the boot ROM starting address. */
	lwz	r4, BR0(r3)
	li	r5, 0x7FFF
	and	r4, r4, r5
	lis	r5, (CFG_FLASH_BASE & 0xFFFF8000)@h
	ori	r5, r5, (CFG_FLASH_BASE & 0xFFFF8000)@l
	or	r5, r5, r4
	stw	r5, BR0(r3)	/* r5 <= (CFG_FLASH_BASE & 0xFFFF8000) | (BR0 & 0x00007FFF) */

	lwz	r4, OR0(r3)
	lis	r5, ~((CFG_FLASH_SIZE << 4) - 1)
	or	r4, r4, r5
	stw	r4, OR0(r3)

	lis	r4, (CFG_FLASH_BASE)@h
	ori	r4, r4, (CFG_FLASH_BASE)@l
	stw	r4, LBLAWBAR0(r3)	/* LBLAWBAR0 <= CFG_FLASH_BASE	*/

	/* Store 0x80000012 + log2(CFG_FLASH_SIZE) into LBLAWAR0 */
	lis	r4, (0x80000012)@h
	ori	r4, r4, (0x80000012)@l
	li	r5, CFG_FLASH_SIZE
1:	srawi.	r5, r5, 1		/* r5 = r5 >> 1			*/
	addi	r4, r4, 1
	bne	1b
	stw	r4, LBLAWAR0(r3)	/* LBLAWAR0 <= Flash Size	*/

	xor	r4, r4, r4
	stw	r4, LBLAWBAR1(r3)
	stw	r4, LBLAWAR1(r3)	/* disable LBIU LAW1		*/
	blr