/* head_44x.S */

/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	/*
	 * Save parameters we are passed
	 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
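	/*
	 * Note: rfi loads the next instruction address from SRR0 and the MSR
	 * from SRR1, so this single instruction both switches to MSR_KERNEL
	 * and transfers control to start_kernel().
	 */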
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
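/*
 * On the 440, a vector's effective address is IVPR's upper 16 bits combined
 * with the 16-byte-aligned low 16 bits of the corresponding IVORn, so IVPR
 * supplies the base of a 64KB region and each IVOR holds the offset of its
 * handler label; init_cpu_state below programs both from interrupt_base and
 * the labels that follow.
 */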
interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12
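	/*
	 * The TID value written to MMUCR here is what tlbwe tags the new
	 * entry with in finish_tlb_load: kernel translations get TID 0
	 * (matching any process), user translations get the current PID.
	 */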
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it, as we could avoid
	 * loading SPRN_ESR completely in the first place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30
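	/*
	 * The rlwimi above rotates ESR left by 10 bits, which drops ESR[ST]
	 * (set for store faults) into bit 30 of r13, the _PAGE_RW position,
	 * so a faulting store additionally requires write permission in the
	 * PTE.
	 */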
	/* Load the PTE */

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
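	/*
	 * The immediate in the patched cmpwi below is overwritten at boot
	 * with the TLB high-water mark (tlb_44x_hwater), so the replacement
	 * index wraps back to 0 before it reaches the pinned entries (such
	 * as the kernel mapping in slot 63) at the top of the TLB.
	 */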
	.globl	tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl	tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

	/* Debug Interrupt */
	DEBUG_CRIT_EXCEPTION

/*
 * Local functions
 */

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 * 	r10 - EA of fault
 * 	r11 - PTE high word value
 * 	r12 - PTE low word value
 * 	r13 - TLB index
 * 	MMUCR - loaded with proper value when we get here
 * 	Upon exit, we reload everything and RFI.
 */
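/*
 * The three tlbwe instructions below each target one word of the 3-word
 * 440 TLB entry: word 0 (PAGEID) holds the EPN, valid bit, and page size,
 * word 1 (XLAT) holds the RPN and ERPN, and word 2 (ATTRIB) holds the
 * storage attribute and permission bits.  The TID comes from MMUCR, which
 * the caller has already set up.
 */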
finish_tlb_load:
	/* Combine RPN & ERPN and write TLB word 1 (XLAT) */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create TLB word 0 (PAGEID). This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And TLB word 2 (ATTRIB) */
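	/*
	 * This relies on the 44x Linux PTE flags sharing bit positions with
	 * TLB word 2: the 0xf85 mask keeps WRITETHRU/NO_CACHE/COHERENT/
	 * GUARDED/ENDIAN (-> W/I/M/G/E) plus EXEC (-> SX) and PRESENT
	 * (-> SR).  The DIRTY bit is folded into the SW position of the
	 * mask, so write permission is only granted once a page is both
	 * writable and dirty, and the final rlwimi mirrors the S* bits
	 * into the U* bits for user pages.
	 */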
	li	r10,0xf85		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER	/* User page ? */
	beq	1f			/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi				/* Force context change */

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 */
_GLOBAL(init_cpu_state)
	mflr	r22

	/*
	 * In case the firmware didn't do it, we apply some workarounds
	 * that are good for all 440 core variants here
	 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

	/*
	 * Set up the initial MMU state
	 *
	 * We are still executing code at the virtual address
	 * mappings set by the firmware for the base of RAM.
	 *
	 * We first invalidate all TLB entries but the one
	 * we are running from.  We then load the KERNELBASE
	 * mappings so we can begin to use kernel addresses
	 * natively and so the interrupt vector locations are
	 * permanently pinned (necessary since Book E
	 * implementations always have translation enabled).
	 *
	 * TODO: Use the known TLB entry we are running from to
	 *	 determine which physical region we are located
	 *	 in.  This can be used to determine where in RAM
	 *	 (on a shared CPU system) or PCI memory space
	 *	 (on a DRAMless system) we are located.
	 *	 For now, we assume a perfect world which means
	 *	 we are located at the base of DRAM (physical 0).
	 */

	/*
	 * Search TLB for entry that we are currently using.
	 * Invalidate all entries but the one we are using.
	 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	wmmucr				/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
	sync

	bl	invstr				/* Find our address */
invstr:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skpinv				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skpinv:	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync					/* If so, context change */

	/*
	 * Configure and load pinned entry into TLB slot 63.
	 */
	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
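	/*
	 * TLB slot 63 now pins the 256MB region at PAGE_OFFSET to physical
	 * address 0 with supervisor read/write/execute and guarded storage;
	 * because the slot sits above the replacement watermark, the TLB
	 * miss handlers never evict it.
	 */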
	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4
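	/*
	 * The return address saved in r22 was taken while running from the
	 * original (pre-relocation) mapping; adding KERNELBASE rebases it
	 * into the pinned kernel mapping installed above before returning.
	 */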
	addis	r22,r22,KERNELBASE@h
	mtlr	r22
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8