head_44x.S

/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *      frank_rowand@mvista.com or source@mvista.com
 *      debbie_chu@mvista.com
 * Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
        .section .text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);
        /*
         * Reserve a word at a fixed location to store the address
         * of abatron_pteptrs
         */
        nop
        /*
         * Save parameters we are passed
         */
        mr      r31,r3
        mr      r30,r4
        mr      r29,r5
        mr      r28,r6
        mr      r27,r7
        li      r24,0                   /* CPU number */

        /*
         * In case the firmware didn't do it, we apply some workarounds
         * that are good for all 440 core variants here
         */
        mfspr   r3,SPRN_CCR0
        rlwinm  r3,r3,0,0,27            /* disable icache prefetch */
        isync
        mtspr   SPRN_CCR0,r3
        isync
        sync

        /*
         * Set up the initial MMU state
         *
         * We are still executing code at the virtual address
         * mappings set by the firmware for the base of RAM.
         *
         * We first invalidate all TLB entries but the one
         * we are running from.  We then load the KERNELBASE
         * mappings so we can begin to use kernel addresses
         * natively and so the interrupt vector locations are
         * permanently pinned (necessary since Book E
         * implementations always have translation enabled).
         *
         * TODO: Use the known TLB entry we are running from to
         *       determine which physical region we are located
         *       in.  This can be used to determine where in RAM
         *       (on a shared CPU system) or PCI memory space
         *       (on a DRAMless system) we are located.
         *       For now, we assume a perfect world which means
         *       we are located at the base of DRAM (physical 0).
         */

        /*
         * Search TLB for entry that we are currently using.
         * Invalidate all entries but the one we are using.
         */
        /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
        mfspr   r3,SPRN_PID             /* Get PID */
        mfmsr   r4                      /* Get MSR */
        andi.   r4,r4,MSR_IS@l          /* TS=1? */
        beq     wmmucr                  /* If not, leave STS=0 */
        oris    r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr: mtspr   SPRN_MMUCR,r3           /* Put MMUCR */
        sync
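        /*
         * The bl/mflr pair below captures the address we are executing
         * from, so tlbsx can locate the TLB entry that currently maps us.
         */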
        bl      invstr                  /* Find our address */
invstr: mflr    r5                      /* Make it accessible */
        tlbsx   r23,0,r5                /* Find entry we are in */
        li      r4,0                    /* Start at TLB entry 0 */
        li      r3,0                    /* Set PAGEID inval value */
1:      cmpw    r23,r4                  /* Is this our entry? */
        beq     skpinv                  /* If so, skip the inval */
        tlbwe   r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
skpinv: addi    r4,r4,1                 /* Increment */
        cmpwi   r4,64                   /* Are we done? */
        bne     1b                      /* If not, repeat */
        isync                           /* If so, context change */

        /*
         * Configure and load pinned entry into TLB slot 63.
         */
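        /*
         * Slot 63 is the highest TLB entry.  The software TLB miss
         * handlers below only recycle entries up to a patched watermark,
         * so this kernel mapping is effectively pinned and never evicted.
         */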
        lis     r3,PAGE_OFFSET@h
        ori     r3,r3,PAGE_OFFSET@l

        /* Kernel is at the base of RAM */
        li      r4, 0                   /* Load the kernel physical address */

        /* Load the kernel PID = 0 */
        li      r0,0
        mtspr   SPRN_PID,r0
        sync

        /* Initialize MMUCR */
        li      r5,0
        mtspr   SPRN_MMUCR,r5
        sync

        /* pageid fields */
        clrrwi  r3,r3,10                /* Keep only the effective page number (clear the low field bits) */
        ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

        /* xlat fields */
        clrrwi  r4,r4,10                /* Keep only the real page number (clear the low field bits) */
        /* ERPN is 0 for first 4GB page */

        /* attrib fields */
        /* Added guarded bit to protect against speculative loads/stores */
        li      r5,0
        ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
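        /* Write the three words (PAGEID, XLAT, ATTRIB) of TLB entry 63 */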
        li      r0,63                   /* TLB slot 63 */

        tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
        tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
        tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */

        /* Force context change */
        mfmsr   r0
        mtspr   SPRN_SRR1, r0
        lis     r0,3f@h
        ori     r0,r0,3f@l
        mtspr   SPRN_SRR0,r0
        sync
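        /*
         * rfi loads MSR from SRR1 and jumps to label 3 via SRR0, which
         * is a context-synchronizing switch onto the new pinned mapping.
         */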
        rfi

        /* If necessary, invalidate original entry we used */
3:      cmpwi   r23,63
        beq     4f
        li      r6,0
        tlbwe   r6,r23,PPC44x_TLB_PAGEID
        isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
        /* Add UART mapping for early debug. */

        /* pageid fields */
        lis     r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
        ori     r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

        /* xlat fields */
        lis     r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
        ori     r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

        /* attrib fields */
        li      r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
        li      r0,62                   /* TLB slot 62 */
        tlbwe   r3,r0,PPC44x_TLB_PAGEID
        tlbwe   r4,r0,PPC44x_TLB_XLAT
        tlbwe   r5,r0,PPC44x_TLB_ATTRIB

        /* Force context change */
        isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

        /* Establish the interrupt vector offsets */
        SET_IVOR(0, CriticalInput);
        SET_IVOR(1, MachineCheck);
        SET_IVOR(2, DataStorage);
        SET_IVOR(3, InstructionStorage);
        SET_IVOR(4, ExternalInput);
        SET_IVOR(5, Alignment);
        SET_IVOR(6, Program);
        SET_IVOR(7, FloatingPointUnavailable);
        SET_IVOR(8, SystemCall);
        SET_IVOR(9, AuxillaryProcessorUnavailable);
        SET_IVOR(10, Decrementer);
        SET_IVOR(11, FixedIntervalTimer);
        SET_IVOR(12, WatchdogTimer);
        SET_IVOR(13, DataTLBError);
        SET_IVOR(14, InstructionTLBError);
        SET_IVOR(15, DebugCrit);

        /* Establish the interrupt vector base */
        lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
        mtspr   SPRN_IVPR,r4

        /*
         * This is where the main kernel code starts.
         */

        /* ptr to current */
        lis     r2,init_task@h
        ori     r2,r2,init_task@l

        /* ptr to current thread */
        addi    r4,r2,THREAD            /* init task's THREAD */
        mtspr   SPRN_SPRG3,r4

        /* stack */
        lis     r1,init_thread_union@h
        ori     r1,r1,init_thread_union@l
        li      r0,0
        stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
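        /*
         * r1 now points STACK_FRAME_OVERHEAD below the top of
         * init_thread_union, with a zeroed back-chain word stored there.
         */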
        bl      early_init

        /*
         * Decide what sort of machine this is and initialize the MMU.
         */
        mr      r3,r31
        mr      r4,r30
        mr      r5,r29
        mr      r6,r28
        mr      r7,r27
        bl      machine_init
        bl      MMU_init

        /* Setup PTE pointers for the Abatron bdiGDB */
        lis     r6, swapper_pg_dir@h
        ori     r6, r6, swapper_pg_dir@l
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        lis     r4, KERNELBASE@h
        ori     r4, r4, KERNELBASE@l
        stw     r5, 0(r4)               /* Save abatron_pteptrs at a fixed location */
        stw     r6, 0(r5)

        /* Let's move on */
        lis     r4,start_kernel@h
        ori     r4,r4,start_kernel@l
        lis     r3,MSR_KERNEL@h
        ori     r3,r3,MSR_KERNEL@l
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        rfi                             /* change context and jump to start_kernel */
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors.  In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
interrupt_base:
        /* Critical Input Interrupt */
        CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

        /* Machine Check Interrupt */
        CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
        MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

        /* Data Storage Interrupt */
        DATA_STORAGE_EXCEPTION

        /* Instruction Storage Interrupt */
        INSTRUCTION_STORAGE_EXCEPTION

        /* External Input Interrupt */
        EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

        /* Alignment Interrupt */
        ALIGNMENT_EXCEPTION

        /* Program Interrupt */
        PROGRAM_EXCEPTION

        /* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
#else
        EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif

        /* System Call Interrupt */
        START_EXCEPTION(SystemCall)
        NORMAL_EXCEPTION_PROLOG
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)

        /* Auxillary Processor Unavailable Interrupt */
        EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
        /* Fixed Interval Timer Interrupt */
        /* TODO: Add FIT support */
        EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

        /* Watchdog Timer Interrupt */
        /* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
        CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

        /* Data TLB Error Interrupt */
        START_EXCEPTION(DataTLBError)
        mtspr   SPRN_SPRG0, r10         /* Save some working registers */
        mtspr   SPRN_SPRG1, r11
        mtspr   SPRN_SPRG4W, r12
        mtspr   SPRN_SPRG5W, r13
        mfcr    r11
        mtspr   SPRN_SPRG7W, r11
        mfspr   r10, SPRN_DEAR          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l

        mfspr   r12,SPRN_MMUCR
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        b       4f

        /* Get the PGD for the current thread */
3:
        mfspr   r11,SPRN_SPRG3
        lwz     r11,PGDIR(r11)

        /* Load PID into MMUCR TID */
        mfspr   r12,SPRN_MMUCR
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

4:
        mtspr   SPRN_MMUCR,r12
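        /*
         * MMUCR[TID] now holds the translation ID that tlbwe will tag
         * the new TLB entry with in finish_tlb_load (0 for kernel
         * addresses, the current PID otherwise).
         */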
        /* Mask of required permission bits.  Note that while we
         * do copy ESR:ST to _PAGE_RW position as trying to write
         * to an RO page is pretty common, we don't do it with
         * _PAGE_DIRTY.  We could do it, but it's a fairly rare
         * event so I'd rather take the overhead when it happens
         * rather than adding an instruction here.  We should measure
         * whether the whole thing is worth it in the first place,
         * as we could then avoid loading SPRN_ESR completely...
         *
         * TODO: Is it worth doing that mfspr & rlwimi in the first
         *       place or can we save a couple of instructions here ?
         */
        mfspr   r12,SPRN_ESR
        li      r13,_PAGE_PRESENT|_PAGE_ACCESSED
        rlwimi  r13,r12,10,30,30
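        /*
         * The rlwimi above rotates ESR[ST] (set on store faults) into
         * the _PAGE_RW bit of the required-permission mask, so stores
         * also demand write permission in the PTE check below.
         */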
        /* Load the PTE */
        /* Compute pgdir/pmd offset */
        rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        /* Compute pte address */
        rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
        lwz     r11, 0(r12)             /* Get high word of pte entry */
        lwz     r12, 4(r12)             /* Get low word of pte entry */

        lis     r10,tlb_44x_index@ha

        andc.   r13,r13,r12             /* Check permission */

        /* Load the next available TLB index */
        lwz     r13,tlb_44x_index@l(r10)
        bne     2f                      /* Bail if permission mismatch */
        /* Increment, rollover, and store TLB index */
        addi    r13,r13,1

        /* Compare with watermark (instruction gets patched) */
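        /*
         * The immediate of the cmpwi below is patched at boot with the
         * TLB high-water mark (tlb_44x_hwater), so the round-robin index
         * wraps before reaching the pinned kernel entries at the top.
         */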
        .globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
        cmpwi   0,r13,1                 /* reserve entries */
        ble     5f
        li      r13,0
5:
        /* Store the next available TLB index */
        stw     r13,tlb_44x_index@l(r10)

        /* Re-load the faulting address */
        mfspr   r10,SPRN_DEAR

        /* Jump to common tlb load */
        b       finish_tlb_load

2:
        /* The bailout.  Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG7R
        mtcr    r11
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0
        b       DataStorage

        /* Instruction TLB Error Interrupt */
        /*
         * Nearly the same as above, except we get our
         * information from different registers and bail out
         * to a different point.
         */
        START_EXCEPTION(InstructionTLBError)
        mtspr   SPRN_SPRG0, r10         /* Save some working registers */
        mtspr   SPRN_SPRG1, r11
        mtspr   SPRN_SPRG4W, r12
        mtspr   SPRN_SPRG5W, r13
        mfcr    r11
        mtspr   SPRN_SPRG7W, r11
        mfspr   r10, SPRN_SRR0          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l

        mfspr   r12,SPRN_MMUCR
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        b       4f

        /* Get the PGD for the current thread */
3:
        mfspr   r11,SPRN_SPRG3
        lwz     r11,PGDIR(r11)

        /* Load PID into MMUCR TID */
        mfspr   r12,SPRN_MMUCR
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

4:
        mtspr   SPRN_MMUCR,r12

        /* Make up the required permissions */
        li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC

        /* Compute pgdir/pmd offset */
        rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        /* Compute pte address */
        rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
        lwz     r11, 0(r12)             /* Get high word of pte entry */
        lwz     r12, 4(r12)             /* Get low word of pte entry */

        lis     r10,tlb_44x_index@ha

        andc.   r13,r13,r12             /* Check permission */

        /* Load the next available TLB index */
        lwz     r13,tlb_44x_index@l(r10)
        bne     2f                      /* Bail if permission mismatch */
        /* Increment, rollover, and store TLB index */
        addi    r13,r13,1

        /* Compare with watermark (instruction gets patched) */
        .globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
        cmpwi   0,r13,1                 /* reserve entries */
        ble     5f
        li      r13,0
5:
        /* Store the next available TLB index */
        stw     r13,tlb_44x_index@l(r10)

        /* Re-load the faulting address */
        mfspr   r10,SPRN_SRR0

        /* Jump to common TLB load point */
        b       finish_tlb_load

2:
        /* The bailout.  Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG7R
        mtcr    r11
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0
        b       InstructionStorage

        /* Debug Interrupt */
        DEBUG_CRIT_EXCEPTION
/*
 * Local functions
 */

/*
 * Both the instruction and data TLB miss handlers reach this
 * point to load the TLB.
 *      r10 - EA of fault
 *      r11 - PTE high word value
 *      r12 - PTE low word value
 *      r13 - TLB index
 *      MMUCR - loaded with proper value when we get here
 * Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
        /* Combine RPN & ERPN and write the XLAT word (TLB word 1) */
        rlwimi  r11,r12,0,0,31-PAGE_SHIFT
        tlbwe   r11,r13,PPC44x_TLB_XLAT

        /*
         * Create the PAGEID word (TLB word 0).  This is the faulting
         * address (EPN), page size, and valid flag.
         */
        li      r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE

        /* Insert valid and page size */
        rlwimi  r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
        tlbwe   r10,r13,PPC44x_TLB_PAGEID       /* Write PAGEID */
        /* And the ATTRIB word (TLB word 2) */
        li      r10,0xf85                       /* Mask to apply from PTE */
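        /*
         * 0xf85 is assumed to select the storage attribute bits
         * (W, I, M, G, E) plus SX and SR from the PTE low word, whose
         * low bits mirror the TLB attribute layout; SW is merged in
         * from _PAGE_DIRTY below and the U* bits from _PAGE_USER.
         */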
        rlwimi  r10,r12,29,30,30                /* DIRTY -> SW position */
        and     r11,r12,r10                     /* Mask PTE bits to keep */
        andi.   r10,r12,_PAGE_USER              /* User page ? */
        beq     1f                              /* nope, leave U bits empty */
        rlwimi  r11,r11,3,26,28                 /* yes, copy S bits to U */
1:      tlbwe   r11,r13,PPC44x_TLB_ATTRIB       /* Write ATTRIB */

        /* Done...restore registers and get out of here. */
        mfspr   r11, SPRN_SPRG7R
        mtcr    r11
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0
        rfi                                     /* Force context change */

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
        li      r3,MachineCheckA@l
        mtspr   SPRN_IVOR1,r3
        sync
        blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
        blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
        blr
#endif

_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
        /* Context switch the PTE pointer for the Abatron BDI2000.
         * The PGDIR is the second parameter.
         */
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        stw     r4, 0x4(r5)
#endif
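        /*
         * The new context number goes straight into the PID register;
         * TLB entries are tagged with a TID, so changing the PID
         * switches which user mappings are visible.
         */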
        mtspr   SPRN_PID,r3
        isync                           /* Force context change */
        blr

/*
 * We put a few things here that have to be page-aligned.  This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
        .data
        .align PAGE_SHIFT
        .globl sdata
sdata:
        .globl empty_zero_page
empty_zero_page:
        .space PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
        .globl swapper_pg_dir
swapper_pg_dir:
        .space PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
        .space 8