head_fsl_booke.S

/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	.section .text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r25,0		/* phys kernel start (low) */
	li	r24,0		/* CPU number */
	li	r23,0		/* phys kernel start (high) */

/* We try to not make any assumptions about how the boot loader
 * setup or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1.  If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */
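
/* Quick reference for the MAS (MMU assist) registers used throughout
 * the TLB setup below (standard Book E / e500 usage):
 *   MAS0 - selects the TLB array (TLBSEL) and entry number (ESEL)
 *   MAS1 - VALID and IPROT bits, translation ID (TID), address space
 *          select (TS) and page size (TSIZE)
 *   MAS2 - effective page number plus the WIMGE attribute bits
 *   MAS3 - real page number plus the permission bits
 *   MAS6 - search PID (SPID) and address space (SAS) used by tlbsx
 * tlbre/tlbwe read/write the entry named by MAS0; tlbsx fills the MAS
 * registers from a matching entry, if one exists.
 */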
/* 1. Find the index of the entry we're executing in */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB

	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB

	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff
	li	r6,0				/* Set Entry counter to 0 */
1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv				/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r9				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate TLB0 */
	li	r6,0x04
	tlbivax	0,r6
	TLBSYNC
	/* Invalidate TLB1 */
	li	r6,0x0c
	tlbivax	0,r6
	TLBSYNC
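
	/* On e500 the "address" handed to tlbivax is really a command
	 * word: the 0x08 bit selects TLB1 instead of TLB0, and the 0x04
	 * bit requests a flash invalidation of every non-IPROT entry in
	 * that array, so 0x04/0x0c clear TLB0/TLB1 wholesale.  TLBSYNC
	 * then waits for the invalidation to take effect everywhere.
	 */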
/* 3. Setup a temp mapping and jump to it */
	andi.	r5, r3, 0x1	/* Pick a non-zero entry other than the one we run in */
	addi	r5, r5, 0x1
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre

	/* grab and fixup the RPN */
	mfspr	r6,SPRN_MAS1	/* extract MAS1[SIZE] */
	rlwinm	r6,r6,25,27,30
	li	r8,-1
	addi	r6,r6,10
	slw	r6,r8,r6	/* convert to mask */
	bl	1f		/* Find our address */
1:	mflr	r7
	mfspr	r8,SPRN_MAS3
#ifdef CONFIG_PHYS_64BIT
	mfspr	r23,SPRN_MAS7
#endif
	and	r8,r6,r8
	subfic	r9,r6,-4096
	and	r9,r9,r7
	or	r25,r8,r9
	ori	r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
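
	/* At this point r25 (and r23 on 64-bit-physical parts) holds the
	 * physical address the kernel is running at: the page-sized mask
	 * built from MAS1[TSIZE] keeps the physical page base from MAS3,
	 * and the remaining low bits come from our own link-register
	 * address.  r8 is that same address with supervisor RWX
	 * permissions added, ready to be used as MAS3 for the mappings
	 * written below.
	 */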
	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
	slwi	r6,r6,12
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	li	r7,0		/* temp EPN = 0 */
	rlwimi	r7,r6,0,20,31
	mtspr	SPRN_MAS2,r7
	mtspr	SPRN_MAS3,r8
	tlbwe

	xori	r6,r4,1
	slwi	r6,r6,5		/* setup new context with other address space */
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31
	addi	r7,r7,24
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi
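
	/* rfi reloads the PC from SRR0 and the MSR from SRR1 in one
	 * operation: SRR0 was set to our offset within the current 4K
	 * page, rebased onto the temp EPN of 0 and advanced 24 bytes to
	 * step past the address-fixup and rfi instructions themselves,
	 * while SRR1 selects the other address space.  Execution
	 * therefore continues at step 4 below, now running out of the
	 * temporary TLB1 entry.
	 */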
/* 4. Clear out PIDs & Search info */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6

/* 5. Invalidate mapping we started in */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
	TLBSYNC

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP	MAS2_M
#else
#define M_IF_SMP	0
#endif

/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000	/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r6
	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
	mtspr	SPRN_MAS2,r6
	mtspr	SPRN_MAS3,r8
	tlbwe
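
	/* Note that MAS3 is still r8, the supervisor-RWX view of the
	 * physical kernel start computed in step 3, so this entry maps
	 * the 64M at virtual PAGE_OFFSET onto the physical memory the
	 * kernel was actually loaded at.
	 */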
/* 7. Jump to KERNELBASE mapping */
	lis	r6,(KERNELBASE & ~0xfff)@h
	ori	r6,r6,(KERNELBASE & ~0xfff)@l
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
2:	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
	TLBSYNC
	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugDebug);
#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
	SET_IVOR(15, DebugCrit);
#endif
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif
#ifdef CONFIG_PPC_E500MC
	SET_IVOR(36, Doorbell);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

/*
 * This is where the main kernel code starts.
 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
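
	/* The initial kernel stack lives at the top of init_thread_union;
	 * stwu both stores a zero back-chain word (terminating stack
	 * unwinds) and leaves r1 pointing just below one empty stack
	 * frame, per the usual powerpc stack-frame convention.
	 */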
	bl	early_init

#ifdef CONFIG_RELOCATABLE
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	stw	r23,0(r3)
	stw	r25,4(r3)
#else
	stw	r25,0(r3)
#endif
#endif
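
	/* For relocatable kernels, the physical load address discovered
	 * in step 3 is published in kernstart_addr: with 64-bit physical
	 * addressing, r23 (from MAS7) supplies the upper word and r25
	 * the lower, so later code can derive the virtual-to-physical
	 * offset the kernel is actually running at.
	 */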
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff
	lis	r4,num_tlbcam_entries@ha
	stw	r3,num_tlbcam_entries@l(r4)

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
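
/* Both variants are the classic two-level walk: the top bits of the
 * faulting EA index an entry in the page directory, that entry is
 * checked for a present second-level table, and the middle EA bits
 * then index the PTE within it.  With CONFIG_PTE_64BIT each PTE is 8
 * bytes and (big-endian) its flags/low-RPN word sits at offset 4,
 * which is why that variant loads r11 from 4(r12) and the handlers
 * below fetch the upper word separately from 0(r12).
 */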
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR	/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR	/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxillary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_DEAR	/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1	/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,11,29,29

	FIND_PTE
	andc.	r13,r13,r11	/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12	/* create false data dep */
	lwzx	r13,r11,r10	/* Get upper pte bits */
#else
	lwz	r13,0(r12)	/* Get upper pte bits */
#endif
#endif

	bne	2f		/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataStorage
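
	/* The bailout path undoes the SPRG scratch-register save done at
	 * entry and simply re-enters through DataStorage: the full
	 * exception prolog there builds a proper register frame and hands
	 * the fault to C code (handle_page_fault), which is too heavy to
	 * do in this TLB-miss fast path.
	 */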
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_SRR0	/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1	/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC

	FIND_PTE
	andc.	r13,r13,r11	/* Check permission */

#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r11,r12	/* create false data dep */
	lwzx	r13,r11,r10	/* Get upper pte bits */
#else
	lwz	r13,0(r12)	/* Get upper pte bits */
#endif
#endif

	bne	2f		/* Bail if permission mismatch */

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionStorage
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	bne	load_up_spe
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
#else
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)

#ifdef CONFIG_PPC_E500MC
	EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
#endif

	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
#if defined(CONFIG_E500) && !defined(CONFIG_PPC_E500MC)
	DEBUG_CRIT_EXCEPTION
#endif
/*
 * Local functions
 */

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
	mtspr	SPRN_MAS2, r12

	li	r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	and	r12, r11, r10
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	slwi	r10, r12, 1
	or	r10, r10, r12
	iseleq	r12, r12, r10
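
	/* This builds the MAS3 permission bits from the Linux PTE: the
	 * fsl-booke PTE layout keeps these flags in the MAS3 supervisor
	 * positions, so r12 ends up with the supervisor permissions the
	 * PTE allows (execute, read, and write only if dirty).  In MAS3
	 * each user permission bit sits one position above its
	 * supervisor counterpart, so the shift left by one produces the
	 * matching user permissions, and iseleq keeps the
	 * supervisor-only set when the _PAGE_USER test came back zero.
	 */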
#ifdef CONFIG_PTE_64BIT
2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
	mtspr	SPRN_MAS3, r12
BEGIN_FTR_SECTION
	srwi	r10, r13, 8		/* grab RPN[8:31] */
	mtspr	SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
#else
2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

	tlbwe

	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi				/* Force context change */
#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_spe:
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	/* restore registers and return */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi

/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel  (task=%p, pc=%x)  \n"
	.align	4,0

#endif /* CONFIG_SPE */
/*
 * Global functions
 */

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
	lis	r4,TLBCAM@ha
	addi	r4,r4,TLBCAM@l
	mulli	r5,r3,20
	add	r3,r5,r4
	lwz	r4,0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,4(r3)
	mtspr	SPRN_MAS1,r4
	lwz	r4,8(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,12(r3)
	mtspr	SPRN_MAS3,r4
	tlbwe
	isync
	blr
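
/* The stride of 20 bytes (mulli r5,r3,20) implies each TLBCAM record
 * is five 32-bit words; only the first four (MAS0-MAS3) are loaded
 * into the MAS registers here before tlbwe commits the entry, and the
 * isync ensures the new translation takes effect before returning.
 */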
/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif
/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr
_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
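
/* flush_dcache_L1 flushes by displacement: HID0[DCFA] (data cache
 * flush assist) makes line replacement predictable, so a linear sweep
 * of loads from KERNELBASE, sized from the L1CFG0 geometry fields,
 * displaces every existing line from the L1; the second sweep of dcbf
 * then writes the loaded lines back and invalidates them before HID0
 * is restored.
 */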
/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8