tlb_low_64e.S

/*
 * Low level TLB miss handlers for Book3E
 *
 * Copyright (C) 2008-2009
 *     Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT  (PTE_INDEX_SIZE+1)
#else
#define VPTE_PMD_SHIFT  (PTE_INDEX_SIZE)
#endif
#define VPTE_PUD_SHIFT  (VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT  (VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
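
/*
 * As a purely illustrative example (the real values depend on the
 * page-table configuration): with 9-bit index sizes at every level,
 * the cumulative shifts above come out as VPTE_PMD_SHIFT = 9,
 * VPTE_PUD_SHIFT = 18, VPTE_PGD_SHIFT = 27 and VPTE_INDEX_SIZE = 36,
 * i.e. a virtual page table of 2^36 PTEs (8 bytes each) covering the
 * 2^(36+12) bytes of address space mapped by 4K pages.
 */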

/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/

/* Data TLB miss */
        START_EXCEPTION(data_tlb_miss)
        TLB_MISS_PROLOG

        /* Now we handle the fault proper. We only save DEAR in the normal
         * fault case since that's the only interesting value here.
         * We could probably also optimize by not saving SRR0/1 in the
         * linear mapping case, but I'll leave that for later.
         */
        mfspr   r14,SPRN_ESR
        mfspr   r16,SPRN_DEAR           /* get faulting address */
        srdi    r15,r16,60              /* get region */
        cmpldi  cr0,r15,0xc             /* linear mapping ? */
        TLB_MISS_STATS_SAVE_INFO
        beq     tlb_load_linear         /* yes -> go to linear map load */

        /* The page tables are mapped virtually linear. At this point, though,
         * we don't know whether we are trying to fault in a first level
         * virtual address or a virtual page table address. We can get that
         * from bit 0x1 of the region ID, which we have set for a page table.
         */
        andi.   r10,r15,0x1
        bne-    virt_page_table_tlb_miss

        std     r14,EX_TLB_ESR(r12);    /* save ESR */
        std     r16,EX_TLB_DEAR(r12);   /* save DEAR */

        /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
        li      r11,_PAGE_PRESENT
        oris    r11,r11,_PAGE_ACCESSED@h

        /* We do the user/kernel test for the PID here along with the RW test
         */
        cmpldi  cr0,r15,0               /* Check for user region */

        /* We pre-test some combination of permissions to avoid double
         * faults:
         *
         * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
         * ESR_ST is 0x00800000
         * _PAGE_BAP_SW is 0x00000010
         * So the shift is >> 19. This tests for supervisor writeability.
         * If the page happens to be supervisor writeable and not user
         * writeable, we will take a new fault later, but that should be
         * a rare enough case.
         *
         * We also move ESR_ST into the _PAGE_DIRTY position
         * _PAGE_DIRTY is 0x00001000, so the shift is >> 11.
         *
         * MAS1 is preset for all we need except for TID, which needs to
         * be cleared for kernel translations.
         */
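        /* Sanity check on the arithmetic above: 0x00800000 >> 19 is
         * 0x00000010 (_PAGE_BAP_SW) and 0x00800000 >> 11 is 0x00001000
         * (_PAGE_DIRTY). An rlwimi with a rotate-left amount of 32-n
         * inserts a bit as if it had been shifted right by n within the
         * low 32 bits.
         */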
        rlwimi  r11,r14,32-19,27,27
        rlwimi  r11,r14,32-16,19,19
        beq     normal_tlb_miss

        /* XXX replace the RMW cycles with immediate loads + writes */
1:      mfspr   r10,SPRN_MAS1
        cmpldi  cr0,r15,8               /* Check for vmalloc region */
        rlwinm  r10,r10,0,16,1          /* Clear TID */
        mtspr   SPRN_MAS1,r10
        beq+    normal_tlb_miss

        /* We got a crappy address, just fault with whatever DEAR and ESR
         * are here.
         */
        TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e

/* Instruction TLB miss */
        START_EXCEPTION(instruction_tlb_miss)
        TLB_MISS_PROLOG

        /* If we take a recursive fault, the second level handler may need
         * to know whether we are handling a data or instruction fault in
         * order to get to the right store fault handler. We provide that
         * info by writing a crazy value in ESR in our exception frame.
         */
        li      r14,-1          /* store to exception frame is done later */

        /* Now we handle the fault proper. We only save DEAR in the non
         * linear mapping case since we know the linear mapping case will
         * not re-enter. We could indeed optimize and also not save SRR0/1
         * in the linear mapping case, but I'll leave that for later.
         *
         * Faulting address is SRR0 which is already in r16
         */
        srdi    r15,r16,60              /* get region */
        cmpldi  cr0,r15,0xc             /* linear mapping ? */
        TLB_MISS_STATS_SAVE_INFO
        beq     tlb_load_linear         /* yes -> go to linear map load */

        /* We do the user/kernel test for the PID here along with the RW test
         */
        li      r11,_PAGE_PRESENT|_PAGE_EXEC   /* Base perm */
        oris    r11,r11,_PAGE_ACCESSED@h

        cmpldi  cr0,r15,0               /* Check for user region */
        std     r14,EX_TLB_ESR(r12)     /* write crazy -1 to frame */
        beq     normal_tlb_miss

        /* XXX replace the RMW cycles with immediate loads + writes */
1:      mfspr   r10,SPRN_MAS1
        cmpldi  cr0,r15,8               /* Check for vmalloc region */
        rlwinm  r10,r10,0,16,1          /* Clear TID */
        mtspr   SPRN_MAS1,r10
        beq+    normal_tlb_miss

        /* We got a crappy address, just fault */
        TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_instruction_storage_book3e

/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
        /* So we first construct the page table address. We do that by
         * shifting the bottom of the address (not the region ID) by
         * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
         * or'ing the fourth high bit.
         *
         * NOTE: For 64K pages, we do things slightly differently in
         * order to handle the weird page table format used by linux.
         */
        ori     r10,r15,0x1
#ifdef CONFIG_PPC_64K_PAGES
        /* For the top bits, 16 bytes per PTE */
        rldicl  r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
        /* Now create the bottom bits as 0 in position 0x8000 and
         * the rest calculated for 8 bytes per PTE
         */
        rldicl  r15,r16,64-(PAGE_SHIFT-3),64-15
        /* Insert the bottom bits in */
        rlwimi  r14,r15,0,16,31
#else
        rldicl  r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
#endif
        sldi    r15,r10,60
        clrrdi  r14,r14,3
        or      r10,r15,r14
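        /* r10 now holds the virtual address of the PTE for the faulting
         * page: the region ID with bit 0x1 or'ed in (marking a page-table
         * address) in the top 4 bits, the PTE offset below that, and the
         * low 3 bits cleared so it is a valid 8-byte-aligned PTE pointer.
         */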
        BEGIN_MMU_FTR_SECTION
        /* Set the TLB reservation and search for an existing entry. Then load
         * the entry.
         */
        PPC_TLBSRX_DOT(0,r16)
        ld      r14,0(r10)
        beq     normal_tlb_miss_done
        MMU_FTR_SECTION_ELSE
        ld      r14,0(r10)
        ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
        /* Check if required permissions are met */
        andc.   r15,r11,r14
        bne-    normal_tlb_miss_access_fault

        /* Now we build the MAS:
         *
         * MAS 0   : Fully setup with defaults in MAS4 and TLBnCFG
         * MAS 1   : Almost fully setup
         *            - PID already updated by caller if necessary
         *            - TSIZE needs changing if !base page size, not
         *              yet implemented for now
         * MAS 2   : Defaults not useful, need to be redone
         * MAS 3+7 : Needs to be done
         *
         * TODO: mix up code below for better scheduling
         */
        clrrdi  r11,r16,12              /* Clear low crap in EA */
        rlwimi  r11,r14,32-19,27,31     /* Insert WIMGE */
        mtspr   SPRN_MAS2,r11

        /* Check page size, if not standard, update MAS1 */
        rldicl  r11,r14,64-8,64-8
#ifdef CONFIG_PPC_64K_PAGES
        cmpldi  cr0,r11,BOOK3E_PAGESZ_64K
#else
        cmpldi  cr0,r11,BOOK3E_PAGESZ_4K
#endif
        beq-    1f
        mfspr   r11,SPRN_MAS1
        rlwimi  r11,r14,31,21,24
        rlwinm  r11,r11,0,21,19
        mtspr   SPRN_MAS1,r11
1:
        /* Move RPN in position */
        rldicr  r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
        clrldi  r15,r11,12              /* Clear crap at the top */
        rlwimi  r15,r14,32-8,22,25      /* Move in U bits */
        rlwimi  r15,r14,32-2,26,31      /* Move in BAP bits */

        /* Mask out SW and UW if !DIRTY (XXX optimize this !) */
        andi.   r11,r14,_PAGE_DIRTY
        bne     1f
        li      r11,MAS3_SW|MAS3_UW
        andc    r15,r15,r11
1:
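        /* MAS3 wants the low 32 bits of RPN|permissions and MAS7 the high
         * bits. MMUs without the paired MAS7_MAS3 SPR need two writes;
         * with paired-MAS support a single 64-bit mtspr covers both.
         */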
        BEGIN_MMU_FTR_SECTION
        srdi    r16,r15,32
        mtspr   SPRN_MAS3,r15
        mtspr   SPRN_MAS7,r16
        MMU_FTR_SECTION_ELSE
        mtspr   SPRN_MAS7_MAS3,r15
        ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
        tlbwe

normal_tlb_miss_done:
        /* We don't bother with restoring DEAR or ESR since we know we are
         * level 0 and just going back to userland. They are only needed
         * if you are going to take an access fault.
         */
        TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
        TLB_MISS_EPILOG_SUCCESS
        rfi

normal_tlb_miss_access_fault:
        /* We need to check if it was an instruction miss */
        andi.   r10,r11,_PAGE_EXEC
        bne     1f
        ld      r14,EX_TLB_DEAR(r12)
        ld      r15,EX_TLB_ESR(r12)
        mtspr   SPRN_DEAR,r14
        mtspr   SPRN_ESR,r15
        TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e
1:      TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_instruction_storage_book3e

/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers.
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit.
 */
virt_page_table_tlb_miss:
        /* Are we hitting a kernel page table ? */
        andi.   r10,r15,0x8

        /* The cool thing now is that r10 contains 0 for user and 8 for kernel,
         * and we happen to have the swapper_pg_dir at offset 8 from the user
         * pgdir in the PACA :-).
         */
        add     r11,r10,r13

        /* If kernel, we need to clear MAS1 TID */
        beq     1f
        /* XXX replace the RMW cycles with immediate loads + writes */
        mfspr   r10,SPRN_MAS1
        rlwinm  r10,r10,0,16,1          /* Clear TID */
        mtspr   SPRN_MAS1,r10
1:
        BEGIN_MMU_FTR_SECTION
        /* Search if we already have a TLB entry for that virtual address, and
         * if we do, bail out.
         */
        PPC_TLBSRX_DOT(0,r16)
        beq     virt_page_table_tlb_miss_done
        END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

        /* Now, we need to walk the page tables. First check if we are in
         * range.
         */
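        /* The check below verifies that every EA bit above the virtual
         * page table index (and below the region ID) is zero, i.e. that
         * the faulting address really lies inside the VPTE window;
         * anything else is a stray access and goes to the fault path.
         */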
        rldicl. r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
        bne-    virt_page_table_tlb_miss_fault

        /* Get the PGD pointer */
        ld      r15,PACAPGD(r11)
        cmpldi  cr0,r15,0
        beq-    virt_page_table_tlb_miss_fault

        /* Get to PGD entry */
        rldicl  r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     virt_page_table_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
        /* Get to PUD entry */
        rldicl  r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     virt_page_table_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get to PMD entry */
        rldicl  r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     virt_page_table_tlb_miss_fault

        /* Ok, we're all right, we can now create a kernel translation for
         * a 4K or 64K page from r16 -> r15.
         */
        /* Now we build the MAS:
         *
         * MAS 0   : Fully setup with defaults in MAS4 and TLBnCFG
         * MAS 1   : Almost fully setup
         *            - PID already updated by caller if necessary
         *            - TSIZE for now is base page size always
         * MAS 2   : Use defaults
         * MAS 3+7 : Needs to be done
         *
         * So we only do MAS 2 and 3 for now...
         */
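        /* The PTE page pointed to by the PMD lives in the kernel linear
         * mapping, so clearing the top 4 region bits of r15 yields its
         * physical address; only SR (supervisor read) is granted since
         * this translation is just used to fetch PTEs.
         */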
        clrldi  r11,r15,4               /* remove region ID from RPN */
        ori     r10,r11,1               /* Or-in SR */

        BEGIN_MMU_FTR_SECTION
        srdi    r16,r10,32
        mtspr   SPRN_MAS3,r10
        mtspr   SPRN_MAS7,r16
        MMU_FTR_SECTION_ELSE
        mtspr   SPRN_MAS7_MAS3,r10
        ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
        tlbwe

        BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

        /* We have overridden MAS2:EPN, but currently our primary TLB miss
         * handler will always restore it, so that should not be an issue.
         * If we ever optimize the primary handler to not write MAS2 in
         * some cases, we'll have to restore MAS2:EPN here based on the
         * original fault's DEAR. If we do that, we also have to modify the
         * ITLB miss handler to store SRR0 in the exception frame
         * as DEAR.
         *
         * However, one nasty thing we did is we cleared the reservation
         * (well, potentially we did). The trick here is that if we are
         * not a level 0 exception (we interrupted the TLB miss), we
         * offset the return address by -4 in order to replay the tlbsrx
         * instruction there.
         */
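        /* Level detection: r12 points at the exception frame in use; if it
         * sits one EX_TLB_SIZE above the start of the PACA TLB save area,
         * we are in the second (nested) frame, so the SRR0 saved there is
         * the return address into the interrupted first-level handler and
         * gets backed up by one instruction to replay its tlbsrx.
         */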
        subf    r10,r13,r12
        cmpldi  cr0,r10,PACA_EXTLB+EX_TLB_SIZE
        bne-    1f
        ld      r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
        addi    r10,r11,-4
        std     r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
        END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

        /* Return to caller, normal case */
        TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
        TLB_MISS_EPILOG_SUCCESS
        rfi

virt_page_table_tlb_miss_fault:
        /* If we fault here, things are a little bit tricky. We need to call
         * either the data or the instruction store fault handler, and we need
         * to retrieve the original fault address and ESR (for data).
         *
         * The thing is, we know that in normal circumstances this is
         * always called as a second level tlb miss for SW load or as a first
         * level TLB miss for HW load, so we should be able to peek at the
         * relevant information in the first exception frame in the PACA.
         *
         * However, we do need to double check that, because we may just hit
         * a stray kernel pointer or a userland attack trying to hit those
         * areas. If that is the case, we do a data fault. (We can't get here
         * from an instruction tlb miss anyway.)
         *
         * Note also that when going to a fault, we must unwind the previous
         * level as well. Since we are doing that, we don't need to clear or
         * restore the TLB reservation either.
         */
        subf    r10,r13,r12
        cmpldi  cr0,r10,PACA_EXTLB+EX_TLB_SIZE
        bne-    virt_page_table_tlb_miss_whacko_fault

        /* We dig the original DEAR and ESR from slot 0 */
        ld      r15,EX_TLB_DEAR+PACA_EXTLB(r13)
        ld      r16,EX_TLB_ESR+PACA_EXTLB(r13)

        /* We check for the "special" ESR value for instruction faults */
        cmpdi   cr0,r16,-1
        beq     1f
        mtspr   SPRN_DEAR,r15
        mtspr   SPRN_ESR,r16
        TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e
1:      TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
        TLB_MISS_EPILOG_ERROR
        b       exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
        /* The linear fault will restart everything, so ESR and DEAR will
         * not have been clobbered. Let's just fault with what we have.
         */
        TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e

/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/

/* Data TLB miss */
        START_EXCEPTION(data_tlb_miss_htw)
        TLB_MISS_PROLOG

        /* Now we handle the fault proper. We only save DEAR in the normal
         * fault case since that's the only interesting value here.
         * We could probably also optimize by not saving SRR0/1 in the
         * linear mapping case, but I'll leave that for later.
         */
        mfspr   r14,SPRN_ESR
        mfspr   r16,SPRN_DEAR           /* get faulting address */
        srdi    r11,r16,60              /* get region */
        cmpldi  cr0,r11,0xc             /* linear mapping ? */
        TLB_MISS_STATS_SAVE_INFO
        beq     tlb_load_linear         /* yes -> go to linear map load */

        /* We do the user/kernel test for the PID here along with the RW test
         */
        cmpldi  cr0,r11,0               /* Check for user region */
        ld      r15,PACAPGD(r13)        /* Load user pgdir */
        beq     htw_tlb_miss

        /* XXX replace the RMW cycles with immediate loads + writes */
1:      mfspr   r10,SPRN_MAS1
        cmpldi  cr0,r11,8               /* Check for vmalloc region */
        rlwinm  r10,r10,0,16,1          /* Clear TID */
        mtspr   SPRN_MAS1,r10
        ld      r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
        beq+    htw_tlb_miss

        /* We got a crappy address, just fault with whatever DEAR and ESR
         * are here.
         */
        TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e

/* Instruction TLB miss */
        START_EXCEPTION(instruction_tlb_miss_htw)
        TLB_MISS_PROLOG

        /* If we take a recursive fault, the second level handler may need
         * to know whether we are handling a data or instruction fault in
         * order to get to the right store fault handler. We provide that
         * info by keeping a crazy value for ESR in r14.
         */
        li      r14,-1          /* store to exception frame is done later */

        /* Now we handle the fault proper. We only save DEAR in the non
         * linear mapping case since we know the linear mapping case will
         * not re-enter. We could indeed optimize and also not save SRR0/1
         * in the linear mapping case, but I'll leave that for later.
         *
         * Faulting address is SRR0 which is already in r16
         */
        srdi    r11,r16,60              /* get region */
        cmpldi  cr0,r11,0xc             /* linear mapping ? */
        TLB_MISS_STATS_SAVE_INFO
        beq     tlb_load_linear         /* yes -> go to linear map load */

        /* We do the user/kernel test for the PID here along with the RW test
         */
        cmpldi  cr0,r11,0               /* Check for user region */
        ld      r15,PACAPGD(r13)        /* Load user pgdir */
        beq     htw_tlb_miss

        /* XXX replace the RMW cycles with immediate loads + writes */
1:      mfspr   r10,SPRN_MAS1
        cmpldi  cr0,r11,8               /* Check for vmalloc region */
        rlwinm  r10,r10,0,16,1          /* Clear TID */
        mtspr   SPRN_MAS1,r10
        ld      r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
        beq+    htw_tlb_miss

        /* We got a crappy address, just fault */
        TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_instruction_storage_book3e

/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = PGD pointer
 * r14 = ESR
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us.
 */
htw_tlb_miss:
        /* Search if we already have a TLB entry for that virtual address, and
         * if we do, bail out.
         *
         * MAS1:IND should be already set based on MAS4
         */
        PPC_TLBSRX_DOT(0,r16)
        beq     htw_tlb_miss_done

        /* Now, we need to walk the page tables. First check if we are in
         * range.
         */
        rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
        bne-    htw_tlb_miss_fault

        /* Get the PGD pointer */
        cmpldi  cr0,r15,0
        beq-    htw_tlb_miss_fault

        /* Get to PGD entry */
        rldicl  r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     htw_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
        /* Get to PUD entry */
        rldicl  r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     htw_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

        /* Get to PMD entry */
        rldicl  r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
        clrrdi  r10,r11,3
        ldx     r15,r10,r15
        cmpldi  cr0,r15,0
        beq     htw_tlb_miss_fault

        /* Ok, we're all right, we can now create an indirect entry for
         * a 1M or 256M page.
         *
         * The last trick is that because we use "half" pages for
         * the HTW (a 1M IND is 2K and a 256M IND is 32K), we need to account
         * for an added LSB bit in the RPN. For 64K pages, there is no
         * problem as we already use 32K arrays (half PTE pages), but for
         * 4K pages we need to extract a bit from the virtual address and
         * insert it into the "PA52" bit of the RPN.
         */
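        /* In other words (4K page case): a full PTE page covers 2M of
         * address space, but a 1M indirect entry only describes half of
         * it, so the rlwimi below copies the 1MB-selector bit of the
         * faulting address into the low bit of the half-page RPN to pick
         * the right 2K half.
         */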
#ifndef CONFIG_PPC_64K_PAGES
        rlwimi  r15,r16,32-9,20,20
#endif

        /* Now we build the MAS:
         *
         * MAS 0   : Fully setup with defaults in MAS4 and TLBnCFG
         * MAS 1   : Almost fully setup
         *            - PID already updated by caller if necessary
         *            - TSIZE for now is base ind page size always
         * MAS 2   : Use defaults
         * MAS 3+7 : Needs to be done
         */
#ifdef CONFIG_PPC_64K_PAGES
        ori     r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
#else
        ori     r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif
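        /* For an indirect (IND) entry, the SPSIZE field or'ed in above
         * tells the hardware tablewalk the page size of the PTEs that
         * this entry points to, i.e. the kernel's base page size.
         */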
        BEGIN_MMU_FTR_SECTION
        srdi    r16,r10,32
        mtspr   SPRN_MAS3,r10
        mtspr   SPRN_MAS7,r16
        MMU_FTR_SECTION_ELSE
        mtspr   SPRN_MAS7_MAS3,r10
        ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
        tlbwe

htw_tlb_miss_done:
        /* We don't bother with restoring DEAR or ESR since we know we are
         * level 0 and just going back to userland. They are only needed
         * if you are going to take an access fault.
         */
        TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
        TLB_MISS_EPILOG_SUCCESS
        rfi

htw_tlb_miss_fault:
        /* We need to check if it was an instruction miss. We can tell
         * because r14 would then contain -1.
         */
        cmpdi   cr0,r14,-1
        beq     1f
        mtspr   SPRN_DEAR,r16
        mtspr   SPRN_ESR,r14
        TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_data_storage_book3e
1:      TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
        TLB_MISS_EPILOG_ERROR
        b       exc_instruction_storage_book3e

/*
 * This is the guts of the "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc., but we'll do that later.
 *
 * We also need to be careful about MAS registers here & TLB reservation,
 * as we know we'll have clobbered them if we interrupt the main TLB miss
 * handlers, in which case we probably want to do a full restart at level
 * 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 * a few things around.
 */
tlb_load_linear:
        /* For now, we assume the linear mapping is contiguous and stops at
         * linear_map_top. We also assume the size is a multiple of 1G, thus
         * we only use 1G pages for now. That might have to be changed in a
         * final implementation, especially when dealing with hypervisors.
         */
        ld      r11,PACATOC(r13)
        ld      r11,linear_map_top@got(r11)
        ld      r10,0(r11)
        cmpld   cr0,r10,r16
        bge     tlb_load_linear_fault

        /* MAS1 needs a whole new setup. */
        li      r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
        oris    r15,r15,MAS1_VALID@h    /* MAS1 needs V and TSIZE */
        mtspr   SPRN_MAS1,r15

        /* Already somebody there ? */
        PPC_TLBSRX_DOT(0,r16)
        beq     tlb_load_linear_done

        /* Now we build the remaining MAS. MAS0 and 2 should be fine
         * with their defaults, which leaves us with MAS 3 and 7. The
         * mapping is linear, so we just take the address, clear the
         * region bits, and or in the permission bits which are currently
         * hard wired.
         */
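        /* The clrrdi below rounds the EA down to a 1GB boundary (2^30) and
         * the clrldi strips the top 4 region-ID bits, leaving the physical
         * address of the 1GB page; supervisor RWX is then or'ed in.
         */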
        clrrdi  r10,r16,30              /* 1G page index */
        clrldi  r10,r10,4               /* clear region bits */
        ori     r10,r10,MAS3_SR|MAS3_SW|MAS3_SX

        BEGIN_MMU_FTR_SECTION
        srdi    r16,r10,32
        mtspr   SPRN_MAS3,r10
        mtspr   SPRN_MAS7,r16
        MMU_FTR_SECTION_ELSE
        mtspr   SPRN_MAS7_MAS3,r10
        ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
        tlbwe

tlb_load_linear_done:
        /* We use the "error" epilog for success as we do want to
         * restore to the initial faulting context, whatever it was.
         * We do that because we can't resume a fault within a TLB
         * miss handler, due to MAS and TLB reservation being clobbered.
         */
        TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
        TLB_MISS_EPILOG_ERROR
        rfi

tlb_load_linear_fault:
        /* We keep the DEAR and ESR around, this shouldn't have happened */
        cmpdi   cr0,r14,-1
        beq     1f
        TLB_MISS_EPILOG_ERROR_SPECIAL
        b       exc_data_storage_book3e
1:      TLB_MISS_EPILOG_ERROR_SPECIAL
        b       exc_instruction_storage_book3e

#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
.tlb_stat_inc:
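        /* Atomically increment the 64-bit counter whose address is in r9,
         * using a load-reserve / store-conditional loop that retries if
         * the store-conditional loses the reservation.
         */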
1:      ldarx   r8,0,r9
        addi    r8,r8,1
        stdcx.  r8,0,r9
        bne-    1b
        blr
#endif