/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>

	.section	.text
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
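# Both paths below restore the CCR saved in SCR2, stash the fault address (EAR0)
# in SCR2 for the C fault handler, reload GR29 with the kernel's current task
# pointer and branch to the common kernel MMU fault entry; the _sstep variant
# additionally restores PCSR/PSR from the saved break state so that single-stepping
# resumes on return.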
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
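# Rough flow (the code below is authoritative):
#  1. save CCR in SCR2
#  2. if EAR0 falls in the 64MB region recorded in SCR0, the matching page table is
#     already mapped through DAMR4; otherwise walk the PGD via DAMR3 and remap
#     DAMR4/SCR0 to cover the new region
#  3. fetch the PTE; fault to __tlb_kernel_fault if it is not present, otherwise
#     mark it accessed
#  4. punt any valid translation currently held in IAMR1/DAMR1 into the main TLB
#  5. load the new translation into IAMR1/DAMR1 and rett back to the faulting insn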
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
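	# roughly, in C terms (illustrative only; the insns below do the real work):
	#	offset = (EAR0 >> 12) & 0x3ffc;		/* bits 25:14 of EAR0, scaled by 4 */
	#	pte    = *(u32 *)(DAMLR4 + offset);
	#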
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
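	# - roughly: tlbpr op #4 probes on the old IAMLR1 value and invalidates any
	#   matching line in the TLB and in IAMR1/DAMR1; TPPR/TPLR are then loaded with
	#   the old translation (re-adding the V bit the probe cleared) and tlbpr op #2
	#   writes it into the main TLB, with TPXR.E reporting any write error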
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
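	# - roughly, in C terms (illustrative only):
	#	pge_off  = (EAR0 >> 26) << 8;		/* 64 PGEs, 256 bytes apart */
	#	pge      = *(u32 *)(DAMLR3 + pge_off);
	#	coverage = pge_off << 18;		/* EAR0 & 0xfc000000, kept in SCR0 */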
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
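# This handler mirrors __entry_kernel_insn_tlb_miss above: the structure is the
# same, but the cached page table is mapped through DAMR5 (reached via DAMLR5) and
# its coverage base is kept in SCR1 rather than SCR0.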
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
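# Same structure as the kernel handlers above, but the faulting address arrives in
# GR28, lookup failures go to __tlb_user_fault, and there is no kernel task pointer
# to reload before the rett.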
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss
###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
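	# - roughly, the insn-PGE-cache probe below amounts to (illustrative only):
	#	if (((EAR0 ^ SCR0) >> 26) == 0)		/* same 64MB region as the insn PGE */
	#		reuse the page table mapped at DAMLR4;
	#	else
	#		walk the PGD via DAMLR3 as usual;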
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss