/* tsb.S: Sparc64 TSB (Translation Storage Buffer) table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>
  6. .text
  7. .align 32
  8. /* Invoked from TLB miss handler, we are in the
  9. * MMU global registers and they are setup like
  10. * this:
  11. *
  12. * %g1: TSB entry pointer
  13. * %g2: available temporary
  14. * %g3: FAULT_CODE_{D,I}TLB
  15. * %g4: available temporary
  16. * %g5: available temporary
  17. * %g6: TAG TARGET
  18. * %g7: available temporary, will be loaded by us with
  19. * the physical address base of the linux page
  20. * tables for the current address space
  21. */
  22. tsb_miss_dtlb:
  23. mov TLB_TAG_ACCESS, %g4
  24. ldxa [%g4] ASI_DMMU, %g4
  25. ba,pt %xcc, tsb_miss_page_table_walk
  26. nop
  27. tsb_miss_itlb:
  28. mov TLB_TAG_ACCESS, %g4
  29. ldxa [%g4] ASI_IMMU, %g4
  30. ba,pt %xcc, tsb_miss_page_table_walk
  31. nop
  32. /* The sun4v TLB miss handlers jump directly here instead
  33. * of tsb_miss_{d,i}tlb with the missing virtual address
  34. * already loaded into %g4.
  35. */
  36. tsb_miss_page_table_walk:
  37. TRAP_LOAD_PGD_PHYS(%g7, %g5)
  38. USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
  39. tsb_reload:
  40. TSB_LOCK_TAG(%g1, %g2, %g7)
  41. /* Load and check PTE. */
  42. ldxa [%g5] ASI_PHYS_USE_EC, %g5
  43. brgez,a,pn %g5, tsb_do_fault
  44. TSB_STORE(%g1, %g0)
  45. /* If it is larger than the base page size, don't
  46. * bother putting it into the TSB.
  47. */
  48. srlx %g5, 32, %g2
  49. sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g7
  50. and %g2, %g7, %g2
  51. sethi %hi(_PAGE_SZBITS >> 32), %g7
  52. cmp %g2, %g7
  53. bne,a,pn %xcc, tsb_tlb_reload
  54. TSB_STORE(%g1, %g0)
  55. TSB_WRITE(%g1, %g5, %g6)
  56. /* Finally, load TLB and return from trap. */
  57. tsb_tlb_reload:
  58. cmp %g3, FAULT_CODE_DTLB
  59. bne,pn %xcc, tsb_itlb_load
  60. nop
  61. tsb_dtlb_load:
  62. 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN
  63. retry
  64. .section .gl_2insn_patch, "ax"
  65. .word 661b
  66. nop
  67. nop
  68. .previous
  69. /* For sun4v the ASI_DTLB_DATA_IN store and the retry
  70. * instruction get nop'd out and we get here to branch
  71. * to the sun4v tlb load code. The registers are setup
  72. * as follows:
  73. *
  74. * %g4: vaddr
  75. * %g5: PTE
  76. * %g6: TAG
  77. *
  78. * The sun4v TLB load wants the PTE in %g3 so we fix that
  79. * up here.
  80. */
  81. ba,pt %xcc, sun4v_dtlb_load
  82. mov %g5, %g3
  83. tsb_itlb_load:
  84. 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
  85. retry
  86. .section .gl_2insn_patch, "ax"
  87. .word 661b
  88. nop
  89. nop
  90. .previous
  91. /* For sun4v the ASI_ITLB_DATA_IN store and the retry
  92. * instruction get nop'd out and we get here to branch
  93. * to the sun4v tlb load code. The registers are setup
  94. * as follows:
  95. *
  96. * %g4: vaddr
  97. * %g5: PTE
  98. * %g6: TAG
  99. *
  100. * The sun4v TLB load wants the PTE in %g3 so we fix that
  101. * up here.
  102. */
  103. ba,pt %xcc, sun4v_itlb_load
  104. mov %g5, %g3
  105. /* No valid entry in the page tables, do full fault
  106. * processing.
  107. */
  108. .globl tsb_do_fault
  109. tsb_do_fault:
  110. cmp %g3, FAULT_CODE_DTLB
  111. 661: rdpr %pstate, %g5
  112. wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
  113. .section .gl_2insn_patch, "ax"
  114. .word 661b
  115. nop
  116. nop
  117. .previous
  118. bne,pn %xcc, tsb_do_itlb_fault
  119. nop
  120. tsb_do_dtlb_fault:
  121. rdpr %tl, %g3
  122. cmp %g3, 1
  123. 661: mov TLB_TAG_ACCESS, %g4
  124. ldxa [%g4] ASI_DMMU, %g5
  125. .section .gl_2insn_patch, "ax"
  126. .word 661b
  127. mov %g4, %g5
  128. nop
  129. .previous
  130. be,pt %xcc, sparc64_realfault_common
  131. mov FAULT_CODE_DTLB, %g4
  132. ba,pt %xcc, winfix_trampoline
  133. nop
  134. tsb_do_itlb_fault:
  135. rdpr %tpc, %g5
  136. ba,pt %xcc, sparc64_realfault_common
  137. mov FAULT_CODE_ITLB, %g4
  138. .globl sparc64_realfault_common
  139. sparc64_realfault_common:
  140. /* fault code in %g4, fault address in %g5, etrap will
  141. * preserve these two values in %l4 and %l5 respectively
  142. */
  143. ba,pt %xcc, etrap ! Save trap state
  144. 1: rd %pc, %g7 ! ...
  145. stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code
  146. stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address
  147. call do_sparc64_fault ! Call fault handler
  148. add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg
  149. ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
  150. nop ! Delay slot (fill me)
  151. winfix_trampoline:
  152. rdpr %tpc, %g3 ! Prepare winfixup TNPC
  153. or %g3, 0x7c, %g3 ! Compute branch offset
  154. wrpr %g3, %tnpc ! Write it into TNPC
  155. done ! Trap return
  156. /* Insert an entry into the TSB.
  157. *
  158. * %o0: TSB entry pointer (virt or phys address)
  159. * %o1: tag
  160. * %o2: pte
  161. */
  162. .align 32
  163. .globl __tsb_insert
  164. __tsb_insert:
  165. rdpr %pstate, %o5
  166. wrpr %o5, PSTATE_IE, %pstate
  167. TSB_LOCK_TAG(%o0, %g2, %g3)
  168. TSB_WRITE(%o0, %o2, %o1)
  169. wrpr %o5, %pstate
  170. retl
  171. nop
  172. /* Flush the given TSB entry if it has the matching
  173. * tag.
  174. *
  175. * %o0: TSB entry pointer (virt or phys address)
  176. * %o1: tag
  177. */
  178. .align 32
  179. .globl tsb_flush
  180. tsb_flush:
  181. sethi %hi(TSB_TAG_LOCK_HIGH), %g2
  182. 1: TSB_LOAD_TAG(%o0, %g1)
  183. srlx %g1, 32, %o3
  184. andcc %o3, %g2, %g0
  185. bne,pn %icc, 1b
  186. membar #LoadLoad
  187. cmp %g1, %o1
  188. bne,pt %xcc, 2f
  189. clr %o3
  190. TSB_CAS_TAG(%o0, %g1, %o3)
  191. cmp %g1, %o3
  192. bne,pn %xcc, 1b
  193. nop
  194. 2: retl
  195. TSB_MEMBAR
  196. /* Reload MMU related context switch state at
  197. * schedule() time.
  198. *
  199. * %o0: page table physical address
  200. * %o1: TSB register value
  201. * %o2: TSB virtual address
  202. * %o3: TSB mapping locked PTE
  203. *
  204. * We have to run this whole thing with interrupts
  205. * disabled so that the current cpu doesn't change
  206. * due to preemption.
  207. */
  208. .align 32
  209. .globl __tsb_context_switch
  210. __tsb_context_switch:
  211. rdpr %pstate, %o5
  212. wrpr %o5, PSTATE_IE, %pstate
  213. ldub [%g6 + TI_CPU], %g1
  214. sethi %hi(trap_block), %g2
  215. sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
  216. or %g2, %lo(trap_block), %g2
  217. add %g2, %g1, %g2
  218. stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
  219. 661: mov TSB_REG, %g1
  220. stxa %o1, [%g1] ASI_DMMU
  221. .section .gl_2insn_patch, "ax"
  222. .word 661b
  223. mov SCRATCHPAD_UTSBREG1, %g1
  224. stxa %o1, [%g1] ASI_SCRATCHPAD
  225. .previous
  226. membar #Sync
  227. 661: stxa %o1, [%g1] ASI_IMMU
  228. membar #Sync
  229. .section .gl_2insn_patch, "ax"
  230. .word 661b
  231. nop
  232. nop
  233. .previous
  234. brz %o2, 9f
  235. nop
  236. sethi %hi(sparc64_highest_unlocked_tlb_ent), %o4
  237. mov TLB_TAG_ACCESS, %g1
  238. lduw [%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
  239. stxa %o2, [%g1] ASI_DMMU
  240. membar #Sync
  241. sllx %g2, 3, %g2
  242. stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS
  243. membar #Sync
  244. 9:
  245. wrpr %o5, %pstate
  246. retl
  247. nop