/* (code-browser artifacts removed: file-size banner and line-number gutter) */
/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>
#include <asm/hypervisor.h>

	.text
	.align	32

	/* Invoked from TLB miss handler, we are in the
	 * MMU global registers and they are setup like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2:	available temporary
	 * %g3:	FAULT_CODE_{D,I}TLB
	 * %g4:	available temporary
	 * %g5:	available temporary
	 * %g6: TAG TARGET
	 * %g7:	available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	/* Delay slot: %g4 = missing D-side virtual address
	 * (TLB_TAG_ACCESS register, read via the D-MMU ASI).
	 */
	 ldxa		[%g4] ASI_DMMU, %g4

tsb_miss_itlb:
	mov		TLB_TAG_ACCESS, %g4
	ba,pt		%xcc, tsb_miss_page_table_walk
	/* Delay slot: same as above but for the I-MMU.  */
	 ldxa		[%g4] ASI_IMMU, %g4
	/* At this point we have:
	 * %g4 -- missing virtual address
	 * %g1 -- TSB entry address
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_PGD_PHYS(%g7, %g5)

	/* And now we have the PGD base physical address in %g7.  */
tsb_miss_page_table_walk_sun4v_fastpath:
	/* Walk the user page tables for vaddr %g4; branches to
	 * tsb_do_fault if no translation is present.
	 */
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE (the walk left the PTE's physical
	 * address in %g5; fetch its contents).
	 */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	/* A usable PTE has the sign bit set; if %g5 >= 0 the entry is
	 * not valid, so unlock the TSB slot by storing the invalid tag
	 * (annulled delay slot, executed only when the branch is taken)
	 * and go do full fault processing.
	 */
	brgez,a,pn	%g5, tsb_do_fault
	 TSB_STORE(%g1, %g7)

	/* If it is larger than the base page size, don't
	 * bother putting it into the TSB.
	 */
	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
	and		%g5, %g7, %g2		/* %g2 = page-size bits of the PTE */
	sethi		%hi(_PAGE_SZBITS), %g7
	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
	cmp		%g2, %g7		/* base page size? */
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	/* Not base page size: just unlock the slot with the invalid
	 * tag (annulled delay slot) and load the TLB directly.
	 */
	bne,a,pn	%xcc, tsb_tlb_reload
	 TSB_STORE(%g1, %g7)

	/* Commit PTE %g5 and tag %g6 into the TSB entry; this also
	 * releases the lock taken by TSB_LOCK_TAG above.
	 */
	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
	/* %g3 holds FAULT_CODE_{D,I}TLB (set by the miss handler);
	 * dispatch to the matching TLB load path.
	 */
	cmp		%g3, FAULT_CODE_DTLB
	bne,pn		%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

tsb_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3
	/* No valid entry in the page tables, do full fault
	 * processing.
	 */
	.globl		tsb_do_fault
tsb_do_fault:
	cmp		%g3, FAULT_CODE_DTLB

	/* sun4u: switch %pstate into the alternate/MMU globals.
	 * On sun4v the two instructions below are patched into
	 * SET_GL(1) plus a scratchpad read instead.
	 */
661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn		%xcc, tsb_do_itlb_fault
	 nop

tsb_do_dtlb_fault:
	rdpr		%tl, %g3
	cmp		%g3, 1

	/* Fetch the faulting D-side address into %g5: on sun4u from
	 * the TLB_TAG_ACCESS MMU register, on sun4v (patched variant)
	 * from the hypervisor fault-status area.
	 */
661:	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	ldx		[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* At trap level 1 go straight to the common fault path;
	 * otherwise take the window-fixup trampoline.
	 */
	be,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_DTLB, %g4
	ba,pt		%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr		%tpc, %g5		/* faulting I-side address = trap PC */
	ba,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_ITLB, %g4
	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt		%xcc, etrap			! Save trap state
1:	 rd		%pc, %g7			! ... (delay slot: return PC for etrap)
	stb		%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx		%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call		do_sparc64_fault		! Call fault handler
	 add		%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
	 nop						! Delay slot (fill me)
	/* Bounce a TL>1 data fault into the window-fixup handler by
	 * redirecting TNPC into the fixup code near the trap vector.
	 */
winfix_trampoline:
	rdpr		%tpc, %g3		! Prepare winfixup TNPC
	or		%g3, 0x7c, %g3		! Compute branch offset
	wrpr		%g3, %tnpc		! Write it into TNPC
	done					! Trap return
	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2: pte
	 */
	.align		32
	.globl		__tsb_insert
__tsb_insert:
	rdpr		%pstate, %o5			/* save %pstate... */
	wrpr		%o5, PSTATE_IE, %pstate		/* ...and disable interrupts */
	TSB_LOCK_TAG(%o0, %g2, %g3)			/* lock the entry's tag word */
	TSB_WRITE(%o0, %o2, %o1)			/* store pte + tag (unlocks) */
	wrpr		%o5, %pstate			/* restore interrupt state */
	retl
	 nop
	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 */
	.align		32
	.globl		tsb_flush
tsb_flush:
	sethi		%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx		%g1, 32, %o3
	andcc		%o3, %g2, %g0		/* lock bit set in tag high word? */
	bne,pn		%icc, 1b		/* yes: spin until unlocked */
	 membar		#LoadLoad
	cmp		%g1, %o1		/* tag match? */
	mov		1, %o3
	bne,pt		%xcc, 2f		/* no match: nothing to flush */
	 sllx		%o3, TSB_TAG_INVALID_BIT, %o3
	/* Atomically swap in the invalid tag; if the tag changed under
	 * us the CAS leaves a different value in %g1 and we retry.
	 */
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp		%g1, %o3
	bne,pn		%xcc, 1b
	 nop
2:	retl
	 TSB_MEMBAR
	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1: TSB register value
	 * %o2: TSB virtual address
	 * %o3: TSB mapping locked PTE
	 * %o4: Hypervisor TSB descriptor physical address
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align		32
	.globl		__tsb_context_switch
__tsb_context_switch:
	rdpr		%pstate, %o5
	wrpr		%o5, PSTATE_IE, %pstate		/* disable interrupts */

	/* %g2 = &trap_block[this_cpu]; record the new PGD
	 * physical address in the per-cpu trap block.
	 */
	ldub		[%g6 + TI_CPU], %g1
	sethi		%hi(trap_block), %g2
	sllx		%g1, TRAP_BLOCK_SZ_SHIFT, %g1
	or		%g2, %lo(trap_block), %g2
	add		%g2, %g1, %g2
	stx		%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	sethi		%hi(tlb_type), %g1
	lduw		[%g1 + %lo(tlb_type)], %g1
	cmp		%g1, 3			/* tlb_type 3 = hypervisor (sun4v path below) */
	bne,pt		%icc, 1f
	 nop

	/* Hypervisor TSB switch.  */
	mov		SCRATCHPAD_UTSBREG1, %g1
	stxa		%o1, [%g1] ASI_SCRATCHPAD
	mov		-1, %g2			/* second TSB scratchpad reg gets -1
						 * (presumably "no second TSB" -- confirm
						 * against sun4v MMU API) */
	mov		SCRATCHPAD_UTSBREG2, %g1
	stxa		%g2, [%g1] ASI_SCRATCHPAD

	/* Save away %o5's %pstate, we have to use %o5 for
	 * the hypervisor call.
	 */
	mov		%o5, %g1

	mov		HV_FAST_MMU_TSB_CTXNON0, %o5
	mov		1, %o0			/* arg0 = 1 (one TSB descriptor) */
	mov		%o4, %o1		/* arg1 = descriptor physical address */
	ta		HV_FAST_TRAP

	/* Finish up and restore %o5.  */
	ba,pt		%xcc, 9f
	 mov		%g1, %o5

	/* SUN4U TSB switch: program the TSB base register in
	 * both MMUs directly.
	 */
1:	mov		TSB_REG, %g1
	stxa		%o1, [%g1] ASI_DMMU
	membar		#Sync
	stxa		%o1, [%g1] ASI_IMMU
	membar		#Sync

2:	brz		%o2, 9f			/* no TSB vaddr: skip locked-entry setup */
	 nop

	/* Install the locked DTLB entry that maps the TSB itself,
	 * reusing the highest unlocked TLB entry slot.
	 */
	sethi		%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov		TLB_TAG_ACCESS, %g1
	lduw		[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa		%o2, [%g1] ASI_DMMU	/* tag access = TSB virtual address */
	membar		#Sync
	sllx		%g2, 3, %g2		/* entry index -> data-access offset */
	stxa		%o3, [%g2] ASI_DTLB_DATA_ACCESS
	membar		#Sync

9:
	wrpr		%o5, %pstate		/* restore interrupt state */

	retl
	 nop