/* tsb.S (4.6 KB listing; page-extraction line-number run removed) */
/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */
#include <asm/tsb.h>

	.text
	.align	32

	/* Invoked from the TLB miss handler, we are in the
	 * MMU global register set and the registers are set up
	 * like this:
	 *
	 * %g1: TSB entry pointer
	 * %g2: available temporary
	 * %g3: FAULT_CODE_{D,I}TLB
	 * %g4: available temporary
	 * %g5: available temporary
	 * %g6: TAG TARGET
	 * %g7: physical address base of the linux page
	 *      tables for the current address space
	 */
	.globl	tsb_miss_dtlb
tsb_miss_dtlb:
	mov	TLB_TAG_ACCESS, %g4		! address of D-MMU tag access reg
	ldxa	[%g4] ASI_DMMU, %g4		! %g4 = faulting vaddr/context
	ba,pt	%xcc, tsb_miss_page_table_walk
	 nop

	.globl	tsb_miss_itlb
tsb_miss_itlb:
	mov	TLB_TAG_ACCESS, %g4		! same, but read via the I-MMU
	ldxa	[%g4] ASI_IMMU, %g4
	ba,pt	%xcc, tsb_miss_page_table_walk
	 nop
tsb_miss_page_table_walk:
	/* TRAP_LOAD_PGD_PHYS clobbers %g1 and %g6, preserve them
	 * around it in the free temporaries...
	 */
	mov	%g1, %g5
	mov	%g6, %g2
	TRAP_LOAD_PGD_PHYS			! %g7 = phys base of page tables
	mov	%g2, %g6
	mov	%g5, %g1

	/* Walk the page tables: vaddr in %g4, PGD phys in %g7.
	 * On success %g5 holds the physical address of the PTE;
	 * on failure control goes to tsb_do_fault.
	 */
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g4)

	/* Load and check PTE.  A valid PTE has the top bit set,
	 * so a non-negative value means "invalid".  The annulled
	 * delay-slot store runs only when we branch, and unlocks
	 * the TSB entry by storing zero.
	 */
	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
	brgez,a,pn %g5, tsb_do_fault
	 TSB_STORE(%g1, %g0)

	/* If it is larger than the base page size, don't
	 * bother putting it into the TSB.  Again the annulled
	 * store only runs when we branch, unlocking the entry
	 * without writing a translation.
	 */
	srlx	%g5, 32, %g2
	sethi	%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
	sethi	%hi(_PAGE_SZBITS >> 32), %g7
	and	%g2, %g4, %g2
	cmp	%g2, %g7
	bne,a,pn %xcc, tsb_tlb_reload
	 TSB_STORE(%g1, %g0)

	TSB_WRITE(%g1, %g5, %g6)		! store PTE + tag into the TSB entry

	/* Finally, load TLB and return from trap. */
tsb_tlb_reload:
	cmp	%g3, FAULT_CODE_DTLB		! which TLB missed?
	bne,pn	%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! install PTE into the D-TLB
	retry

tsb_itlb_load:
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! install PTE into the I-TLB
	retry
	/* No valid entry in the page tables, do full fault
	 * processing.
	 */
	.globl	tsb_do_fault
tsb_do_fault:
	cmp	%g3, FAULT_CODE_DTLB
	rdpr	%pstate, %g5
	bne,pn	%xcc, tsb_do_itlb_fault
	 wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate	! delay slot: switch global
							! reg set (wrpr XORs AG|MG)

tsb_do_dtlb_fault:
	rdpr	%tl, %g4
	cmp	%g4, 1				! at TL=1 take the normal fault path
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5		! %g5 = fault address
	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline		! TL>1: route through window fixup
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5			! I-fault address is the trap PC
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap_clr_l6		! Restore cpu state
	 nop					! Delay slot (fill me)

	.globl	winfix_trampoline
winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return
	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2: pte
	 */
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts (wrpr XORs IE)
	TSB_LOCK_TAG(%o0, %g2, %g3)		! lock the entry's tag word
	TSB_WRITE(%o0, %o2, %o1)		! store pte + tag, releasing the lock
	wrpr	%o5, %pstate			! restore previous interrupt state
	retl
	 nop
	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 */
	.align	32
	.globl	tsb_flush
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)			! %g1 = current tag word
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0			! lock bit set?
	bne,pn	%icc, 1b			! ... yes, spin until unlocked
	 membar	#LoadLoad
	cmp	%g1, %o1			! tag match?
	bne,pt	%xcc, 2f			! ... no, nothing to flush
	 clr	%o3				! %o3 = 0, the "invalid" tag to store
	TSB_CAS_TAG(%o0, %g1, %o3)		! CAS: swap in 0 if tag still == %g1;
						! %o3 receives the old tag value
	cmp	%g1, %o3
	bne,pn	%xcc, 1b			! tag changed under us, retry
	 nop
2:	retl
	 TSB_MEMBAR
	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1: TSB register value
	 * %o2: TSB virtual address
	 * %o3: TSB mapping locked PTE
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
__tsb_context_switch:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

	/* Compute %g2 = &trap_block[this_cpu] and record the new
	 * PGD physical address there.
	 */
	ldub	[%g6 + TI_CPU], %g1		! %g1 = current cpu number
	sethi	%hi(trap_block), %g2
	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
	or	%g2, %lo(trap_block), %g2
	add	%g2, %g1, %g2
	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	mov	TSB_REG, %g1
	stxa	%o1, [%g1] ASI_DMMU		! point both MMUs at the new TSB
	membar	#Sync
	stxa	%o1, [%g1] ASI_IMMU
	membar	#Sync

	brz	%o2, 9f				! no TSB vaddr: skip locked TLB entry
	 nop

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
	mov	TLB_TAG_ACCESS, %g1
	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o2, [%g1] ASI_DMMU		! set tag for the TSB mapping
	membar	#Sync
	sllx	%g2, 3, %g2			! TLB entry index -> register offset
	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS	! install the locked PTE
	membar	#Sync
9:
	wrpr	%o5, %pstate			! restore interrupt state
	retl
	 nop