/* arch/sparc64/mm/tsb.S */
  1. /* tsb.S: Sparc64 TSB table handling.
  2. *
  3. * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  4. */
  5. #include <asm/tsb.h>
  6. .text
  7. .align 32
  8. /* Invoked from TLB miss handler, we are in the
  9. * MMU global registers and they are setup like
  10. * this:
  11. *
  12. * %g1: TSB entry pointer
  13. * %g2: available temporary
  14. * %g3: FAULT_CODE_{D,I}TLB
  15. * %g4: available temporary
  16. * %g5: available temporary
  17. * %g6: TAG TARGET
  18. * %g7: physical address base of the linux page
  19. * tables for the current address space
  20. */
  21. .globl tsb_miss_dtlb
  22. tsb_miss_dtlb:
  23. mov TLB_TAG_ACCESS, %g4
  24. ldxa [%g4] ASI_DMMU, %g4
  25. ba,pt %xcc, tsb_miss_page_table_walk
  26. nop
  27. .globl tsb_miss_itlb
  28. tsb_miss_itlb:
  29. mov TLB_TAG_ACCESS, %g4
  30. ldxa [%g4] ASI_IMMU, %g4
  31. ba,pt %xcc, tsb_miss_page_table_walk
  32. nop
  33. tsb_miss_page_table_walk:
  34. /* This clobbers %g1 and %g6, preserve them... */
  35. mov %g1, %g5
  36. mov %g6, %g2
  37. TRAP_LOAD_PGD_PHYS
  38. mov %g2, %g6
  39. mov %g5, %g1
  40. USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
  41. tsb_reload:
  42. TSB_LOCK_TAG(%g1, %g2, %g4)
  43. /* Load and check PTE. */
  44. ldxa [%g5] ASI_PHYS_USE_EC, %g5
  45. brgez,a,pn %g5, tsb_do_fault
  46. stx %g0, [%g1]
  47. /* If it is larger than the base page size, don't
  48. * bother putting it into the TSB.
  49. */
  50. srlx %g5, 32, %g2
  51. sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g4
  52. sethi %hi(_PAGE_SZBITS >> 32), %g7
  53. and %g2, %g4, %g2
  54. cmp %g2, %g7
  55. bne,a,pn %xcc, tsb_tlb_reload
  56. stx %g0, [%g1]
  57. TSB_WRITE(%g1, %g5, %g6)
  58. /* Finally, load TLB and return from trap. */
  59. tsb_tlb_reload:
  60. cmp %g3, FAULT_CODE_DTLB
  61. bne,pn %xcc, tsb_itlb_load
  62. nop
  63. tsb_dtlb_load:
  64. stxa %g5, [%g0] ASI_DTLB_DATA_IN
  65. retry
  66. tsb_itlb_load:
  67. stxa %g5, [%g0] ASI_ITLB_DATA_IN
  68. retry
  69. /* No valid entry in the page tables, do full fault
  70. * processing.
  71. */
  72. .globl tsb_do_fault
  73. tsb_do_fault:
  74. cmp %g3, FAULT_CODE_DTLB
  75. rdpr %pstate, %g5
  76. bne,pn %xcc, tsb_do_itlb_fault
  77. wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
  78. tsb_do_dtlb_fault:
  79. rdpr %tl, %g4
  80. cmp %g4, 1
  81. mov TLB_TAG_ACCESS, %g4
  82. ldxa [%g4] ASI_DMMU, %g5
  83. be,pt %xcc, sparc64_realfault_common
  84. mov FAULT_CODE_DTLB, %g4
  85. ba,pt %xcc, winfix_trampoline
  86. nop
  87. tsb_do_itlb_fault:
  88. rdpr %tpc, %g5
  89. ba,pt %xcc, sparc64_realfault_common
  90. mov FAULT_CODE_ITLB, %g4
  91. .globl sparc64_realfault_common
  92. sparc64_realfault_common:
  93. stb %g4, [%g6 + TI_FAULT_CODE] ! Save fault code
  94. stx %g5, [%g6 + TI_FAULT_ADDR] ! Save fault address
  95. ba,pt %xcc, etrap ! Save trap state
  96. 1: rd %pc, %g7 ! ...
  97. call do_sparc64_fault ! Call fault handler
  98. add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg
  99. ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
  100. nop ! Delay slot (fill me)
  101. .globl winfix_trampoline
  102. winfix_trampoline:
  103. rdpr %tpc, %g3 ! Prepare winfixup TNPC
  104. or %g3, 0x7c, %g3 ! Compute branch offset
  105. wrpr %g3, %tnpc ! Write it into TNPC
  106. done ! Trap return
  107. /* Insert an entry into the TSB.
  108. *
  109. * %o0: TSB entry pointer
  110. * %o1: tag
  111. * %o2: pte
  112. */
  113. .align 32
  114. .globl tsb_insert
  115. tsb_insert:
  116. rdpr %pstate, %o5
  117. wrpr %o5, PSTATE_IE, %pstate
  118. TSB_LOCK_TAG(%o0, %g2, %g3)
  119. TSB_WRITE(%o0, %o2, %o1)
  120. wrpr %o5, %pstate
  121. retl
  122. nop
  123. /* Reload MMU related context switch state at
  124. * schedule() time.
  125. *
  126. * %o0: page table physical address
  127. * %o1: TSB register value
  128. * %o2: TSB virtual address
  129. * %o3: TSB mapping locked PTE
  130. *
  131. * We have to run this whole thing with interrupts
  132. * disabled so that the current cpu doesn't change
  133. * due to preemption.
  134. */
  135. .align 32
  136. .globl __tsb_context_switch
  137. __tsb_context_switch:
  138. rdpr %pstate, %o5
  139. wrpr %o5, PSTATE_IE, %pstate
  140. ldub [%g6 + TI_CPU], %g1
  141. sethi %hi(trap_block), %g2
  142. sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g1
  143. or %g2, %lo(trap_block), %g2
  144. add %g2, %g1, %g2
  145. stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
  146. mov TSB_REG, %g1
  147. stxa %o1, [%g1] ASI_DMMU
  148. membar #Sync
  149. stxa %o1, [%g1] ASI_IMMU
  150. membar #Sync
  151. brz %o2, 9f
  152. nop
  153. sethi %hi(sparc64_highest_unlocked_tlb_ent), %o4
  154. mov TLB_TAG_ACCESS, %g1
  155. lduw [%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
  156. stxa %o2, [%g1] ASI_DMMU
  157. membar #Sync
  158. sllx %g2, 3, %g2
  159. stxa %o3, [%g2] ASI_DTLB_DATA_ACCESS
  160. membar #Sync
  161. 9:
  162. wrpr %o5, %pstate
  163. retl
  164. nop