/* (scraped-page banner and line-number gutter removed) */
/* tsb.S: Sparc64 TSB (Translation Storage Buffer) table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */
#include <asm/tsb.h>

	.text
	.align	32

/* Invoked from TLB miss handler, we are in the
 * MMU global registers and they are setup like
 * this:
 *
 * %g1: TSB entry pointer
 * %g2: available temporary
 * %g3: FAULT_CODE_{D,I}TLB
 * %g4: available temporary
 * %g5: available temporary
 * %g6: TAG TARGET
 * %g7: physical address base of the linux page
 *      tables for the current address space
 */
	.globl	tsb_miss_dtlb
tsb_miss_dtlb:
	/* D-TLB miss entry: read the faulting virtual address from the
	 * D-MMU TAG ACCESS register into %g4, then join the common page
	 * table walk.  Branch delay slot is unused.
	 */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g4		! %g4 = D-MMU tag access (fault vaddr)
	ba,pt	%xcc, tsb_miss_page_table_walk
	 nop
	.globl	tsb_miss_itlb
tsb_miss_itlb:
	/* I-TLB miss entry: same as tsb_miss_dtlb but the faulting
	 * address comes from the I-MMU TAG ACCESS register.
	 */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_IMMU, %g4		! %g4 = I-MMU tag access (fault vaddr)
	ba,pt	%xcc, tsb_miss_page_table_walk
	 nop
tsb_miss_page_table_walk:
	/* Walk the user page tables (physical base in %g7) for the
	 * vaddr in %g4; branches to tsb_do_fault when no mapping exists.
	 * NOTE(review): the exact register in/out contract (PTE pointer
	 * presumably left in %g5) is defined by the macro in asm/tsb.h
	 * -- confirm there.
	 */
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g4)		! lock the TSB entry at %g1

	/* Load and check PTE.  An invalid PTE has the top (valid) bit
	 * clear, i.e. reads >= 0: in that case the annulled delay-slot
	 * store clears the locked TSB tag and we take the slow path.
	 */
	ldxa	[%g5] ASI_PHYS_USE_EC, %g5	! %g5 = PTE (physical load)
	brgez,a,pn %g5, tsb_do_fault
	 stx	%g0, [%g1]			! only if branch taken: unlock TSB tag

	TSB_WRITE(%g1, %g5, %g6)		! install tag (%g6) + PTE (%g5) in TSB

	/* Finally, load TLB and return from trap. */
tsb_tlb_reload:
	cmp	%g3, FAULT_CODE_DTLB		! %g3 says which MMU missed
	bne,pn	%xcc, tsb_itlb_load
	 nop

tsb_dtlb_load:
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! insert PTE into the D-TLB
	retry					! re-execute faulting access

tsb_itlb_load:
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! insert PTE into the I-TLB
	retry					! re-execute faulting access
/* No valid entry in the page tables, do full fault
 * processing.
 */
	.globl	tsb_do_fault
tsb_do_fault:
	/* Leave the MMU global register set: the wrpr in the delay slot
	 * toggles PSTATE_AG|PSTATE_MG so both the D and I paths run the
	 * rest of the fault handling on the alternate globals.
	 */
	cmp	%g3, FAULT_CODE_DTLB
	rdpr	%pstate, %g5
	bne,pn	%xcc, tsb_do_itlb_fault
	 wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate	! delay slot: runs on both paths

tsb_do_dtlb_fault:
	/* %g5 = faulting address from the D-MMU tag access register.
	 * At TL==1 go straight to the common fault code; at deeper trap
	 * levels bounce through the window-fixup trampoline instead.
	 */
	rdpr	%tl, %g4
	cmp	%g4, 1
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4		! delay slot: fault code for either target
	ba,pt	%xcc, winfix_trampoline
	 nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5			! I-fault address is the trapping PC
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4
	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* Full fault processing: stash fault code (%g4) and address
	 * (%g5) in thread_info, enter the kernel via etrap, call
	 * do_sparc64_fault(pt_regs *), and leave through rtrap.
	 * NOTE(review): %g6 is assumed to hold the thread_info pointer
	 * in this register set -- confirm against etrap.S.
	 */
	stb	%g4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%g5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	ba,pt	%xcc, etrap			! Save trap state
1:	 rd	%pc, %g7			! delay slot: %g7 = return point for etrap
	call	do_sparc64_fault		! Call fault handler
	 add	%sp, PTREGS_OFF, %o0		! delay slot: compute pt_regs arg
	ba,pt	%xcc, rtrap_clr_l6		! Restore cpu state
	 nop					! Delay slot (fill me)
	.globl	winfix_trampoline
winfix_trampoline:
	/* Redirect the trap return into the window-fixup code by
	 * rewriting TNPC from the trapping PC.
	 * NOTE(review): the 0x7c offset encodes the winfixup handler
	 * layout (last slot of an aligned handler group) -- confirm the
	 * contract in winfixup.S.
	 */
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return
/* Reload MMU related context switch state at
 * schedule() time.
 *
 * %o0: page table physical address
 * %o1: TSB address
 *
 * Returns (in %o0) the virtual address the TSB was mapped at.
 * NOTE(review): if %o1 is zero we branch to 9f and return %o2
 * uninitialized -- matches the "can this happen?" XXX below;
 * confirm callers never pass a NULL TSB.
 */
	.globl	tsb_context_switch
tsb_context_switch:
	/* Switch to the MMU globals with interrupts disabled (no
	 * PSTATE_IE) while the page-table base and locked TSB
	 * mapping are updated.
	 */
	wrpr	%g0, PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV, %pstate

	/* Set page table base alternate global. */
	mov	%o0, %g7

	/* XXX can this happen? */
	brz,pn	%o1, 9f
	 nop

	/* Lock TSB into D-TLB. */
	sethi	%hi(PAGE_SIZE), %o3
	and	%o3, %o1, %o3			! page-offset portion of TSB address
	sethi	%hi(TSBMAP_BASE), %o2
	add	%o2, %o3, %o2			! %o2 = virtual address to map TSB at

	/* XXX handle PAGE_SIZE != 8K correctly... */
	mov	TSB_REG, %g1
	stxa	%o2, [%g1] ASI_DMMU		! point D-MMU TSB register at mapping
	membar	#Sync
	stxa	%o2, [%g1] ASI_IMMU		! same for the I-MMU
	membar	#Sync

#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)

	/* Build the kernel TTE for the TSB page: valid + page-size bits
	 * up high, cacheable/privileged/writable/locked bits down low.
	 * NOTE(review): the xor below folds the TSB address into this
	 * template; it relies on the ^0xfffff80000000000 pre-inversion
	 * in KERN_HIGHBITS -- confirm against pgtable.h.
	 */
	sethi	%uhi(KERN_HIGHBITS), %g2
	or	%g2, %ulo(KERN_HIGHBITS), %g2
	sllx	%g2, 32, %g2
	or	%g2, KERN_LOWBITS, %g2

#undef KERN_HIGHBITS
#undef KERN_LOWBITS

	xor	%o1, %g2, %o1			! %o1 = TTE data for the TSB page

	/* We use entry 61 for this locked entry.  This is the spitfire
	 * TLB entry number, and luckily cheetah masks the value with
	 * 15 ending us up with entry 13 which is what we want in that
	 * case too.
	 *
	 * XXX Interactions with prom_world()...
	 */
	mov	TLB_TAG_ACCESS, %g1
	stxa	%o2, [%g1] ASI_DMMU		! tag = vaddr being mapped
	membar	#Sync

	mov	(61 << 3), %g1			! TLB entry index 61, 8 bytes/entry
	stxa	%o1, [%g1] ASI_DTLB_DATA_ACCESS	! write the locked D-TLB entry
	membar	#Sync

9:
	/* Back to the normal register set with interrupts enabled;
	 * return the TSB mapping address.
	 */
	wrpr	%g0, PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE, %pstate

	retl
	 mov	%o2, %o0			! delay slot: return value