/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align	32

/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction miss, data misses to
 * obp range do not use vpte). If so, go back directly to the faulting address.
 * This is because we want to read the tpc, otherwise we have no way of knowing
 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
 * also ensures no vpte range addresses are dropped into tlb while obp is
 * executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
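	/* The OBP range appears to be [LOW_OBP_ADDRESS, 1 << 32); addresses
	 * outside it take the normal kern_vpte path below.
	 */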
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop

	/* These two instructions are patched by paging_init().  */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
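	/* The doubled offset [%g1 + %g1] below presumably addresses the
	 * TAG_ACCESS register (2 * TLB_SFSR) in the MMU ASI.
	 */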
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done

vpte_insn_obp:
	/* Behave as if we are at TL0.  */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS.  */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
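
	/* Each prom_trans[] entry appears to be three 64-bit words
	 * {virtual base, size, PTE data}; a zero base terminates the
	 * table, hence the (3 * 8) stride at label 2 below.
	 */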
1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry
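
	/* Data-side variant of the scan above: the same prom_trans[] walk,
	 * but a match is loaded into the DTLB rather than the ITLB.
	 */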
kvmap_do_obp:
	sethi		%hi(prom_trans), %g5
	or		%g5, %lo(prom_trans), %g5
	srlx		%g4, 13, %g4
	sllx		%g4, 13, %g4

1:	ldx		[%g5 + 0x00], %g6	! base
	brz,a,pn	%g6, longpath		! no more entries, fail
	 mov		TLB_SFSR, %g1		! and restore %g1
	ldx		[%g5 + 0x08], %g1	! len
	add		%g6, %g1, %g1		! end
	cmp		%g6, %g4
	bgu,pt		%xcc, 2f
	 cmp		%g4, %g1
	bgeu,pt		%xcc, 2f
	 ldx		[%g5 + 0x10], %g1	! PTE

	/* TLB load, restore %g1, and return from trap.  */
	sub		%g4, %g6, %g6
	add		%g1, %g6, %g5
	mov		TLB_SFSR, %g1
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry

2:	ba,pt		%xcc, 1b
	 add		%g5, (3 * 8), %g5	! next entry

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
	.align		32
kvmap:
	brgez,pn	%g4, kvmap_nonlinear
	 nop
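
	/* Linear kernel addresses (top bit set) fall through to here.  %g2 is
	 * presumably preloaded by the DTLB miss path with the kernel linear
	 * mapping PTE bits, so XOR-ing it with the virtual address yields the
	 * PTE directly.  Under CONFIG_DEBUG_PAGEALLOC this branch is exported
	 * as kvmap_linear_patch, presumably so boot code can patch it out and
	 * force the page table walk below instead.
	 */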
#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5

#ifdef CONFIG_DEBUG_PAGEALLOC
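	/* Hand-walk the kernel page tables: index swapper_pg_dir with the PGD
	 * bits of %g4, then the PMD and PTE levels via physical loads.  The
	 * 32-bit pgd/pmd entries appear to hold the next table's physical
	 * address shifted right by 11, hence the sllx by 11 before each
	 * ASI_PHYS_USE_EC load.
	 */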
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif

kvmap_nonlinear:
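	/* Only [MODULES_VADDR, VMALLOC_END) is handled here; anything else is
	 * a real fault.  VMALLOC_END is presumably too large for a single
	 * sethi immediate, so it is rebuilt with a mov/sllx pair (its low 24
	 * bits must be zero).
	 */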
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
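	/* %g3 and %g6 are presumably the VPTE base and offset computed by the
	 * first-level DTLB miss handler.
	 */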
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop

kvmap_load:
	/* PTE is valid, load into TLB and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry