/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align	32
/*
 * On a second-level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for an instruction miss; data misses
 * to the OBP range do not use vpte). If so, go back directly to the faulting
 * address. This is because we want to read the tpc, as otherwise we have no
 * way of knowing the 8k-aligned faulting address if we are using a >8k kernel
 * pagesize. This also ensures no vpte range addresses are dropped into the
 * tlb while obp is executing (see the inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already. So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop
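	/* The dispatch above, as a C sketch (bounds taken from the two
	 * compares; the OBP window is [LOW_OBP_ADDRESS, 4GB)):
	 *
	 *	if (va >= LOW_OBP_ADDRESS && va < (1UL << 32))
	 *		goto vpte_insn_obp;
	 *	else
	 *		goto kern_vpte;
	 */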
	/* These two instructions are patched by paging_init(). */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend. */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment. */
vpte_noent:
	/* Restore previous TAG_ACCESS; %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
	mov		TLB_SFSR, %g1
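	/* %g1 + %g1 gives 0x30, i.e. TLB_TAG_ACCESS (TLB_SFSR being
	 * 0x18 on these MMUs), so the stxa below rewrites the saved
	 * tag access register rather than the SFSR itself.
	 */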
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done
vpte_insn_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Behave as if we are at TL0. */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS. */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6
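	/* In C (assuming the 8K base-page layout this walk implies:
	 * 2048 four-byte PMD entries, each covering 8MB):
	 *
	 *	pmd_off = ((va >> 23) & 0x7ff) * 4;
	 */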
	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5
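	/* PMD entries here are 32-bit values holding the PTE table's
	 * physical address >> 11; the sllx in the delay slot rebuilds
	 * that address.
	 */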
	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6
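	/* In C:  pte_off = ((va >> 13) & 0x3ff) * 8;  i.e. 1024
	 * eight-byte PTEs, one per 8K page in the 8MB PMD span.
	 */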
	/* Load PTE. */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop
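	/* (brgez works as a validity test because _PAGE_VALID is
	 * bit 63: a non-negative PTE has the valid bit clear.)
	 */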
	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
kvmap_do_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5

	/* Get PMD offset. */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid? */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset. */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE. */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
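/* kvmap_do_obp above performs the same prom_pmd_phys walk as
 * vpte_insn_obp, minus the TL/TAG_ACCESS fixup, and loads the
 * resulting PTE into the D-TLB rather than the I-TLB since it
 * services data misses.
 */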
/*
 * On a first-level data miss, check whether the access is to the OBP range
 * (such accesses can be made by the prom, as well as by the kernel via
 * prom_getproperty on "address"). If so, do not use vpte access ... rather,
 * use the information saved during inherit_prom_mappings(), which uses an
 * 8k pagesize.
 */
	.align		32
kvmap:
	brgez,pn	%g4, kvmap_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5
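	/* Linear (PAGE_OFFSET) addresses have bit 63 set, so the brgez
	 * above sends everything else to kvmap_nonlinear. For linear
	 * addresses the PTE is derived with a single xor: %g2 is
	 * assumed to hold a precomputed mask that cancels the high
	 * PAGE_OFFSET bits and supplies the PTE protection bits.
	 */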
#ifdef CONFIG_DEBUG_PAGEALLOC
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif
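/* With CONFIG_DEBUG_PAGEALLOC, paging_init() is expected to patch a
 * nop over the branch at kvmap_linear_patch, so linear misses fall
 * through into the full three-level swapper_pg_dir walk above and
 * pages unmapped by the debug page allocator genuinely fault.
 */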
kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop
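	/* Range check in C: only MODULES_VADDR <= va < VMALLOC_END
	 * survives; everything else takes the full fault longpath.
	 */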
kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop
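	/* Same [LOW_OBP_ADDRESS, 4GB) window test as in
	 * sparc64_vpte_nucleus: OBP addresses go to kvmap_do_obp,
	 * anything outside the window falls through to the vmalloc
	 * VPTE lookup below.
	 */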
kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop
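	/* Assumption: %g3 (VPTE base) and %g6 (VPTE offset for this
	 * vaddr) are set up by the low-level D-TLB miss handler before
	 * it branches to kvmap, so %g3 + %g6 addresses the kernel VPTE
	 * directly.
	 */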
kvmap_load:
	/* PTE is valid, load into TLB and return from trap. */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry