/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align	32

	.globl	sparc64_vpte_patchme1
	.globl	sparc64_vpte_patchme2

/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction miss, data misses to
 * obp range do not use vpte). If so, go back directly to the faulting address.
 * This is because we want to read the tpc, otherwise we have no way of knowing
 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
 * also ensures no vpte range addresses are dropped into tlb while obp is
 * executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, sparc64_vpte_patchme1
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, obp_iaddr_patch
	 nop
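
	/* Only addresses in [LOW_OBP_ADDRESS, 1 << 32) divert to
	 * obp_iaddr_patch here; anything below or above that window
	 * falls through to the patched kernel PGD load below.
	 */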

	/* These two instructions are patched by paging_init().  */
sparc64_vpte_patchme1:
	sethi		%hi(0), %g5
sparc64_vpte_patchme2:
	or		%g5, %lo(0), %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
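
	/* The sethi/or pair above carries %hi(0)/%lo(0) only as
	 * placeholders; at boot, paging_init() rewrites the immediates
	 * so the pair loads the kernel PGD value into %g5.
	 */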

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done
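
	/* Note the [%g1 + %g1] addressing above: TAG_ACCESS lives at
	 * twice the TLB_SFSR offset in the MMU register space, so
	 * doubling %g1 forms the TAG_ACCESS address without needing
	 * another constant.
	 */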

	.globl		obp_iaddr_patch
obp_iaddr_patch:
	/* These two instructions patched by inherit_prom_mappings().  */
	sethi		%hi(0), %g5
	or		%g5, %lo(0), %g5

	/* Behave as if we are at TL0.  */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */

	/* Restore previous TAG_ACCESS.  */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6
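
	/* That is ((vaddr >> 23) & 0x7ff) << 2: 2048 32-bit PMD entries,
	 * each covering 8MB (1024 PTEs of 8K apiece).
	 */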

	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5
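
	/* A zero PMD means nothing is mapped there; otherwise the 32-bit
	 * entry is a physical address shifted right by 11, which the sllx
	 * in the delay slot turns back into the PTE table's physical base.
	 */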

	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop
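
	/* The PTE offset is ((vaddr >> 13) & 0x3ff) << 3: 1024 64-bit
	 * PTEs per table, indexed by 8K page number. brgez catches an
	 * invalid PTE, since a valid one has bit 63 (_PAGE_VALID) set
	 * and so tests negative.
	 */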

	/* TLB load and return from trap.  */
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

	.globl		obp_daddr_patch
obp_daddr_patch:
	/* These two instructions patched by inherit_prom_mappings().  */
	sethi		%hi(0), %g5
	or		%g5, %lo(0), %g5
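
	/* The data-side walk below mirrors the instruction-side walk
	 * above; only the final store targets the D-TLB. A rough C sketch
	 * of the two-level lookup (names here are illustrative, not
	 * kernel API):
	 *
	 *	u32 pmd = pmd_base[(vaddr >> 23) & 0x7ff];
	 *	if (!pmd)
	 *		goto longpath;
	 *	u64 pte = ((u64 *)((u64)pmd << 11))[(vaddr >> 13) & 0x3ff];
	 *	if (!(pte & _PAGE_VALID))
	 *		goto longpath;
	 *	dtlb_load(pte);
	 */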

	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6

	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5

	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop

	/* TLB load and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry

/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
	.align		32
kvmap:
	brlz,pt		%g4, kvmap_load
	 xor		%g2, %g4, %g5
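
	/* A negative vaddr (bit 63 set) lies in the linear kernel
	 * mapping, so the TTE can be formed arithmetically: the xor in
	 * the delay slot folds the vaddr into a prebuilt constant in %g2
	 * (valid, page-size and protection bits, set up at boot) to
	 * produce the value kvmap_load feeds to the D-TLB.
	 */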

kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop
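
	/* Anything outside [MODULES_VADDR, VMALLOC_END) cannot be handled
	 * here and takes the long path. VMALLOC_END is built with the
	 * mov/sllx pair because the constant is too wide for a single
	 * sethi immediate.
	 */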

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, obp_daddr_patch
	 nop
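
	/* Same OBP window test as in sparc64_vpte_nucleus: only data
	 * addresses in [LOW_OBP_ADDRESS, 1 << 32) divert to
	 * obp_daddr_patch; the rest are treated as vmalloc addresses.
	 */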

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop
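
	/* %g3 and %g6 come in from the first-level miss handler: %g3
	 * holds the VPTE base and %g6 an offset derived from the faulting
	 * address, so this one load fetches the kernel PTE via the VPTE
	 * area.
	 */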

kvmap_load:
	/* PTE is valid, load into TLB and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry