
/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *      r3 = faulting address, r13 = PACA
 *      r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
        /* r3 = faulting address */

        srdi    r9,r3,60                /* get region */
        srdi    r10,r3,28               /* get esid */
        cmpldi  cr7,r9,0xc              /* cmp PAGE_OFFSET for later use */

        /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
        blt     cr7,0f                  /* user or kernel? */
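
        /*
         * The top four bits of the EA select the region: with 256MB
         * segments, 0xc is the PAGE_OFFSET linear mapping, anything
         * above it (vmalloc/ioremap space among others) is still
         * kernel, and anything below is user space, hence the blt.
         */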

        /* kernel address: proto-VSID = ESID */
        /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
         * this code will generate the proto-VSID 0xfffffffff for the
         * top segment.  That's ok, the scramble below will translate
         * it to VSID 0, which is reserved as a bad VSID - one which
         * will never have any pages in it.  */

        /* Check whether we hit the linear mapping or the vmalloc/ioremap
         * kernel space
         */
        bne     cr7,1f

        /* Linear mapping encoding bits, the "li" instruction below will
         * be patched by the kernel at boot
         */
_GLOBAL(slb_miss_kernel_load_linear)
        li      r11,0
        b       slb_finish_load
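
        /*
         * At boot the kernel rewrites the immediate above with the VSID
         * flags for the linear mapping (in this era, something like
         * SLB_VSID_KERNEL plus the page-size encoding); the same applies
         * to the "li" under slb_miss_kernel_load_virtual below.
         */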

1:      /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
         * will be patched by the kernel at boot
         */
_GLOBAL(slb_miss_kernel_load_virtual)
        li      r11,0
        b       slb_finish_load

0:      /* user address: proto-VSID = context << USER_ESID_BITS | ESID.
         * First check if the address is within the boundaries of the
         * user region
         */
        srdi.   r9,r10,USER_ESID_BITS
        bne-    8f                      /* invalid ea bits set */
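
        /*
         * Any bits left after shifting out USER_ESID_BITS would put the
         * address beyond 1 << (USER_ESID_BITS + SID_SHIFT) bytes (16TB
         * with this era's 16 ESID bits and 256MB segments), so they mark
         * an invalid user EA.
         */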

        /* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
        b       1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
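        /*
         * The feature section above is resolved at boot: on CPUs without
         * CPU_FTR_16M_PAGE the "b 1f" stays in place and the hugepage
         * check below is skipped entirely.
         *
         * The PACA carries two hugepage-area bitmaps: one for the low
         * 16 segments (the first 4GB with 256MB segments), indexed by
         * ESID, and one for the rest of the address space, indexed by
         * EA >> HTLB_AREA_SHIFT. Pick the right bitmap and index, then
         * shift the bitmap down and test the selected bit.
         */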
        cmpldi  r10,16

        lhz     r9,PACALOWHTLBAREAS(r13)
        mr      r11,r10
        blt     5f

        lhz     r9,PACAHIGHHTLBAREAS(r13)
        srdi    r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)

5:      srd     r9,r9,r11
        andi.   r9,r9,1
        beq     1f
_GLOBAL(slb_miss_user_load_huge)
        li      r11,0
        b       2f
1:
#endif /* CONFIG_HUGETLB_PAGE */

_GLOBAL(slb_miss_user_load_normal)
        li      r11,0

2:
        ld      r9,PACACONTEXTID(r13)
        rldimi  r10,r9,USER_ESID_BITS,0
        b       slb_finish_load
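
        /*
         * The rldimi above shifts the context id left by USER_ESID_BITS
         * and inserts it over the high bits of r10, which still holds
         * the range-checked ESID; the result is the proto-VSID described
         * at label 0 above.
         */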

8:      /* invalid EA */
        li      r10,0                   /* BAD_VSID */
        li      r11,SLB_VSID_USER       /* flags don't much matter */
        b       slb_finish_load
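
        /*
         * Proto-VSID 0 scrambles to VSID 0, the reserved bad VSID that
         * never has any pages behind it, so the entry installed for an
         * invalid EA can never satisfy a translation.
         */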

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *      r3 = faulting address, r13 = PACA
 *      r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
        /* r3 = faulting address */
        srdi    r10,r3,28               /* get esid */

        crset   4*cr7+lt                /* set "user" flag for later */

        /* check if we fit in the range covered by the pagetables */
        srdi.   r9,r3,PGTABLE_EADDR_SIZE
        crnot   4*cr0+eq,4*cr0+eq
        beqlr
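
        /*
         * crset forces cr7 to "lt", matching what the PAGE_OFFSET compare
         * in slb_allocate_realmode produces for user addresses, so the
         * bgelr cr7 in slb_finish_load behaves the same on this path.
         * The crnot inverts cr0.eq from the range check, so an address
         * outside the page-table range returns with cr0.eq set, the
         * "failure" result.
         */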

        /* now we need to get to the page tables in order to get the page
         * size encoding from the PMD. In the future, we'll be able to deal
         * with 1T segments too by getting the encoding from the PGD instead
         */
        ld      r9,PACAPGDIR(r13)
        cmpldi  cr0,r9,0
        beqlr
        rlwinm  r11,r10,8,25,28
        ldx     r9,r9,r11               /* get pgd_t */
        cmpldi  cr0,r9,0
        beqlr
        rlwinm  r11,r10,3,17,28
        ldx     r9,r9,r11               /* get pmd_t */
        cmpldi  cr0,r9,0
        beqlr
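
        /*
         * Each rlwinm above converts part of the ESID into a byte offset
         * into the current page-table level (index scaled by the entry
         * size) and the ldx fetches that entry; a zero entry means
         * nothing is mapped there, so beqlr bails out with cr0.eq set,
         * which the caller treats as a failed lookup.
         */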

        /* build vsid flags */
        andi.   r11,r9,SLB_VSID_LLP
        ori     r11,r11,SLB_VSID_USER

        /* get context to calculate proto-VSID */
        ld      r9,PACACONTEXTID(r13)
        rldimi  r10,r9,USER_ESID_BITS,0

        /* fall through to slb_finish_load */

#endif /* __DISABLED__ */

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
        ASM_VSID_SCRAMBLE(r10,r9)
        rldimi  r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */

        /* r3 = EA, r11 = VSID data */
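
        /*
         * ASM_VSID_SCRAMBLE turns the proto-VSID into the real VSID (in
         * this era, a multiply by VSID_MULTIPLIER modulo VSID_MODULUS)
         * so that consecutive ESIDs spread evenly across the hash table;
         * the rldimi then slides the VSID in above the flag bits to form
         * the VSID dword of the slbmte operand pair.
         */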

        /*
         * Find a slot, round robin. Previously we tried to find a
         * free slot first but that took too long. Unfortunately we
         * don't have any LRU information to help us choose a slot.
         */
#ifdef CONFIG_PPC_ISERIES
        /*
         * On iSeries, the "bolted" stack segment can be cast out on
         * shared processor switch so we need to check for a miss on
         * it and restore it to the right slot.
         */
        ld      r9,PACAKSAVE(r13)
        clrrdi  r9,r9,28
        clrrdi  r3,r3,28
        li      r10,SLB_NUM_BOLTED-1    /* Stack goes in last bolted slot */
        cmpld   r9,r3
        beq     3f
#endif /* CONFIG_PPC_ISERIES */

        ld      r10,PACASTABRR(r13)
        addi    r10,r10,1
        /* use a cpu feature mask if we ever change our slb size */
        cmpldi  r10,SLB_NUM_ENTRIES
        blt+    4f
        li      r10,SLB_NUM_BOLTED

4:
        std     r10,PACASTABRR(r13)
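
        /*
         * A C sketch of the round-robin choice above (the field name
         * paca->stab_rr is assumed from this era's struct paca_struct):
         *
         *      entry = paca->stab_rr + 1;
         *      if (entry >= SLB_NUM_ENTRIES)
         *              entry = SLB_NUM_BOLTED;
         *      paca->stab_rr = entry;
         *
         * Wrapping to SLB_NUM_BOLTED rather than 0 keeps the bolted
         * entries set up at boot from ever being evicted.
         */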

3:
        rldimi  r3,r10,0,36             /* r3 = EA[0:35] | entry */
        oris    r10,r3,SLB_ESID_V@h     /* r10 = r3 | SLB_ESID_V */

        /* r10 = ESID data, r11 = VSID data */

        /*
         * No need for an isync before or after this slbmte. The exception
         * we enter with and the rfid we exit with are context synchronizing.
         */
        slbmte  r11,r10

        /* we're done for kernel addresses */
        crclr   4*cr0+eq                /* set result to "success" */
        bgelr   cr7
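
        /*
         * cr7 still holds the PAGE_OFFSET comparison from entry, so the
         * bgelr above returns straight away for kernel addresses; only
         * user entries are recorded in the SLB cache below, which lets
         * the context-switch path invalidate just those user segments
         * instead of flushing the whole SLB. A C sketch of the update
         * (field names assumed from this era's struct paca_struct):
         *
         *      if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
         *              paca->slb_cache[paca->slb_cache_ptr++] = esid & 0xffff;
         *      else
         *              paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
         */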

        /* Update the slb cache */
        lhz     r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
        cmpldi  r3,SLB_CACHE_ENTRIES
        bge     1f

        /* still room in the slb cache */
        sldi    r11,r3,1                /* r11 = offset * sizeof(u16) */
        rldicl  r10,r10,36,28           /* get low 16 bits of the ESID */
        add     r11,r11,r13             /* r11 = (u16 *)paca + offset */
        sth     r10,PACASLBCACHE(r11)   /* paca->slb_cache[offset] = esid */
        addi    r3,r3,1                 /* offset++ */
        b       2f
1:                                      /* offset >= SLB_CACHE_ENTRIES */
        li      r3,SLB_CACHE_ENTRIES+1
2:
        sth     r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
        crclr   4*cr0+eq                /* set result to "success" */
        blr