/* arch/powerpc/mm/tlb_nohash_low.S */
/*
 * This file contains low-level functions for performing various
 * types of TLB invalidations on various processors with no hash
 * table.
 *
 * This file implements the following functions for all no-hash
 * processors. Some aren't implemented for some variants. Some
 * are inline in tlbflush.h
 *
 *	- tlbil_va
 *	- tlbil_pid
 *	- tlbil_all
 *	- tlbivax_bcast (not yet)
 *
 * Code mostly moved over from misc_32.S
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
  35. #if defined(CONFIG_40x)
  36. /*
  37. * 40x implementation needs only tlbil_va
  38. */
  39. _GLOBAL(__tlbil_va)
  40. /* We run the search with interrupts disabled because we have to change
  41. * the PID and I don't want to preempt when that happens.
  42. */
  43. mfmsr r5
  44. mfspr r6,SPRN_PID
  45. wrteei 0
  46. mtspr SPRN_PID,r4
  47. tlbsx. r3, 0, r3
  48. mtspr SPRN_PID,r6
  49. wrtee r5
  50. bne 1f
  51. sync
  52. /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
  53. * clear. Since 25 is the V bit in the TLB_TAG, loading this value
  54. * will invalidate the TLB entry. */
  55. tlbwe r3, r3, TLB_TAG
  56. isync
  57. 1: blr
  58. #elif defined(CONFIG_8xx)
  59. /*
  60. * Nothing to do for 8xx, everything is inline
  61. */
  62. #elif defined(CONFIG_44x)
  63. /*
  64. * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
  65. * of the TLB for everything else.
  66. */
  67. _GLOBAL(__tlbil_va)
  68. mfspr r5,SPRN_MMUCR
  69. rlwimi r5,r4,0,24,31 /* Set TID */
  70. /* We have to run the search with interrupts disabled, otherwise
  71. * an interrupt which causes a TLB miss can clobber the MMUCR
  72. * between the mtspr and the tlbsx.
  73. *
  74. * Critical and Machine Check interrupts take care of saving
  75. * and restoring MMUCR, so only normal interrupts have to be
  76. * taken care of.
  77. */
  78. mfmsr r4
  79. wrteei 0
  80. mtspr SPRN_MMUCR,r5
  81. tlbsx. r3, 0, r3
  82. wrtee r4
  83. bne 1f
  84. sync
  85. /* There are only 64 TLB entries, so r3 < 64,
  86. * which means bit 22, is clear. Since 22 is
  87. * the V bit in the TLB_PAGEID, loading this
  88. * value will invalidate the TLB entry.
  89. */
  90. tlbwe r3, r3, PPC44x_TLB_PAGEID
  91. isync
  92. 1: blr
  93. _GLOBAL(_tlbil_all)
  94. _GLOBAL(_tlbil_pid)
  95. li r3,0
  96. sync
  97. /* Load high watermark */
  98. lis r4,tlb_44x_hwater@ha
  99. lwz r5,tlb_44x_hwater@l(r4)
  100. 1: tlbwe r3,r3,PPC44x_TLB_PAGEID
  101. addi r3,r3,1
  102. cmpw 0,r3,r5
  103. ble 1b
  104. isync
  105. blr
  106. #elif defined(CONFIG_FSL_BOOKE)
  107. /*
  108. * FSL BookE implementations.
  109. *
  110. * Since feature sections are using _SECTION_ELSE we need
  111. * to have the larger code path before the _SECTION_ELSE
  112. */
  113. /*
  114. * Flush MMU TLB on the local processor
  115. */
  116. _GLOBAL(_tlbil_all)
  117. BEGIN_MMU_FTR_SECTION
  118. li r3,(MMUCSR0_TLBFI)@l
  119. mtspr SPRN_MMUCSR0, r3
  120. 1:
  121. mfspr r3,SPRN_MMUCSR0
  122. andi. r3,r3,MMUCSR0_TLBFI@l
  123. bne 1b
  124. MMU_FTR_SECTION_ELSE
  125. PPC_TLBILX_ALL(0,0)
  126. ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
  127. msync
  128. isync
  129. blr
  130. _GLOBAL(_tlbil_pid)
  131. BEGIN_MMU_FTR_SECTION
  132. slwi r3,r3,16
  133. mfmsr r10
  134. wrteei 0
  135. mfspr r4,SPRN_MAS6 /* save MAS6 */
  136. mtspr SPRN_MAS6,r3
  137. PPC_TLBILX_PID(0,0)
  138. mtspr SPRN_MAS6,r4 /* restore MAS6 */
  139. wrtee r10
  140. MMU_FTR_SECTION_ELSE
  141. li r3,(MMUCSR0_TLBFI)@l
  142. mtspr SPRN_MMUCSR0, r3
  143. 1:
  144. mfspr r3,SPRN_MMUCSR0
  145. andi. r3,r3,MMUCSR0_TLBFI@l
  146. bne 1b
  147. ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
  148. msync
  149. isync
  150. blr
  151. /*
  152. * Flush MMU TLB for a particular address, but only on the local processor
  153. * (no broadcast)
  154. */
  155. _GLOBAL(__tlbil_va)
  156. mfmsr r10
  157. wrteei 0
  158. slwi r4,r4,16
  159. ori r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l
  160. mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
  161. BEGIN_MMU_FTR_SECTION
  162. tlbsx 0,r3
  163. mfspr r4,SPRN_MAS1 /* check valid */
  164. andis. r3,r4,MAS1_VALID@h
  165. beq 1f
  166. rlwinm r4,r4,0,1,31
  167. mtspr SPRN_MAS1,r4
  168. tlbwe
  169. MMU_FTR_SECTION_ELSE
  170. PPC_TLBILX_VA(0,r3)
  171. ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
  172. msync
  173. isync
  174. 1: wrtee r10
  175. blr
  176. #elif defined(CONFIG_PPC_BOOK3E)
  177. /*
  178. * New Book3E (>= 2.06) implementation
  179. *
  180. * Note: We may be able to get away without the interrupt masking stuff
  181. * if we save/restore MAS6 on exceptions that might modify it
  182. */
  183. _GLOBAL(_tlbil_pid)
  184. slwi r4,r3,MAS6_SPID_SHIFT
  185. mfmsr r10
  186. wrteei 0
  187. mtspr SPRN_MAS6,r4
  188. PPC_TLBILX_PID(0,0)
  189. wrtee r10
  190. msync
  191. isync
  192. blr
  193. _GLOBAL(_tlbil_pid_noind)
  194. slwi r4,r3,MAS6_SPID_SHIFT
  195. mfmsr r10
  196. ori r4,r4,MAS6_SIND
  197. wrteei 0
  198. mtspr SPRN_MAS6,r4
  199. PPC_TLBILX_PID(0,0)
  200. wrtee r10
  201. msync
  202. isync
  203. blr
  204. _GLOBAL(_tlbil_all)
  205. PPC_TLBILX_ALL(0,0)
  206. msync
  207. isync
  208. blr
  209. _GLOBAL(_tlbil_va)
  210. mfmsr r10
  211. wrteei 0
  212. cmpwi cr0,r6,0
  213. slwi r4,r4,MAS6_SPID_SHIFT
  214. rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
  215. beq 1f
  216. rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
  217. 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
  218. PPC_TLBILX_VA(0,r3)
  219. msync
  220. isync
  221. wrtee r10
  222. blr
  223. _GLOBAL(_tlbivax_bcast)
  224. mfmsr r10
  225. wrteei 0
  226. cmpwi cr0,r6,0
  227. slwi r4,r4,MAS6_SPID_SHIFT
  228. rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
  229. beq 1f
  230. rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
  231. 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
  232. PPC_TLBIVAX(0,r3)
  233. eieio
  234. tlbsync
  235. sync
  236. wrtee r10
  237. blr
  238. _GLOBAL(set_context)
  239. #ifdef CONFIG_BDI_SWITCH
  240. /* Context switch the PTE pointer for the Abatron BDI2000.
  241. * The PGDIR is the second parameter.
  242. */
  243. lis r5, abatron_pteptrs@h
  244. ori r5, r5, abatron_pteptrs@l
  245. stw r4, 0x4(r5)
  246. #endif
  247. mtspr SPRN_PID,r3
  248. isync /* Force context change */
  249. blr
  250. #else
  251. #error Unsupported processor type !
  252. #endif