/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/stringify.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Linux was ported to Xtensa assuming all auto-refill ways in set 0
 * had the same properties (a very likely assumption).  Multiple sets
 * of auto-refill ways will still work properly, but not as optimally
 * as the Xtensa designer may have assumed.
 *
 * We make this case a hard #error, killing the kernel build, to alert
 * the developer to this condition (which is more likely an error).
 * You super-duper clever developers can change it to a warning or
 * remove it altogether if you think you know what you're doing.  :)
 */

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif

#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif

#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif

extern unsigned long asid_cache;
extern pgd_t *current_pgd;

/*
 * Define the number of entries per auto-refill way in set 0 of both I and D
 * TLBs.  We deal only with set 0 here (an assumption further explained in
 * assertions.h).  Also, define the total number of ARF entries in both TLBs.
 */

#define ITLB_ENTRIES_PER_ARF_WAY  (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY  (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))

#define ITLB_ENTRIES \
	(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES \
	(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
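
/*
 * Worked example (a hypothetical configuration, not from any particular
 * core): if set 0 of the ITLB has 4 auto-refill ways of 4 entries each,
 * then ITLB_ENTRIES_PER_ARF_WAY == 4 and ITLB_ENTRIES == 4 * 4 == 16.
 * The real values come from the XCHAL_* configuration constants.
 */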

/*
 * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
 * In practice, they are probably equal.  This macro simplifies function
 * flush_tlb_range().
 */

#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES  DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES  ITLB_ENTRIES
#endif

/*
 * asid_cache tracks only the ASID[USER_RING] field of the RASID special
 * register, which is the current user-task asid allocation value.
 * mm->context has the same meaning.  When it comes time to write the
 * asid_cache or mm->context values to the RASID special register, we first
 * shift the value left by 8, then insert the value.
 * ASID[0] always contains the kernel's asid value, and we reserve three
 * other asid values that we never assign to user tasks.
 */

#define ASID_INC	0x1
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
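
/*
 * For the common 8-bit ASID configuration (XCHAL_MMU_ASID_BITS == 8,
 * assumed in the examples below), ASID_MASK == 0xff and RASID holds one
 * 8-bit ASID per ring:
 *
 *	bits 31..24	ASID[3]
 *	bits 23..16	ASID[2]
 *	bits 15.. 8	ASID[1]
 *	bits  7.. 0	ASID[0]		(always the kernel's ASID)
 */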

/*
 * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
 * indicating invalid address space.  XCHAL_MMU_ASID_KERNEL is a configurable
 * Xtensa processor constant indicating the kernel address space.  They can
 * be arbitrary values.
 *
 * We identify three more unique, reserved ASID values to use in the unused
 * ring positions.  No other user process will be assigned these reserved
 * ASID values.
 *
 * For example, given that
 *
 *	XCHAL_MMU_ASID_INVALID == 0
 *	XCHAL_MMU_ASID_KERNEL  == 1
 *
 * the following maze of #if statements would generate
 *
 *	ASID_RESERVED_1        == 2
 *	ASID_RESERVED_2        == 3
 *	ASID_RESERVED_3        == 4
 *	ASID_FIRST_NONRESERVED == 5
 */

#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1	((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1	((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2	((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2	((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3	((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3	((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED	((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED	((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif

#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
			    ((ASID_RESERVED_2) << 16) + \
			    ((ASID_RESERVED_3) <<  8) + \
			    ((XCHAL_MMU_ASID_KERNEL)) )
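
/*
 * Continuing the example above (INVALID == 0, KERNEL == 1):
 *
 *	ASID_ALL_RESERVED == (2 << 24) + (3 << 16) + (4 << 8) + 1
 *	                  == 0x02030401
 */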

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  NO_CONTEXT is a better mnemonic than
 * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
 */

#define NO_CONTEXT	XCHAL_MMU_ASID_INVALID

#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif

#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif

#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif

#if (USER_RING == 1)
# define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			  ((ASID_RESERVED_2)   << 16) + \
			  (((x) & (ASID_MASK)) <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#elif (USER_RING == 2)
# define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			  (((x) & (ASID_MASK)) << 16) + \
			  ((ASID_RESERVED_2)   <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#elif (USER_RING == 3)
# define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
			  ((ASID_RESERVED_1)   << 16) + \
			  ((ASID_RESERVED_2)   <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#else
# error Goofy value for USER_RING
#endif /* USER_RING == 1 */
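
/*
 * Example: with USER_RING == 1 and the example constants above,
 * installing user ASID 5 yields
 *
 *	ASID_INSERT(5) == (2 << 24) + (3 << 16) + (5 << 8) + 1
 *	               == 0x02030501
 *
 * i.e. the user ring carries ASID 5, ring 0 carries the kernel ASID,
 * and the unused rings carry reserved values no task is ever assigned.
 */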

/*
 * All upper bits not used by the hardware ASID are treated as a
 * software ASID extension (a TLB-flush generation, or "version",
 * number).
 */

#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION \
	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
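
/*
 * With an 8-bit ASID and ASID_FIRST_NONRESERVED == 5 (example above):
 *
 *	ASID_VERSION_MASK  == ~0xffUL         == 0xffffff00
 *	ASID_FIRST_VERSION == 0xff + 1 + 5    == 0x105
 *
 * Bits 7..0 of asid_cache hold the hardware ASID; bits 31..8 count
 * ASID generations.
 */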

static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}

#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
		asid += ASID_FIRST_NONRESERVED;
	}
	mm->context = asid_cache = asid;
}
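
/*
 * Trace of the fast path, using the 8-bit example values: user ASIDs
 * run 5, 6, ..., 0xff; the next increment clears the low byte, so the
 * whole TLB is flushed, the version bits have advanced by one, and
 * allocation restarts at version + 5, skipping 0..4 (the invalid,
 * kernel, and reserved values).
 */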

#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation

/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
   really the best, but if you insist... */

static inline int validate_asid (unsigned long asid)
{
	switch (asid) {
	case XCHAL_MMU_ASID_INVALID:
	case XCHAL_MMU_ASID_KERNEL:
	case ASID_RESERVED_1:
	case ASID_RESERVED_2:
	case ASID_RESERVED_3:
		return 0;	/* can't use these values as ASIDs */
	}
	return 1;	/* valid */
}

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	while (1) {
		asid += ASID_INC;
		if ( ! (asid & ASID_MASK) ) {
			flush_tlb_all();	/* start new asid cycle */
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
			asid += ASID_FIRST_NONRESERVED;
			break;	/* no need to validate here */
		}
		if (validate_asid (asid & ASID_MASK))
			break;
	}
	mm->context = asid_cache = asid;
}

#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if ((next->context ^ asid) & ASID_VERSION_MASK)
		get_new_mmu_context(next, asid);

	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}
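
/*
 * The version check above is sound because flush_tlb_all() runs exactly
 * when the version bits advance: if next->context and asid_cache differ
 * in their ASID_VERSION_MASK bits, the TLB has been flushed and ASIDs
 * recycled since this mm last ran, so its old ASID may now belong to
 * another task and a fresh one must be allocated.
 */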

#define deactivate_mm(tsk, mm)	do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	get_new_mmu_context(next, asid_cache);
	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* _XTENSA_MMU_CONTEXT_H */