/* include/asm-sh/mmu_context.h */
  1. /*
  2. * Copyright (C) 1999 Niibe Yutaka
  3. * Copyright (C) 2003 Paul Mundt
  4. *
  5. * ASID handling idea taken from MIPS implementation.
  6. */
  7. #ifndef __ASM_SH_MMU_CONTEXT_H
  8. #define __ASM_SH_MMU_CONTEXT_H
  9. #ifdef __KERNEL__
  10. #include <asm/cpu/mmu_context.h>
  11. #include <asm/tlbflush.h>
  12. #include <asm/uaccess.h>
  13. #include <asm/io.h>
  14. /*
  15. * The MMU "context" consists of two things:
  16. * (a) TLB cache version (or round, cycle whatever expression you like)
  17. * (b) ASID (Address Space IDentifier)
  18. */
  19. /*
  20. * Cache of MMU context last used.
  21. */
  22. extern unsigned long mmu_context_cache;
  23. #define MMU_CONTEXT_ASID_MASK 0x000000ff
  24. #define MMU_CONTEXT_VERSION_MASK 0xffffff00
  25. #define MMU_CONTEXT_FIRST_VERSION 0x00000100
  26. #define NO_CONTEXT 0
  27. /* ASID is 8-bit value, so it can't be 0x100 */
  28. #define MMU_NO_ASID 0x100
  29. /*
  30. * Virtual Page Number mask
  31. */
  32. #define MMU_VPN_MASK 0xfffff000
  33. #ifdef CONFIG_MMU
/*
 * Get MMU context if needed.
 *
 * Ensures @mm holds an ASID from the current generation ("version")
 * of mmu_context_cache.  A context is stale when it is NO_CONTEXT or
 * its version bits differ from the cache's; in that case a fresh ASID
 * is taken from the counter.  When the 8-bit ASID space is exhausted,
 * the whole TLB is flushed and a new version cycle begins.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	unsigned long mc = mmu_context_cache;

	/* Check if we have old version of context. */
	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
		/* It's up to date, do nothing */
		return;

	/* It's old, we need to get new context with new version. */
	mc = ++mmu_context_cache;
	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/*
		 * We exhaust ASID of this version.
		 * Flush all TLB and start new cycle.
		 */
		flush_tlb_all();
		/*
		 * Fix version; Note that we avoid version #0
		 * to distinguish NO_CONTEXT.
		 */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context.id = mc;
}
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 *
 * ASID allocation is lazy: mark the context unassigned here and let
 * get_mmu_context() hand out a real ID on first activation.  @tsk is
 * unused on this architecture.  Always returns 0 (success).
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;

	return 0;
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 *
 * Intentionally empty: no per-mm MMU resources are held, ASIDs are
 * reclaimed wholesale when the version counter rolls over.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Do nothing */
}
  79. static inline void set_asid(unsigned long asid)
  80. {
  81. unsigned long __dummy;
  82. __asm__ __volatile__ ("mov.l %2, %0\n\t"
  83. "and %3, %0\n\t"
  84. "or %1, %0\n\t"
  85. "mov.l %0, %2"
  86. : "=&r" (__dummy)
  87. : "r" (asid), "m" (__m(MMU_PTEH)),
  88. "r" (0xffffff00));
  89. }
/*
 * Return the ASID currently programmed into MMU_PTEH
 * (low 8 bits of the register).
 */
static inline unsigned long get_asid(void)
{
	unsigned long asid;

	__asm__ __volatile__ ("mov.l %1, %0"
			      : "=r" (asid)
			      : "m" (__m(MMU_PTEH)));
	asid &= MMU_CONTEXT_ASID_MASK;

	return asid;
}
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm)
{
	/* Make sure mm owns a context of the current generation... */
	get_mmu_context(mm);
	/* ...then program its ASID into the hardware. */
	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
}
/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
{
	/* Stash the page-table base where the fault handler can find it. */
	ctrl_outl((unsigned long)pgd, MMU_TTB);
}
/* Fetch the page-table base previously stored via set_TTB(). */
static inline pgd_t *get_TTB(void)
{
	return (pgd_t *)ctrl_inl(MMU_TTB);
}
  117. static inline void switch_mm(struct mm_struct *prev,
  118. struct mm_struct *next,
  119. struct task_struct *tsk)
  120. {
  121. if (likely(prev != next)) {
  122. set_TTB(next->pgd);
  123. activate_context(next);
  124. }
  125. }
/* No explicit deactivation step is needed on SH. */
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activating an mm is just a switch with no meaningful previous task. */
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

/* Lazy TLB mode requires no special handling on this architecture. */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#else /* !CONFIG_MMU */

/*
 * Without an MMU every context operation collapses to a no-op, so
 * generic code can call these unconditionally.
 */
#define get_mmu_context(mm)		do { } while (0)
#define init_new_context(tsk,mm)	(0)
#define destroy_context(mm)		do { } while (0)
#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define activate_context(mm)		do { } while (0)
#define switch_mm(prev,next,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)		do { } while (0)
#define activate_mm(prev,next)		do { } while (0)
#define enter_lazy_tlb(mm,tsk)		do { } while (0)
#endif /* CONFIG_MMU */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on ..
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
	/* Enable MMU */
	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
	ctrl_barrier();

	/*
	 * First enable: seed the generation counter so that ASID
	 * allocation in get_mmu_context() can start, then program
	 * the current ASID into the hardware.
	 */
	if (mmu_context_cache == NO_CONTEXT)
		mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
}
  160. static inline void disable_mmu(void)
  161. {
  162. unsigned long cr;
  163. cr = ctrl_inl(MMUCR);
  164. cr &= ~MMU_CONTROL_INIT;
  165. ctrl_outl(cr, MMUCR);
  166. ctrl_barrier();
  167. }
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
/* These paths must never be reached on MMU-less CPUs; trap loudly. */
#define enable_mmu()	do { BUG(); } while (0)
#define disable_mmu()	do { BUG(); } while (0)
#endif
  176. #endif /* __KERNEL__ */
  177. #endif /* __ASM_SH_MMU_CONTEXT_H */