/*
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <asm/cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or round, cycle, whatever expression you like)
 *    (b) ASID (Address Space IDentifier)
 */

/*
 * Cache of MMU context last used.
 */
extern unsigned long mmu_context_cache;

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* The ASID is an 8-bit value, so it can never be 0x100 */
#define MMU_NO_ASID			0x100
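
/*
 * An mm's context value packs both fields into a single word:
 *
 *	bits 31..8: version (MMU_CONTEXT_VERSION_MASK)
 *	bits  7..0: ASID    (MMU_CONTEXT_ASID_MASK)
 *
 * For example, a context of 0x00000203 is ASID 0x03 under version
 * 0x00000200.  Bumping the version implicitly invalidates every ASID
 * handed out under the previous one.
 */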

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000

#ifdef CONFIG_MMU
/*
 * Get MMU context if needed.
 */
static __inline__ void
get_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	unsigned long mc = mmu_context_cache;

	/* Check if we have an old version of the context. */
	if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
		/* It's up to date, do nothing */
		return;

	/* It's old, we need to get a new context with a new version. */
	mc = ++mmu_context_cache;
	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/*
		 * We have exhausted the ASIDs of this version.
		 * Flush the whole TLB and start a new cycle.
		 */
		flush_tlb_all();
		/*
		 * Fix version; note that we avoid version #0
		 * to distinguish it from NO_CONTEXT.
		 */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}
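
/*
 * Rollover example: if mmu_context_cache is 0x000002ff, the increment
 * above yields 0x00000300, whose ASID field is zero, so the whole TLB
 * is flushed and ASIDs restart under version 0x00000300.  A new mm
 * starts out at NO_CONTEXT (version 0), which can never match the
 * cache's version, so it picks up a fresh ASID on first activation.
 */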

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static __inline__ int init_new_context(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static __inline__ void destroy_context(struct mm_struct *mm)
{
	/* Do nothing */
}
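
/*
 * Set the ASID field (the low 8 bits) of the PTEH register, preserving
 * the upper bits.  Roughly equivalent in C, using the ctrl_inl()/
 * ctrl_outl() accessors from <asm/io.h>:
 *
 *	ctrl_outl((ctrl_inl(MMU_PTEH) & MMU_CONTEXT_VERSION_MASK) | asid,
 *		  MMU_PTEH);
 */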
static __inline__ void set_asid(unsigned long asid)
{
	unsigned long __dummy;

	__asm__ __volatile__ ("mov.l	%2, %0\n\t"
			      "and	%3, %0\n\t"
			      "or	%1, %0\n\t"
			      "mov.l	%0, %2"
			      : "=&r" (__dummy)
			      : "r" (asid), "m" (__m(MMU_PTEH)),
			        "r" (0xffffff00));
}
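
/*
 * Read the current ASID back out of the low 8 bits of PTEH.
 */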
static __inline__ unsigned long get_asid(void)
{
	unsigned long asid;

	__asm__ __volatile__ ("mov.l	%1, %0"
			      : "=r" (asid)
			      : "m" (__m(MMU_PTEH)));
	asid &= MMU_CONTEXT_ASID_MASK;
	return asid;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}

/*
 * MMU_TTB can be used for optimizing the fault handling.
 * (Currently not used)
 */
static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk)
{
	if (likely(prev != next)) {
		unsigned long __pgdir = (unsigned long)next->pgd;

		__asm__ __volatile__("mov.l	%0, %1"
				     : /* no output */
				     : "r" (__pgdir), "m" (__m(MMU_TTB)));
		activate_context(next);
	}
}
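
/*
 * activate_mm() (used on the exec path, where there is no meaningful
 * previous user context) is simply switch_mm() with no task.
 * deactivate_mm() and enter_lazy_tlb() need no work on this port:
 * the ASID only changes when another context is explicitly activated.
 */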
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

static __inline__ void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#else /* !CONFIG_MMU */
#define get_mmu_context(mm)		do { } while (0)
#define init_new_context(tsk,mm)	(0)
#define destroy_context(mm)		do { } while (0)
#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define activate_context(mm)		do { } while (0)
#define switch_mm(prev,next,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)		do { } while (0)
#define activate_mm(prev,next)		do { } while (0)
#define enter_lazy_tlb(mm,tsk)		do { } while (0)
#endif /* CONFIG_MMU */

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on ...
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
	/* Enable MMU */
	ctrl_outl(MMU_CONTROL_INIT, MMUCR);

	/* The manual suggests doing some nops after turning on the MMU */
	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop\n\t");

	if (mmu_context_cache == NO_CONTEXT)
		mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
}
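
/*
 * Disabling is the reverse: clear the control bits that enable_mmu()
 * set, then again give the change a few nops to settle.
 */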
static inline void disable_mmu(void)
{
	unsigned long cr;

	cr = ctrl_inl(MMUCR);
	cr &= ~MMU_CONTROL_INIT;
	ctrl_outl(cr, MMUCR);
	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop\n\t");
}
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()	do { BUG(); } while (0)
#define disable_mmu()	do { BUG(); } while (0)
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */