/* include/asm-sh/mmu_context.h */
  1. /*
  2. * Copyright (C) 1999 Niibe Yutaka
  3. * Copyright (C) 2003 - 2006 Paul Mundt
  4. *
  5. * ASID handling idea taken from MIPS implementation.
  6. */
  7. #ifndef __ASM_SH_MMU_CONTEXT_H
  8. #define __ASM_SH_MMU_CONTEXT_H
  9. #ifdef __KERNEL__
  10. #include <asm/cpu/mmu_context.h>
  11. #include <asm/tlbflush.h>
  12. #include <asm/uaccess.h>
  13. #include <asm/io.h>
/*
 * The MMU "context" consists of two things:
 *   (a) TLB cache version (or round, cycle whatever expression you like)
 *   (b) ASID (Address Space IDentifier)
 *
 * Both are packed into a single unsigned long: the version lives in
 * bits 31..8 and the 8-bit ASID in bits 7..0.
 */
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID			0x100

/* Per-CPU context value (version | ASID) stored in the mm. */
#define cpu_context(cpu, mm)	((mm)->context.id[cpu])

/* Just the ASID byte of a CPU's context for this mm. */
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
				 MMU_CONTEXT_ASID_MASK)

/* Most recently handed-out context value on this CPU. */
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000
  33. #ifdef CONFIG_MMU
  34. /*
  35. * Get MMU context if needed.
  36. */
/*
 * Get MMU context if needed.
 *
 * asid_cache(cpu) holds the last context value allocated on this CPU
 * (version bits | ASID byte).  If mm's context on this CPU already
 * carries the current version, it is still valid and nothing happens.
 * Otherwise a new ASID is allocated; when the 8-bit ASID space wraps,
 * the whole TLB is flushed and a new version cycle begins.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = asid_cache(cpu);

	/* Check if we have old version of context. */
	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
		/* It's up to date, do nothing */
		return;

	/* It's old, we need to get new context with new version. */
	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
		/*
		 * We exhaust ASID of this version.
		 * Flush all TLB and start new cycle.
		 */
		flush_tlb_all();

		/*
		 * Fix version; Note that we avoid version #0
		 * to distinguish NO_CONTEXT.
		 */
		if (!asid)
			asid = MMU_CONTEXT_FIRST_VERSION;
	}

	/* Publish the new context both in the mm and the per-CPU cache. */
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
  60. /*
  61. * Initialize the context related info for a new mm_struct
  62. * instance.
  63. */
  64. static inline int init_new_context(struct task_struct *tsk,
  65. struct mm_struct *mm)
  66. {
  67. int i;
  68. for (i = 0; i < num_online_cpus(); i++)
  69. cpu_context(i, mm) = NO_CONTEXT;
  70. return 0;
  71. }
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Do nothing -- contexts are recycled lazily by the version scheme. */
}
  80. static inline void set_asid(unsigned long asid)
  81. {
  82. unsigned long __dummy;
  83. __asm__ __volatile__ ("mov.l %2, %0\n\t"
  84. "and %3, %0\n\t"
  85. "or %1, %0\n\t"
  86. "mov.l %0, %2"
  87. : "=&r" (__dummy)
  88. : "r" (asid), "m" (__m(MMU_PTEH)),
  89. "r" (0xffffff00));
  90. }
/*
 * Read the currently-loaded ASID: the low 8 bits of MMU_PTEH.
 */
static inline unsigned long get_asid(void)
{
	unsigned long asid;

	__asm__ __volatile__ ("mov.l %1, %0"
			      : "=r" (asid)
			      : "m" (__m(MMU_PTEH)));
	/* Strip the version/VPN bits; only the ASID byte is meaningful. */
	asid &= MMU_CONTEXT_ASID_MASK;
	return asid;
}
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 *
 * Allocates a current-version ASID for mm (if needed) and loads
 * it into the hardware.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_asid(cpu_asid(cpu, mm));
}
/* MMU_TTB is used for optimizing the fault handling. */

/* Point the hardware translation table base register at this pgd. */
static inline void set_TTB(pgd_t *pgd)
{
	ctrl_outl((unsigned long)pgd, MMU_TTB);
}
/* Read back the current translation table base as a pgd pointer. */
static inline pgd_t *get_TTB(void)
{
	return (pgd_t *)ctrl_inl(MMU_TTB);
}
  118. static inline void switch_mm(struct mm_struct *prev,
  119. struct mm_struct *next,
  120. struct task_struct *tsk)
  121. {
  122. unsigned int cpu = smp_processor_id();
  123. if (likely(prev != next)) {
  124. cpu_set(cpu, next->cpu_vm_mask);
  125. set_TTB(next->pgd);
  126. activate_context(next, cpu);
  127. } else
  128. if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
  129. activate_context(next, cpu);
  130. }
/* No teardown work needed when an mm is deactivated. */
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activating an mm is a context switch with no previous task. */
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

/* Lazy TLB mode needs no action; switching is handled in switch_mm(). */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
  138. #else /* !CONFIG_MMU */
/*
 * No-MMU configurations: all context handling collapses to no-ops
 * with the same call signatures as the MMU versions above.
 */
#define get_mmu_context(mm)		do { } while (0)
#define init_new_context(tsk,mm)	(0)
#define destroy_context(mm)		do { } while (0)
#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define activate_context(mm,cpu)	do { } while (0)
#define switch_mm(prev,next,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)		do { } while (0)
#define activate_mm(prev,next)		do { } while (0)
#define enter_lazy_tlb(mm,tsk)		do { } while (0)
  149. #endif /* CONFIG_MMU */
  150. #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on ..
 * paging_init() will also have to be updated for the processor in
 * question.
 */

/*
 * Turn the MMU on: program MMUCR with the initial control value,
 * then make sure this CPU has a valid context and load its ASID
 * into the hardware.
 */
static inline void enable_mmu(void)
{
	unsigned int cpu = smp_processor_id();

	/* Enable MMU */
	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
	ctrl_barrier();

	/* First use on this CPU: seed the ASID cache with version 1. */
	if (asid_cache(cpu) == NO_CONTEXT)
		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}
  166. static inline void disable_mmu(void)
  167. {
  168. unsigned long cr;
  169. cr = ctrl_inl(MMUCR);
  170. cr &= ~MMU_CONTROL_INIT;
  171. ctrl_outl(cr, MMUCR);
  172. ctrl_barrier();
  173. }
  174. #else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */

/* Calling these on a no-MMU part is a kernel bug. */
#define enable_mmu()	do { BUG(); } while (0)
#define disable_mmu()	do { BUG(); } while (0)
  181. #endif
  182. #endif /* __KERNEL__ */
  183. #endif /* __ASM_SH_MMU_CONTEXT_H */