mmu_context.h

#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
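
/* enter_lazy_tlb() is a no-op on m68k. */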
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_MMU

#if defined(CONFIG_COLDFIRE)

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mcfmmu.h>
#include <asm/mmu.h>

#define NO_CONTEXT      256
#define LAST_CONTEXT    255
#define FIRST_CONTEXT   1

extern unsigned long context_map[];
extern mm_context_t next_mmu_context;
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
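
/*
 * Allocate a hardware context (ASID) for this mm if it does not
 * already have one.  Free contexts are tracked in context_map and
 * nr_free_contexts; when none are free, steal_context() reclaims
 * one from another mm before the search is retried.
 */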
static inline void get_mmu_context(struct mm_struct *mm)
{
        mm_context_t ctx;

        if (mm->context != NO_CONTEXT)
                return;
        while (atomic_dec_and_test_lt(&nr_free_contexts)) {
                atomic_inc(&nr_free_contexts);
                steal_context();
        }
        ctx = next_mmu_context;
        while (test_and_set_bit(ctx, context_map)) {
                ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
                if (ctx > LAST_CONTEXT)
                        ctx = 0;
        }
        next_mmu_context = (ctx + 1) & LAST_CONTEXT;
        mm->context = ctx;
        context_mm[ctx] = mm;
}

/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk, mm)       (((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                clear_bit(mm->context, context_map);
                mm->context = NO_CONTEXT;
                atomic_inc(&nr_free_contexts);
        }
}
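
/* Load the context number into the ColdFire ASID register. */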
static inline void set_context(mm_context_t context, pgd_t *pgd)
{
        __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        get_mmu_context(tsk->mm);
        set_context(tsk->mm->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm,
                               struct mm_struct *mm)
{
        get_mmu_context(mm);
        set_context(mm->context, mm->pgd);
}

#define deactivate_mm(tsk, mm)  do { } while (0)

extern void mmu_context_init(void);
#define prepare_arch_switch(next) load_ksp_mmu(next)
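
/*
 * Hooked in through prepare_arch_switch() above: make sure the incoming
 * task's kernel stack has a valid TLB entry before we switch to it.
 * Probe the MMU for an existing mapping first; if there is none, walk
 * the page tables and load a TLB entry for the stack page by hand.
 */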
static inline void load_ksp_mmu(struct task_struct *task)
{
        unsigned long flags;
        struct mm_struct *mm;
        int asid;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long mmuar;

        local_irq_save(flags);
        mmuar = task->thread.ksp;

        /* Search for a valid TLB entry, if one is found, don't remap */
        mmu_write(MMUAR, mmuar);
        mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
        if (mmu_read(MMUSR) & MMUSR_HIT)
                goto end;

        if (mmuar >= PAGE_OFFSET) {
                mm = &init_mm;
        } else {
                pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
                mm = task->mm;
        }

        if (!mm)
                goto bug;

        pgd = pgd_offset(mm, mmuar);
        if (pgd_none(*pgd))
                goto bug;

        pmd = pmd_offset(pgd, mmuar);
        if (pmd_none(*pmd))
                goto bug;

        pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
                                     : pte_offset_map(pmd, mmuar);
        if (pte_none(*pte) || !pte_present(*pte))
                goto bug;

        set_pte(pte, pte_mkyoung(*pte));
        asid = mm->context & 0xff;
        if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
                set_pte(pte, pte_wrprotect(*pte));

        mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
                  (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
                  >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

        mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
                  ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

        mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);

        goto end;

bug:
        pr_info("ksp load failed: mm=0x%p ksp=0x%08lx\n", mm, mmuar);
end:
        local_irq_restore(flags);
}
#elif defined(CONFIG_SUN3)

#include <asm/sun3mmu.h>
#include <linux/sched.h>

extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);

/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context = SUN3_INVALID_CONTEXT;
        return 0;
}

/* find the context given to this process, and if it hasn't already
   got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
        if (mm->context == SUN3_INVALID_CONTEXT)
                mm->context = get_free_context(mm);
}

/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
        if (mm->context != SUN3_INVALID_CONTEXT)
                clear_context(mm->context);
}

static inline void activate_context(struct mm_struct *mm)
{
        get_mmu_context(mm);
        sun3_put_context(mm->context);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        activate_context(tsk->mm);
}

#define deactivate_mm(tsk, mm)  do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        activate_context(next_mm);
}
#else

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
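
/*
 * With the standard 680x0 MMUs the context is simply the physical
 * address of the mm's page directory; it is loaded into the MMU root
 * pointer on each switch.
 */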
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context = virt_to_phys(mm->pgd);
        return 0;
}

#define destroy_context(mm)     do { } while (0)
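
/*
 * Load a new CPU root pointer on 68020/68030.  The on-chip caches are
 * virtually addressed, so they are flushed before the root pointer is
 * switched.
 */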
static inline void switch_mm_0230(struct mm_struct *mm)
{
        unsigned long crp[2] = {
                0x80000000 | _PAGE_TABLE, mm->context
        };
        unsigned long tmp;

        asm volatile (".chip 68030");

        /* flush MC68030/MC68020 caches (they are virtually addressed) */
        asm volatile (
                "movec %%cacr,%0;"
                "orw %1,%0; "
                "movec %0,%%cacr"
                : "=d" (tmp) : "di" (FLUSH_I_AND_D));

        /* Switch the root pointer. For a 030-only kernel,
         * avoid flushing the whole ATC, we only need to
         * flush the user entries. The 68851 does this by
         * itself. Avoid a runtime check here.
         */
        asm volatile (
#ifdef CPU_M68030_ONLY
                "pmovefd %0,%%crp; "
                "pflush #0,#4"
#else
                "pmove %0,%%crp"
#endif
                : : "m" (crp[0]));

        asm volatile (".chip 68k");
}
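
/*
 * Load a new user root pointer on 68040/68060: flush the user ATC
 * entries, load %urp, and on the 68060 also clear the user entries
 * in the branch cache.
 */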
static inline void switch_mm_0460(struct mm_struct *mm)
{
        asm volatile (".chip 68040");

        /* flush address translation cache (user entries) */
        asm volatile ("pflushan");

        /* switch the root pointer */
        asm volatile ("movec %0,%%urp" : : "r" (mm->context));

        if (CPU_IS_060) {
                unsigned long tmp;

                /* clear user entries in the branch cache */
                asm volatile (
                        "movec %%cacr,%0; "
                        "orl %1,%0; "
                        "movec %0,%%cacr"
                        : "=d" (tmp) : "di" (0x00200000));
        }

        asm volatile (".chip 68k");
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (prev != next) {
                if (CPU_IS_020_OR_030)
                        switch_mm_0230(next);
                else
                        switch_mm_0460(next);
        }
}

#define deactivate_mm(tsk, mm)  do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        next_mm->context = virt_to_phys(next_mm->pgd);

        if (CPU_IS_020_OR_030)
                switch_mm_0230(next_mm);
        else
                switch_mm_0460(next_mm);
}

#endif
#else /* !CONFIG_MMU */
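
/* Without an MMU there is no context to manage; all hooks are no-ops. */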
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}

#define destroy_context(mm)     do { } while (0)
#define deactivate_mm(tsk, mm)  do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
}

#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */