/*
 * m68k mmu_context.h — MMU context handling for the classic
 * Motorola MMUs (020/030/040/060), the Sun-3 MMU, and no-MMU builds.
 */
  1. #ifndef __M68K_MMU_CONTEXT_H
  2. #define __M68K_MMU_CONTEXT_H
  3. #include <asm-generic/mm_hooks.h>
/* Lazy-TLB hook: nothing to do on m68k, so this is a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
  7. #ifdef CONFIG_MMU
  8. #ifndef CONFIG_SUN3
  9. #include <asm/setup.h>
  10. #include <asm/page.h>
  11. #include <asm/pgalloc.h>
/*
 * Initialise the MMU context of a freshly created mm: the context is
 * simply the physical address of the page directory, ready to be
 * loaded into the CPU root pointer on the next switch.
 * Always succeeds (returns 0).
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = virt_to_phys(mm->pgd);
	return 0;
}
  18. #define destroy_context(mm) do { } while(0)
/*
 * Load a new user page-table root on MC68020/MC68030.
 * The 020/030 caches are virtually addressed, so they must be flushed
 * before the CRP (CPU root pointer) is replaced.
 */
static inline void switch_mm_0230(struct mm_struct *mm)
{
	/* 64-bit CRP descriptor: upper long is the limit/type word,
	 * lower long is the table address (mm->context holds the
	 * physical address of the pgd). */
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC, we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}
/*
 * Load a new user page-table root on MC68040/MC68060 via the URP
 * (user root pointer) register. Only the user ATC entries — and on
 * the 060 the branch cache — need invalidating here.
 */
static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp): "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}
  65. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
  66. {
  67. if (prev != next) {
  68. if (CPU_IS_020_OR_030)
  69. switch_mm_0230(next);
  70. else
  71. switch_mm_0460(next);
  72. }
  73. }
  74. #define deactivate_mm(tsk,mm) do { } while (0)
  75. static inline void activate_mm(struct mm_struct *prev_mm,
  76. struct mm_struct *next_mm)
  77. {
  78. next_mm->context = virt_to_phys(next_mm->pgd);
  79. if (CPU_IS_020_OR_030)
  80. switch_mm_0230(next_mm);
  81. else
  82. switch_mm_0460(next_mm);
  83. }
  84. #else /* CONFIG_SUN3 */
  85. #include <asm/sun3mmu.h>
  86. #include <linux/sched.h>
  87. extern unsigned long get_free_context(struct mm_struct *mm);
  88. extern void clear_context(unsigned long context);
/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* SUN3_INVALID_CONTEXT marks "no hardware context yet"; a real
	 * one is allocated lazily by get_mmu_context(). */
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}
  95. /* find the context given to this process, and if it hasn't already
  96. got one, go get one for it. */
  97. static inline void get_mmu_context(struct mm_struct *mm)
  98. {
  99. if(mm->context == SUN3_INVALID_CONTEXT)
  100. mm->context = get_free_context(mm);
  101. }
  102. /* flush context if allocated... */
  103. static inline void destroy_context(struct mm_struct *mm)
  104. {
  105. if(mm->context != SUN3_INVALID_CONTEXT)
  106. clear_context(mm->context);
  107. }
/* Make @mm's hardware context current: allocate one if necessary,
 * then program it into the Sun-3 MMU. */
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}
/*
 * Switch to the address space of the incoming task.
 *
 * NOTE(review): this activates tsk->mm rather than the @next
 * argument — presumably they refer to the same mm on this call
 * path; confirm against the scheduler's callers.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	activate_context(tsk->mm);
}
  117. #define deactivate_mm(tsk,mm) do { } while (0)
/* Activate @next_mm as the current address space by loading its
 * Sun-3 hardware context. */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}
  123. #endif
  124. #else /* !CONFIG_MMU */
/* Without an MMU there is no per-mm context to initialise. */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}
/* No address-space switch is required without an MMU. */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}
/* No MMU context exists, so teardown and deactivation are no-ops. */
#define destroy_context(mm) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
/* No MMU state to activate. */
static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
}
  137. #endif /* CONFIG_MMU */
  138. #endif /* __M68K_MMU_CONTEXT_H */