/*
 * mmu_context.h — m68k MMU context handling
 * (68020/68030/68040/68060 variant and Sun3 variant)
 */
  1. #ifndef __M68K_MMU_CONTEXT_H
  2. #define __M68K_MMU_CONTEXT_H
  3. #include <asm-generic/mm_hooks.h>
/*
 * Nothing to do on m68k when a task enters lazy TLB mode — no
 * architecture-specific state needs to be saved or switched here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
  7. #ifndef CONFIG_SUN3
  8. #include <asm/setup.h>
  9. #include <asm/page.h>
  10. #include <asm/pgalloc.h>
/*
 * Initialise the MMU context of a freshly created mm: record the
 * physical address of its root page table.  switch_mm_0230()/
 * switch_mm_0460() below load this value straight into the MMU
 * root pointer.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = virt_to_phys(mm->pgd);
	return 0;	/* never fails */
}
/* No teardown needed: the context is just a physical address, nothing
 * was allocated by init_new_context(). */
#define destroy_context(mm) do { } while(0)
/*
 * Load @mm's root page table on 68020/68030-class CPUs (68851 PMMU or
 * on-chip 030 MMU).  Flushes the virtually-addressed caches first,
 * then switches the CPU root pointer (CRP).
 */
static inline void switch_mm_0230(struct mm_struct *mm)
{
	/* CRP descriptor: control/limit word + physical root table
	 * address (mm->context, set up by init_new_context()). */
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC, we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}
/*
 * Load @mm's root page table on 68040/68060: flush the user ATC
 * entries, then switch the user root pointer (URP).
 */
static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache
		 * (0x00200000: 68060 CACR bit for that purpose) */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp): "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}
  64. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
  65. {
  66. if (prev != next) {
  67. if (CPU_IS_020_OR_030)
  68. switch_mm_0230(next);
  69. else
  70. switch_mm_0460(next);
  71. }
  72. }
/* No architecture-specific work needed when an mm is deactivated. */
#define deactivate_mm(tsk,mm) do { } while (0)
  74. static inline void activate_mm(struct mm_struct *prev_mm,
  75. struct mm_struct *next_mm)
  76. {
  77. next_mm->context = virt_to_phys(next_mm->pgd);
  78. if (CPU_IS_020_OR_030)
  79. switch_mm_0230(next_mm);
  80. else
  81. switch_mm_0460(next_mm);
  82. }
  83. #else /* CONFIG_SUN3 */
  84. #include <asm/sun3mmu.h>
  85. #include <linux/sched.h>
/* Sun3 hardware context slot allocator — defined elsewhere in the
 * Sun3 MMU code (NOTE(review): confirm exact location). */
extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);
/* set the context for a new task to unmapped: no hardware context slot
 * is assigned until the mm is first used (see get_mmu_context()). */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;	/* never fails */
}
/* find the context given to this process, and if it hasn't already
   got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
	/* Lazy allocation: slot is grabbed on first use only. */
	if(mm->context == SUN3_INVALID_CONTEXT)
		mm->context = get_free_context(mm);
}
/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Release the hardware context slot, if one was ever assigned. */
	if(mm->context != SUN3_INVALID_CONTEXT)
		clear_context(mm->context);
}
/* Ensure @mm has a context slot, then load it into the Sun3 MMU. */
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	/* prev/next are unused: the incoming task's own mm is activated.
	 * NOTE(review): presumably tsk->mm == next here — confirm
	 * against the generic callers of switch_mm(). */
	activate_context(tsk->mm);
}
/* No architecture-specific work needed when an mm is deactivated. */
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	/* Allocate a context slot if needed and load it into the MMU. */
	activate_context(next_mm);
}
  122. #endif
  123. #endif