/* m68k MMU context switching: Motorola 020/030/040/060 MMUs and Sun-3 MMU */
  1. #ifndef __M68K_MMU_CONTEXT_H
  2. #define __M68K_MMU_CONTEXT_H
/*
 * Lazy TLB entry is a no-op here: this implementation keeps no per-CPU
 * lazy-TLB bookkeeping, so there is nothing to record when a kernel
 * thread borrows @mm.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
  6. #ifndef CONFIG_SUN3
  7. #include <asm/setup.h>
  8. #include <asm/page.h>
  9. #include <asm/pgalloc.h>
  10. static inline int init_new_context(struct task_struct *tsk,
  11. struct mm_struct *mm)
  12. {
  13. mm->context = virt_to_phys(mm->pgd);
  14. return 0;
  15. }
  16. #define destroy_context(mm) do { } while(0)
/*
 * Switch the address space on a 68020/68030-class MMU.
 *
 * Builds a 64-bit CPU root pointer descriptor on the stack (control
 * long word 0x80000000 | _PAGE_TABLE, then the physical address of the
 * new root table from mm->context) and loads it with pmove/pmovefd.
 * The 020/030 caches are virtually addressed, so they are flushed
 * before the switch.
 */
static inline void switch_mm_0230(struct mm_struct *mm)
{
	/* Root pointer descriptor: control word + root table address. */
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC, we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}
/*
 * Switch the address space on a 68040/68060 MMU: flush the user ATC
 * entries and load the user root pointer (URP) from mm->context.
 */
static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache */
		/* 0x00200000 is the CACR bit that performs the clear. */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp): "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}
  63. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
  64. {
  65. if (prev != next) {
  66. if (CPU_IS_020_OR_030)
  67. switch_mm_0230(next);
  68. else
  69. switch_mm_0460(next);
  70. }
  71. }
  72. #define deactivate_mm(tsk,mm) do { } while (0)
  73. static inline void activate_mm(struct mm_struct *prev_mm,
  74. struct mm_struct *next_mm)
  75. {
  76. next_mm->context = virt_to_phys(next_mm->pgd);
  77. if (CPU_IS_020_OR_030)
  78. switch_mm_0230(next_mm);
  79. else
  80. switch_mm_0460(next_mm);
  81. }
  82. #else /* CONFIG_SUN3 */
  83. #include <asm/sun3mmu.h>
  84. #include <linux/sched.h>
  85. extern unsigned long get_free_context(struct mm_struct *mm);
  86. extern void clear_context(unsigned long context);
  87. /* set the context for a new task to unmapped */
/* set the context for a new task to unmapped */
/* A real context is allocated lazily by get_mmu_context().  Always 0. */
static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}
  93. /* find the context given to this process, and if it hasn't already
  94. got one, go get one for it. */
  95. static inline void get_mmu_context(struct mm_struct *mm)
  96. {
  97. if(mm->context == SUN3_INVALID_CONTEXT)
  98. mm->context = get_free_context(mm);
  99. }
  100. /* flush context if allocated... */
  101. static inline void destroy_context(struct mm_struct *mm)
  102. {
  103. if(mm->context != SUN3_INVALID_CONTEXT)
  104. clear_context(mm->context);
  105. }
/*
 * Make sure @mm owns a hardware context (allocating one on first use),
 * then program it into the Sun-3 MMU.  Order matters: the context must
 * be valid before sun3_put_context() loads it.
 */
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}
/*
 * Switch to the incoming task's address space on Sun-3.
 *
 * NOTE(review): this activates tsk->mm rather than @next; for a normal
 * user-task switch the two should be the same mm — confirm against the
 * scheduler's callers before changing.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	activate_context(tsk->mm);
}
  115. #define deactivate_mm(tsk,mm) do { } while (0)
/*
 * Activate @next_mm: allocate a Sun-3 hardware context if needed and
 * load it into the MMU via activate_context().
 */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}
  121. #endif
  122. #endif