mmu_context.h — 2.9 KB
  1. /*
  2. * include/asm-xtensa/mmu_context.h
  3. *
  4. * Switch an MMU context.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file "COPYING" in the main directory of this archive
  8. * for more details.
  9. *
  10. * Copyright (C) 2001 - 2005 Tensilica Inc.
  11. */
  12. #ifndef _XTENSA_MMU_CONTEXT_H
  13. #define _XTENSA_MMU_CONTEXT_H
  14. #include <linux/stringify.h>
  15. #include <linux/sched.h>
  16. #include <asm/pgtable.h>
  17. #include <asm/cacheflush.h>
  18. #include <asm/tlbflush.h>
  19. #include <asm-generic/mm_hooks.h>
  20. #define XCHAL_MMU_ASID_BITS 8
  21. #if (XCHAL_HAVE_TLBS != 1)
  22. # error "Linux must have an MMU!"
  23. #endif
  24. extern unsigned long asid_cache;
  25. /*
  26. * NO_CONTEXT is the invalid ASID value that we don't ever assign to
  27. * any user or kernel context.
  28. *
  29. * 0 invalid
  30. * 1 kernel
  31. * 2 reserved
  32. * 3 reserved
  33. * 4...255 available
  34. */
#define NO_CONTEXT 0
#define ASID_USER_FIRST 4
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
 * Build the RASID register image from a user ASID 'x':
 * byte 0 is 0x01 (the kernel ASID), byte 1 carries the low 8 bits of
 * 'x', and bytes 2/3 hold the reserved ASIDs 0x02/0x03 — matching the
 * 0/kernel/reserved/reserved layout documented above.
 * NOTE(review): presumably each byte is the ASID for the matching
 * protection ring (ring 1 = user) — confirm against the Xtensa ISA.
 */
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
/*
 * Write 'val' (an ASID_INSERT() image) to the RASID special register,
 * then isync so the new ASID assignment is in effect before any
 * subsequent instruction fetch or load/store.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
	" isync\n" : : "a" (val));
}
  44. static inline unsigned long get_rasid_register (void)
  45. {
  46. unsigned long tmp;
  47. __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
  48. return tmp;
  49. }
  50. static inline void
  51. __get_new_mmu_context(struct mm_struct *mm)
  52. {
  53. extern void flush_tlb_all(void);
  54. if (! (++asid_cache & ASID_MASK) ) {
  55. flush_tlb_all(); /* start new asid cycle */
  56. asid_cache += ASID_USER_FIRST;
  57. }
  58. mm->context = asid_cache;
  59. }
/*
 * Make 'mm' the hardware-visible context: program RASID with its ASID
 * image and invalidate the cached page-directory entry so translations
 * are re-walked under the new context.
 */
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}
  66. /*
  67. * Initialize the context related info for a new mm_struct
  68. * instance.
  69. */
  70. static inline int
  71. init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  72. {
  73. mm->context = NO_CONTEXT;
  74. return 0;
  75. }
  76. /*
  77. * After we have set current->mm to a new value, this activates
  78. * the context for the new mm so we see the new mappings.
  79. */
  80. static inline void
  81. activate_mm(struct mm_struct *prev, struct mm_struct *next)
  82. {
  83. /* Unconditionally get a new ASID. */
  84. __get_new_mmu_context(next);
  85. __load_mmu_context(next);
  86. }
/*
 * Switch this CPU to 'next's address space.  mm->context holds both
 * the ASID (low ASID_MASK bits) and the allocation generation
 * ("version", the upper bits, mirroring asid_cache).  If next's
 * context was allocated under an older generation — or never
 * allocated (NO_CONTEXT) — its ASID may have been recycled for
 * another mm, so a fresh one is taken before loading.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */
	/* XOR cancels matching version bits; any difference outside
	   ASID_MASK means the generations differ. */
	if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
		__get_new_mmu_context(next);
	__load_mmu_context(next);
}
  96. #define deactivate_mm(tsk, mm) do { } while(0)
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 *
 * NOTE(review): this invalidates the cached page directory
 * unconditionally, regardless of whether 'mm' is the currently loaded
 * context — confirm that is intended (mm itself is unused here).
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
/* Lazy-TLB mode needs no arch-specific work on xtensa. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
  109. #endif /* _XTENSA_MMU_CONTEXT_H */