/* Alpha TLB-flush primitives (asm-alpha/tlbflush.h). */
  1. #ifndef _ALPHA_TLBFLUSH_H
  2. #define _ALPHA_TLBFLUSH_H
  3. #include <linux/mm.h>
  4. #include <asm/compiler.h>
  5. #include <asm/pgalloc.h>
  6. #ifndef __EXTERN_INLINE
  7. #define __EXTERN_INLINE extern inline
  8. #define __MMU_EXTERN_INLINE
  9. #endif
  10. extern void __load_new_mm_context(struct mm_struct *);
/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
  13. __EXTERN_INLINE void
  14. ev4_flush_tlb_current(struct mm_struct *mm)
  15. {
  16. __load_new_mm_context(mm);
  17. tbiap();
  18. }
  19. __EXTERN_INLINE void
  20. ev5_flush_tlb_current(struct mm_struct *mm)
  21. {
  22. __load_new_mm_context(mm);
  23. }
/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, as there is no way to invalidate a
   specific icache page.  */
  27. __EXTERN_INLINE void
  28. ev4_flush_tlb_current_page(struct mm_struct * mm,
  29. struct vm_area_struct *vma,
  30. unsigned long addr)
  31. {
  32. int tbi_flag = 2;
  33. if (vma->vm_flags & VM_EXEC) {
  34. __load_new_mm_context(mm);
  35. tbi_flag = 3;
  36. }
  37. tbi(tbi_flag, addr);
  38. }
  39. __EXTERN_INLINE void
  40. ev5_flush_tlb_current_page(struct mm_struct * mm,
  41. struct vm_area_struct *vma,
  42. unsigned long addr)
  43. {
  44. if (vma->vm_flags & VM_EXEC)
  45. __load_new_mm_context(mm);
  46. else
  47. tbi(2, addr);
  48. }
  49. #ifdef CONFIG_ALPHA_GENERIC
  50. # define flush_tlb_current alpha_mv.mv_flush_tlb_current
  51. # define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
  52. #else
  53. # ifdef CONFIG_ALPHA_EV4
  54. # define flush_tlb_current ev4_flush_tlb_current
  55. # define flush_tlb_current_page ev4_flush_tlb_current_page
  56. # else
  57. # define flush_tlb_current ev5_flush_tlb_current
  58. # define flush_tlb_current_page ev5_flush_tlb_current_page
  59. # endif
  60. #endif
  61. #ifdef __MMU_EXTERN_INLINE
  62. #undef __EXTERN_INLINE
  63. #undef __MMU_EXTERN_INLINE
  64. #endif
  65. /* Flush current user mapping. */
  66. static inline void
  67. flush_tlb(void)
  68. {
  69. flush_tlb_current(current->active_mm);
  70. }
  71. /* Flush someone else's user mapping. */
  72. static inline void
  73. flush_tlb_other(struct mm_struct *mm)
  74. {
  75. unsigned long *mmc = &mm->context[smp_processor_id()];
  76. /* Check it's not zero first to avoid cacheline ping pong
  77. when possible. */
  78. if (*mmc) *mmc = 0;
  79. }
  80. #ifndef CONFIG_SMP
  81. /* Flush everything (kernel mapping may also have changed
  82. due to vmalloc/vfree). */
  83. static inline void flush_tlb_all(void)
  84. {
  85. tbia();
  86. }
  87. /* Flush a specified user mapping. */
  88. static inline void
  89. flush_tlb_mm(struct mm_struct *mm)
  90. {
  91. if (mm == current->active_mm)
  92. flush_tlb_current(mm);
  93. else
  94. flush_tlb_other(mm);
  95. }
  96. /* Page-granular tlb flush. */
  97. static inline void
  98. flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
  99. {
  100. struct mm_struct *mm = vma->vm_mm;
  101. if (mm == current->active_mm)
  102. flush_tlb_current_page(mm, vma, addr);
  103. else
  104. flush_tlb_other(mm);
  105. }
  106. /* Flush a specified range of user mapping. On the Alpha we flush
  107. the whole user tlb. */
  108. static inline void
  109. flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  110. unsigned long end)
  111. {
  112. flush_tlb_mm(vma->vm_mm);
  113. }
  114. #else /* CONFIG_SMP */
  115. extern void flush_tlb_all(void);
  116. extern void flush_tlb_mm(struct mm_struct *);
  117. extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
  118. extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
  119. unsigned long);
  120. #endif /* CONFIG_SMP */
  121. static inline void flush_tlb_kernel_range(unsigned long start,
  122. unsigned long end)
  123. {
  124. flush_tlb_all();
  125. }
  126. #endif /* _ALPHA_TLBFLUSH_H */