#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
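
/* With GNU89 semantics, "extern inline" never emits an out-of-line
   body on its own.  The expectation (not spelled out here) is that one
   translation unit in the arch defines __EXTERN_INLINE itself before
   including this header, so that exactly one real copy of these
   helpers is generated for callers that fail to inline them;
   __MMU_EXTERN_INLINE records that this header supplied the default,
   so the #undefs further down only fire in that case.  */
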
extern void __load_new_mm_context(struct mm_struct *);

/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
        tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
}
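
/* EV5 and later have usable address-space numbers, so loading the new
   context (which allocates a fresh ASN) is enough to retire the old
   user translations.  EV4's broken ASNs force the explicit tbiap()
   above, which should drop every TLB entry not marked with the
   address-space-match (ASM) bit.  */
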
/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        int tbi_flag = 2;
        if (vma->vm_flags & VM_EXEC) {
                __load_new_mm_context(mm);
                tbi_flag = 3;
        }
        tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        if (vma->vm_flags & VM_EXEC)
                __load_new_mm_context(mm);
        else
                tbi(2, addr);
}
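
/* In both variants the VM_EXEC case reloads the whole context instead
   of (or in addition to) a bare tbi(): with no per-page icache
   invalidate (see the comment above), moving to a fresh ASN is what
   makes stale icache lines for the old mapping unreachable.  For the
   tbi() PALcode call, type 2 should be the D-stream-only single-page
   flush and type 3 the combined I- and D-stream flush.  */
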
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current              alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page         alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current             ev4_flush_tlb_current
#  define flush_tlb_current_page        ev4_flush_tlb_current_page
# else
#  define flush_tlb_current             ev5_flush_tlb_current
#  define flush_tlb_current_page        ev5_flush_tlb_current_page
# endif
#endif
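
/* CONFIG_ALPHA_GENERIC kernels support multiple CPU families in one
   image, so the choice between the ev4 and ev5 variants is deferred to
   runtime via the alpha_mv machine vector; single-platform kernels
   bind the right variant directly at compile time.  */
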
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
        flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
        unsigned long *mmc = &mm->context[smp_processor_id()];
        /* Check it's not zero first to avoid cacheline ping pong
           when possible.  */
        if (*mmc) *mmc = 0;
}
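
/* Zeroing mm->context[cpu] acts as a deferred flush: the context word
   caches the ASN assigned to this mm on that CPU, and a zero should
   force a fresh ASN to be allocated the next time the mm is activated
   there, at which point the stale translations can no longer match.  */
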
/* Flush a specified range of user mapping page tables from TLB.
   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
   not have fine-grained tlb flushing, so it will flush VPTE stuff
   during the next flush_tlb_range.  */
static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
                   unsigned long end)
{
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
        tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm == current->active_mm)
                flush_tlb_current_page(mm, vma, addr);
        else
                flush_tlb_other(mm);
}
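
/* A sketch of the intended call pattern (assumed, not taken from this
   file): after the generic VM rewrites a PTE it does roughly

        set_pte(ptep, pte);
        flush_tlb_page(vma, addr);

   so a currently-running mm gets the precise per-page flush, while an
   inactive mm takes the cheap flush_tlb_other() path instead.  */
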
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
                            unsigned long);

#endif /* CONFIG_SMP */

#define flush_tlb_kernel_range(start, end) flush_tlb_all()
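
/* Kernel-range flushes fall back to a full tbia() via flush_tlb_all():
   kernel translations are presumably mapped with the ASM bit set, so
   the per-mm ASN tricks above cannot retire them, and a whole-TLB
   flush is the simple correct choice.  */
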
#endif /* _ALPHA_TLBFLUSH_H */