tlbflush.h

#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern void __load_new_mm_context(struct mm_struct *);
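
/* Note: __load_new_mm_context() is defined with the mmu_context code,
   not here; it sets up a fresh ASN/context for the mm and loads it as
   the current hardware context.  */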

/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
        tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
}
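
/* Rationale (summarizing the comment above): on EV4/EV45 the ASNs
   cannot be relied on to keep stale per-process translations from
   matching, so after loading the new context we also issue tbiap()
   (PALcode "invalidate all process-specific TB entries").  On EV5 and
   later the fresh ASN picked up by __load_new_mm_context() is enough;
   the old entries simply no longer match.  */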

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here; there is no way to invalidate a
   specific icache page.  */
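
/* The tbi() selector values used below follow the PALcode TBI encoding
   (see the tbi/tbisd/tbis wrappers in asm/system.h): 2 = TBISD,
   invalidate a single data-stream entry; 3 = TBIS, invalidate a single
   entry in both the I- and D-streams.  */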

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        int tbi_flag = 2;
        if (vma->vm_flags & VM_EXEC) {
                __load_new_mm_context(mm);
                tbi_flag = 3;
        }
        tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        if (vma->vm_flags & VM_EXEC)
                __load_new_mm_context(mm);
        else
                tbi(2, addr);
}

#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif
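
/* For CONFIG_ALPHA_GENERIC the choice is made at run time through the
   machine vector.  A platform's machine vector would point the two
   hooks at the ev4 or ev5 variants above; roughly (illustrative sketch
   only, the real initializers live with the machine-vector code):

	.mv_flush_tlb_current      = ev5_flush_tlb_current,
	.mv_flush_tlb_current_page = ev5_flush_tlb_current_page,
 */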

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
        flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
        unsigned long *mmc = &mm->context[smp_processor_id()];
        /* Check it's not zero first to avoid cacheline ping pong
           when possible.  */
        if (*mmc) *mmc = 0;
}
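
/* Descriptive note: zeroing the per-CPU context word is what does the
   work here.  The next time this mm is activated on that CPU, a new
   ASN/context is set up from scratch, so the old translations can
   never be used again; the allocation itself lives in the mmu_context
   code.  */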

/* Flush a specified range of user mapping page tables from the TLB.
   Although Alpha uses VPTE caches, this can be a nop: Alpha has no
   fine-grained TLB flushing, so the VPTE entries will be flushed along
   with everything else on the next flush_tlb_range.  */
static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
                   unsigned long end)
{
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
        tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm == current->active_mm)
                flush_tlb_current_page(mm, vma, addr);
        else
                flush_tlb_other(mm);
}

/* Flush a specified range of user mappings.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
                            unsigned long);

#endif /* CONFIG_SMP */
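
/* Kernel-range flushes (e.g. after vmalloc/vfree changes) are simply
   mapped onto a full flush; no per-page kernel flush is provided here.  */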

#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif /* _ALPHA_TLBFLUSH_H */