tlbflush.h

#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
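
/*
 * Reloading CR3 with its current value flushes all non-global TLB
 * entries for the current address space; global pages (CR4.PGE set)
 * survive a CR3 reload.
 */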
static inline void __native_flush_tlb(void)
{
	write_cr3(read_cr3());
}
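
/*
 * Toggling CR4.PGE off and back on flushes the entire TLB, including
 * global entries, which a plain CR3 reload would leave in place.
 */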
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4 = read_cr4();

	/* clear PGE */
	write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	write_cr4(cr4);
}
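
/*
 * INVLPG invalidates the TLB entry (or entries) mapping the page that
 * contains @addr, leaving the rest of the TLB intact.
 */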
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}
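
/*
 * TLB_FLUSH_ALL is the sentinel "va" value used in the SMP flush path to
 * request a flush of the whole address space rather than a single page.
 */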
#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ... but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs.  For a range flush
 * we always do the full VM.  It might be worth checking whether a few
 * INVLPGs in a row are a win for small ranges.
 */
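
/*
 * Illustrative usage sketch (not part of this header; the helper name
 * below is made up): after installing a new PTE for a user page, a
 * caller drops the stale translation for just that address with
 * flush_tlb_page():
 *
 *	static void example_remap_one_page(struct vm_area_struct *vma,
 *					   unsigned long addr,
 *					   pte_t *ptep, pte_t newpte)
 *	{
 *		set_pte_at(vma->vm_mm, addr, ptep, newpte);
 *		flush_tlb_page(vma, addr);
 *	}
 */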

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
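
/* On UP there are no other CPUs whose TLBs could need flushing. */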
static inline void native_flush_tlb_others(const cpumask_t *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

#else /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
			     unsigned long va);
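
/*
 * Per-CPU TLB state (assumed semantics, see the SMP TLB flush code):
 * TLBSTATE_OK means the CPU is actively using cpu_tlbstate.active_mm and
 * must honour flush IPIs; TLBSTATE_LAZY means it is only borrowing the mm
 * (lazy TLB mode) and may simply drop it instead of flushing.
 */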
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

#ifdef CONFIG_X86_32
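/*
 * The padding keeps each CPU's tlb_state on its own cache line so that
 * updates by one CPU do not cause false sharing with another.
 */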
struct tlb_state {
	struct mm_struct *active_mm;
	int state;
	char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
#endif

#endif /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
#endif
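
/*
 * Kernel mappings may be global and are shared by every address space,
 * so flushing a kernel range simply falls back to a full flush on all
 * CPUs via flush_tlb_all().
 */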
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ASM_X86_TLBFLUSH_H */