tlbflush.h

#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>

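/*
 * Raw accessors for %cr3, the page-table base register.  Writing %cr3
 * has the architectural side effect of flushing all non-global TLB
 * entries.
 */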
static inline unsigned long get_cr3(void)
{
        unsigned long cr3;
        asm volatile("mov %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline void set_cr3(unsigned long cr3)
{
        asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
}

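/* Flush all non-global TLB entries by reloading %cr3 with its current value. */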
static inline void __flush_tlb(void)
{
        set_cr3(get_cr3());
}

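/*
 * Raw accessors for %cr4; used below to toggle the PGE (global pages)
 * enable bit.
 */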
static inline unsigned long get_cr4(void)
{
        unsigned long cr4;
        asm volatile("mov %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void set_cr4(unsigned long cr4)
{
        asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
}

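/*
 * Flush every TLB entry, including global ones: clearing and then
 * restoring CR4.PGE invalidates global entries, which a plain %cr3
 * reload leaves in place.
 */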
static inline void __flush_tlb_all(void)
{
        unsigned long cr4 = get_cr4();
        set_cr4(cr4 & ~X86_CR4_PGE);    /* clear PGE */
        set_cr4(cr4);                   /* write old PGE again and flush TLBs */
}

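/* Invalidate the TLB entry for a single virtual address with INVLPG. */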
#define __flush_tlb_one(addr) \
        __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")

/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct's TLB entries
 * - flush_tlb_all() flushes all processes' TLB entries
 * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or the full virtual address
 * space, so for a range flush we always flush the whole address space.
 * It might be worth testing whether a few INVLPGs in a row are a win
 * for small ranges.
 */

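/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * generic mm code updates the page tables first and only then
 * invalidates the stale translation, e.g.
 *
 *      set_pte(ptep, mk_pte(page, vma->vm_page_prot));
 *      flush_tlb_page(vma, address);
 *
 * The exact pte helpers and locking vary by kernel version; this only
 * shows where the flush_* primitives fit in.
 */
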
#ifndef CONFIG_SMP

#define flush_tlb()             __flush_tlb()
#define flush_tlb_all()         __flush_tlb_all()
#define local_flush_tlb()       __flush_tlb()

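/*
 * On UP there is only the local TLB, so a flush is needed only when the
 * mm being flushed is the one currently loaded in %cr3.
 */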
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
        __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()     flush_tlb_current_task()

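/*
 * x86-64 has no ranged flush primitive (see the comment above), so a
 * range flush on SMP simply flushes the whole mm.
 */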
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

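/*
 * Per-CPU TLB state for SMP lazy-TLB handling: TLBSTATE_OK means the CPU
 * is actively using its current mm and must service remote flush
 * requests; TLBSTATE_LAZY means it is running with a borrowed mm (e.g. a
 * kernel thread) and can drop out of further flush IPIs.
 */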
#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

/*
 * Roughly one IPI every 20MB with 4k pages when freeing page-table
 * ranges.  Cost is about 42k of memory for each CPU.
 */
#define ARCH_FREE_PTE_NR 5350

#endif /* CONFIG_SMP */

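/* There is no ranged kernel flush either; fall back to a full flush. */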
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /*
         * x86-64 does not keep any page-table caches in a software TLB.
         * The CPU does cache them in its hardware TLBs, but those are
         * handled by the normal TLB flushing algorithms.
         */
}

#endif /* _X8664_TLBFLUSH_H */