tlbflush_64.h

#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/system.h>
/* Reloading CR3 with its current value flushes all non-global TLB entries. */
static inline void __flush_tlb(void)
{
	write_cr3(read_cr3());
}

/*
 * Toggling CR4.PGE off and back on flushes the whole TLB, including
 * global (_PAGE_GLOBAL) entries that survive a CR3 reload.
 */
static inline void __flush_tlb_all(void)
{
	unsigned long cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
	write_cr4(cr4);			/* write old PGE again and flush TLBs */
}
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
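
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): picking the cheapest local flush from the primitives above.
 * A single page uses INVLPG via __flush_tlb_one(); anything larger
 * reloads CR3, which drops all non-global entries in one go.
 */
static inline void example_local_flush(unsigned long start, unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		__flush_tlb_one(start);	/* one entry: INVLPG is cheapest */
	else
		__flush_tlb();		/* full flush of non-global entries */
}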
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or the full address space, so
 * for a range flush we always flush the whole address space. It might
 * be worth testing whether a few INVLPGs in a row are a win for small
 * ranges.
 */
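
/*
 * Illustrative quick reference (a sketch derived from the list above,
 * not part of the original header):
 *
 *	changed one user PTE          -> flush_tlb_page(vma, addr)
 *	changed a batch of user PTEs  -> flush_tlb_range(vma, start, end)
 *	changed kernel mappings       -> flush_tlb_kernel_range(start, end)
 *	tore down a whole mm          -> flush_tlb_mm(mm)
 */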
#ifndef CONFIG_SMP

/* UP: only this CPU's TLB can hold stale entries, and only the active
   mm is live in it, so everything reduces to a local flush. */

#define flush_tlb()		__flush_tlb()
#define flush_tlb_all()		__flush_tlb_all()
#define local_flush_tlb()	__flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

/* Per-CPU TLB state for the lazy-TLB flush IPI protocol: */
#define TLBSTATE_OK	1	/* mm is loaded; flush when asked */
#define TLBSTATE_LAZY	2	/* mm held lazily; drop it instead of flushing */
/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
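
/*
 * Worked numbers for the constant above (assuming the batch holds
 * 8-byte struct page pointers): 5350 * 4KB = ~20.9MB freed between
 * IPIs, and 5350 * 8 bytes = ~42KB of batch storage per CPU.
 */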
#endif
/* Kernel mappings are global (_PAGE_GLOBAL) and shared by every CPU,
   so a kernel range flush is simply a full global flush everywhere. */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	flush_tlb_all();
}
static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* x86_64 does not keep any page table caches in a software TLB.
	   The CPUs do cache page table entries in their hardware TLBs,
	   but those are handled by the normal TLB flushing algorithms. */
}
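
/*
 * Usage sketches (illustrative only; the example_* names are
 * hypothetical and not part of this header). After a user PTE changes,
 * the stale entry must be shot down on every CPU that may cache it; on
 * SMP flush_tlb_page() does this with IPIs, on UP it reduces to a
 * local INVLPG.
 */
static inline void example_after_pte_update(struct vm_area_struct *vma,
	unsigned long address)
{
	/* ... the PTE backing 'address' was just modified ... */
	flush_tlb_page(vma, address);
}

/*
 * Likewise after kernel page tables change (e.g. a vmalloc-style
 * unmap): since the range variants fall back to full flushes on
 * x86-64, this ends up as a global flush on all CPUs.
 */
static inline void example_after_kernel_unmap(unsigned long start,
	unsigned long end)
{
	flush_tlb_kernel_range(start, end);
}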
#endif /* _X8664_TLBFLUSH_H */