/* tlbflush.h */
#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/system.h>
/*
 * Reloading CR3 with its current value invalidates all non-global
 * TLB entries for the current address space.
 */
static inline void __flush_tlb(void)
{
	write_cr3(read_cr3());
}
/*
 * Toggling CR4.PGE flushes the TLBs completely, including the global
 * pages that a plain CR3 reload leaves intact.
 */
static inline void __flush_tlb_all(void)
{
	unsigned long cr4 = read_cr4();
	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
	write_cr4(cr4);			/* write old PGE again and flush TLBs */
}
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
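/*
 * Usage sketch (illustrative only; set_pte()/mk_pte() stand in for
 * whatever page-table update a real caller performs first):
 *
 *	set_pte(kpte, mk_pte(page, prot));
 *	__flush_tlb_one(address);
 *
 * INVLPG invalidates only the TLB entry for 'address', which is much
 * cheaper than a full CR3 reload when a single page has changed.
 */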
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we therefore always do the full VM. It might be worth checking whether,
 * for a small range, a few INVLPGs in a row are a win (see the sketch
 * below).
 */
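/*
 * A sketch of the small-range optimization speculated about above.
 * Not wired up anywhere, and whether it beats a full flush would need
 * measurement; __flush_tlb_range_invlpg() is a hypothetical name.
 */
#if 0
static inline void __flush_tlb_range_invlpg(unsigned long start,
					    unsigned long end)
{
	unsigned long addr;

	/* One INVLPG per 4k page in [start, end) instead of a full flush. */
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		__flush_tlb_one(addr);
}
#endif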
#ifndef CONFIG_SMP

#define flush_tlb()		__flush_tlb()
#define flush_tlb_all()		__flush_tlb_all()
#define local_flush_tlb()	__flush_tlb()
/*
 * On UP, stale entries can only exist for the currently loaded
 * address space, so flushes for any other mm are no-ops.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
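/*
 * Caller sketch (illustrative; ptep_get_and_clear() stands in for the
 * page-table update a real caller performs first):
 *
 *	pte = ptep_get_and_clear(ptep);
 *	flush_tlb_page(vma, address);
 *
 * The page table must be updated before the flush, otherwise another
 * access could reload the stale translation into the TLB.
 */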
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/* No ranged flush primitive on x86-64: flush the whole mm. */
	flush_tlb_mm(vma->vm_mm);
}
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
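/*
 * Checking the numbers above: 5350 pages * 4k = ~20.9MB of mappings
 * freed between IPIs, and batching 5350 entries (presumably 8-byte
 * struct page pointers) costs 5350 * 8 = ~42k per CPU.
 */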
#endif
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
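/*
 * With no ranged primitive, flushing a kernel range (for example after
 * unmapping a vmalloc area) degenerates into a full flush; a call such
 * as
 *
 *	flush_tlb_kernel_range(start, start + size);
 *
 * simply expands to flush_tlb_all(). start/size here are illustrative.
 */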
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* x86-64 does not keep any page table caches in a software TLB.
	   The CPUs do in their hardware TLBs, but those are handled by
	   the normal TLB flushing algorithms, so this is a no-op. */
}

#endif /* _X8664_TLBFLUSH_H */