/*
 * include/asm-ppc/tlbflush.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  9. #ifdef __KERNEL__
  10. #ifndef _PPC_TLBFLUSH_H
  11. #define _PPC_TLBFLUSH_H
  12. #include <linux/config.h>
  13. #include <linux/mm.h>
  14. extern void _tlbie(unsigned long address);
  15. extern void _tlbia(void);
  16. #if defined(CONFIG_4xx)
  17. #ifndef CONFIG_44x
  18. #define __tlbia() asm volatile ("sync; tlbia; isync" : : : "memory")
  19. #else
  20. #define __tlbia _tlbia
  21. #endif
  22. static inline void flush_tlb_mm(struct mm_struct *mm)
  23. { __tlbia(); }
  24. static inline void flush_tlb_page(struct vm_area_struct *vma,
  25. unsigned long vmaddr)
  26. { _tlbie(vmaddr); }
  27. static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
  28. unsigned long vmaddr)
  29. { _tlbie(vmaddr); }
  30. static inline void flush_tlb_range(struct vm_area_struct *vma,
  31. unsigned long start, unsigned long end)
  32. { __tlbia(); }
  33. static inline void flush_tlb_kernel_range(unsigned long start,
  34. unsigned long end)
  35. { __tlbia(); }
  36. #elif defined(CONFIG_FSL_BOOKE)
  37. /* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
  38. * are best implemented as tlbia vs specific tlbie's */
  39. #define __tlbia() _tlbia()
  40. static inline void flush_tlb_mm(struct mm_struct *mm)
  41. { __tlbia(); }
  42. static inline void flush_tlb_page(struct vm_area_struct *vma,
  43. unsigned long vmaddr)
  44. { _tlbie(vmaddr); }
  45. static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
  46. unsigned long vmaddr)
  47. { _tlbie(vmaddr); }
  48. static inline void flush_tlb_range(struct vm_area_struct *vma,
  49. unsigned long start, unsigned long end)
  50. { __tlbia(); }
  51. static inline void flush_tlb_kernel_range(unsigned long start,
  52. unsigned long end)
  53. { __tlbia(); }
  54. #elif defined(CONFIG_8xx)
  55. #define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
  56. static inline void flush_tlb_mm(struct mm_struct *mm)
  57. { __tlbia(); }
  58. static inline void flush_tlb_page(struct vm_area_struct *vma,
  59. unsigned long vmaddr)
  60. { _tlbie(vmaddr); }
  61. static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
  62. unsigned long vmaddr)
  63. { _tlbie(vmaddr); }
  64. static inline void flush_tlb_range(struct mm_struct *mm,
  65. unsigned long start, unsigned long end)
  66. { __tlbia(); }
  67. static inline void flush_tlb_kernel_range(unsigned long start,
  68. unsigned long end)
  69. { __tlbia(); }
  70. #else /* 6xx, 7xx, 7xxx cpus */
  71. struct mm_struct;
  72. struct vm_area_struct;
  73. extern void flush_tlb_mm(struct mm_struct *mm);
  74. extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
  75. extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
  76. extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  77. unsigned long end);
  78. extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  79. #endif
  80. /*
  81. * This is called in munmap when we have freed up some page-table
  82. * pages. We don't need to do anything here, there's nothing special
  83. * about our page-table pages. -- paulus
  84. */
  85. static inline void flush_tlb_pgtables(struct mm_struct *mm,
  86. unsigned long start, unsigned long end)
  87. {
  88. }
  89. /*
  90. * This gets called at the end of handling a page fault, when
  91. * the kernel has put a new PTE into the page table for the process.
  92. * We use it to ensure coherency between the i-cache and d-cache
  93. * for the page which has just been mapped in.
  94. * On machines which use an MMU hash table, we use this to put a
  95. * corresponding HPTE into the hash table ahead of time, instead of
  96. * waiting for the inevitable extra hash-table miss exception.
  97. */
  98. extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  99. #endif /* _PPC_TLBFLUSH_H */
  100. #endif /*__KERNEL__ */