tlbflush.h

#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_page(vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  (A rough usage sketch of these hooks follows this comment block.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
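/*
 * Illustrative usage sketch only (not part of this header): these hooks
 * are invoked by the generic VM code after page table updates.  Very
 * roughly, and assuming the usual generic call sites:
 *
 *	ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);		// one page gone
 *
 *	... change a run of PTEs under vma ...
 *	flush_tlb_range(vma, start, end);	// then flush the whole range
 *
 * The exact call sites live in the core mm code, not in this file.
 */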
#ifdef __KERNEL__

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

#define MMU_NO_CONTEXT		((unsigned int)-1)

/* Low-level TLB invalidation primitives */
extern void _tlbie(unsigned long address, unsigned int pid);
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
extern void _tlbil_va(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
#endif
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	_tlbil_pid(mm->context.id);
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	_tlbil_pid(mm->context.id);
}

static inline void local_flush_tlb_page(unsigned long vmaddr)
{
	_tlbil_va(vmaddr, 0);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	_tlbil_pid(vma->vm_mm->context.id);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	_tlbil_pid(0);
}
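/*
 * Illustrative sketch only, prompted by the TODO above: if per-page
 * invalidates ever turn out cheaper than dropping the whole PID, a
 * page-by-page flush_tlb_range() body might look roughly like this
 * (an assumption/alternative, not the implementation chosen above):
 *
 *	unsigned long addr;
 *
 *	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
 *		_tlbil_va(addr, vma->vm_mm->context.id);
 *
 * Whether that beats a single _tlbil_pid() depends on the size of the
 * range and the cost of individual invalidates on the core in question.
 */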
#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void local_flush_tlb_page(unsigned long vmaddr)
{
	flush_tlb_page(NULL, vmaddr);
}
#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}
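/*
 * Rough sketch of how the batch above is used (the callers live in the
 * hash MMU code, not here; this is a descriptive outline, not new API):
 *
 *	arch_enter_lazy_mmu_mode();	// batch->active = 1
 *	...				// PTE updates call hpte_need_flush(),
 *					// which queues (vaddr, pte) pairs
 *	arch_leave_lazy_mmu_mode();	// any queued entries are flushed via
 *					// __flush_tlb_pending()
 *
 * A batch is also flushed early once it fills up to PPC64_TLB_BATCH_NR
 * entries, using flush_hash_page()/flush_hash_range() underneath.
 */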
#define arch_flush_lazy_mmu_mode()	do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}
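/*
 * Note that the flush_tlb_*() hooks above are intentionally empty for the
 * 64-bit hash MMU: translations are torn down when the hash page table
 * entries themselves are invalidated (via hpte_need_flush() and the batch
 * mechanism above), so there is nothing left to do at these call sites.
 */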
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);

#endif

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */