/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)          ((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)      0
#define FREE_PTE_NR             500
#else
#define tlb_fast_mode(tlb)      1
#define FREE_PTE_NR             0
#endif

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
        struct page             *pages[FREE_PTE_NR];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
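
/*
 * A rough sketch of how the generic mm code drives this interface for
 * case 1 above (unmapping a range of vmas); the actual call sites live
 * in mm/memory.c and mm/mmap.c:
 *
 *      struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *      tlb_start_vma(tlb, vma);
 *      ...for each pte being torn down...
 *              tlb_remove_tlb_entry(tlb, ptep, addr);
 *              tlb_remove_page(tlb, page);
 *      tlb_end_vma(tlb, vma);
 *      tlb_finish_mmu(tlb, start, end);
 */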
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || !tlb->vma)
                flush_tlb_mm(tlb->mm);
        else if (tlb->range_end > 0) {
                flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}
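
/*
 * Widen the pending flush range to cover @addr.  For a full-mm flush the
 * range is irrelevant, since tlb_flush() will flush the whole mm anyway.
 */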
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
        if (!tlb->fullmm) {
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
                if (addr + PAGE_SIZE > tlb->range_end)
                        tlb->range_end = addr + PAGE_SIZE;
        }
}
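
/*
 * Flush the gathered TLB entries and, when page freeing is being delayed,
 * release the batched pages together with their swap cache entries.
 */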
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush(tlb);
        if (!tlb_fast_mode(tlb)) {
                free_pages_and_swap_cache(tlb->pages, tlb->nr);
                tlb->nr = 0;
        }
}
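
/*
 * Start a gather operation.  get_cpu_var() pins us to this CPU (and
 * disables preemption), so the caller must pair this with
 * tlb_finish_mmu() and must not sleep in between.
 */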
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;
        tlb->vma = NULL;
        tlb->nr = 0;

        return tlb;
}
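
/*
 * Finish the gather: perform any final TLB flush, free any remaining
 * batched pages, and release the per-CPU mmu_gather.
 */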
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}

/*
 * In the vma handling case, we can optimise these away when we're doing
 * a full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * cover only the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
                tlb->vma = vma;
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}
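
/*
 * Flush whatever range was gathered while this vma's pages were being
 * unmapped; a no-op for the full-mm case.
 */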
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                tlb_flush(tlb);
}
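
/*
 * In fast mode the page can be freed immediately; otherwise it is batched
 * in tlb->pages and freed by tlb_flush_mmu() once the batch is full, after
 * the relevant TLB entries have been invalidated.
 */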
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
        } else {
                tlb->pages[tlb->nr++] = page;
                if (tlb->nr >= FREE_PTE_NR)
                        tlb_flush_mmu(tlb);
        }
}
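
/*
 * Free a pte page table page: run the pgtable destructor, make sure the
 * address it mapped is covered by the pending TLB flush, then free the
 * page itself through the gather.
 */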
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        unsigned long addr)
{
        pgtable_page_dtor(pte);
        tlb_add_flush(tlb, addr);
        tlb_remove_page(tlb, pte);
}

#define pte_free_tlb(tlb, ptep, addr)   __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)          do { } while (0)

#endif /* CONFIG_MMU */
#endif