/* asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #define FREE_PTE_NR	506
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.  This structure
 * can be per-CPU or per-MM as the page table lock is held for the duration of
 * TLB shootdown.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	unsigned long		freed;
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
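
/*
 * Illustrative note, not part of the original header: an architecture that
 * uses this generic shootdown code is expected to provide the hooks referenced
 * in this file (tlb_flush(), __tlb_remove_tlb_entry(), __pte_free_tlb(),
 * __pmd_free_tlb() and, unless __ARCH_HAS_4LEVEL_HACK is set, __pud_free_tlb())
 * in its own <asm/tlb.h>, and to instantiate the per-CPU storage declared
 * above in its mm setup code, typically as:
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */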
  45. /* tlb_gather_mmu
  46. * Return a pointer to an initialized struct mmu_gather.
  47. */
  48. static inline struct mmu_gather *
  49. tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
  50. {
  51. struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
  52. tlb->mm = mm;
  53. /* Use fast mode if only one CPU is online */
  54. tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
  55. tlb->fullmm = full_mm_flush;
  56. tlb->freed = 0;
  57. return tlb;
  58. }
  59. static inline void
  60. tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  61. {
  62. if (!tlb->need_flush)
  63. return;
  64. tlb->need_flush = 0;
  65. tlb_flush(tlb);
  66. if (!tlb_fast_mode(tlb)) {
  67. free_pages_and_swap_cache(tlb->pages, tlb->nr);
  68. tlb->nr = 0;
  69. }
  70. }
  71. /* tlb_finish_mmu
  72. * Called at the end of the shootdown operation to free up any resources
  73. * that were required. The page table lock is still held at this point.
  74. */
  75. static inline void
  76. tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  77. {
  78. int freed = tlb->freed;
  79. struct mm_struct *mm = tlb->mm;
  80. int rss = get_mm_counter(mm, rss);
  81. if (rss < freed)
  82. freed = rss;
  83. add_mm_counter(mm, rss, -freed);
  84. tlb_flush_mmu(tlb, start, end);
  85. /* keep the page table cache within bounds */
  86. check_pgt_cache();
  87. }
  88. static inline unsigned int
  89. tlb_is_full_mm(struct mmu_gather *tlb)
  90. {
  91. return tlb->fullmm;
  92. }
  93. /* tlb_remove_page
  94. * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
  95. * handling the additional races in SMP caused by other CPUs caching valid
  96. * mappings in their TLBs.
  97. */
  98. static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  99. {
  100. tlb->need_flush = 1;
  101. if (tlb_fast_mode(tlb)) {
  102. free_page_and_swap_cache(page);
  103. return;
  104. }
  105. tlb->pages[tlb->nr++] = page;
  106. if (tlb->nr >= FREE_PTE_NR)
  107. tlb_flush_mmu(tlb, 0, 0);
  108. }
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep)					\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep);			\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp);			\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp);			\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */
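
/*
 * Illustrative usage sketch, not part of the original header.  The shootdown
 * API is driven from the mm code (mm/memory.c) in roughly this order; the
 * function name and the page-table walk below are hypothetical placeholders:
 *
 *	static void example_unmap_range(struct mm_struct *mm,
 *					unsigned long start, unsigned long end)
 *	{
 *		struct mmu_gather *tlb;
 *
 *		spin_lock(&mm->page_table_lock);
 *		tlb = tlb_gather_mmu(mm, 0);	(0: not a full-mm teardown)
 *
 *		for each present pte in [start, end):
 *			tlb_remove_tlb_entry(tlb, ptep, address);
 *			tlb_remove_page(tlb, page);
 *			tlb->freed++;		(rss accounting, subtracted
 *						 again in tlb_finish_mmu())
 *
 *		tlb_finish_mmu(tlb, start, end); (flush the TLB, free pages)
 *		spin_unlock(&mm->page_table_lock);
 *	}
 */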