/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
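
/*
 * User TLB flushes are batched per CPU: each tlb_batch collects up to
 * TLB_BATCH_NR virtual addresses, all belonging to the single mm in
 * tb->mm.  flush_tlb_pending() drains the batch, flushing the TSB
 * entries first and then, if the mm still owns a valid hardware
 * context (and thus can have live TLB entries at all), the TLB itself,
 * cross-calling the other CPUs on SMP.
 */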
void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        struct mm_struct *mm = tb->mm;

        if (!tb->tlb_nr)
                goto out;

        flush_tsb_user(tb);

        if (CTX_VALID(mm->context)) {
                if (tb->tlb_nr == 1) {
                        global_flush_tlb_page(mm, tb->vaddrs[0]);
                } else {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
        }

        tb->tlb_nr = 0;

out:
        put_cpu_var(tlb_batch);
}
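
/*
 * The generic mm code brackets batched page table updates with these
 * lazy-MMU hooks.  While tb->active is set, tlb_batch_add_one() queues
 * addresses instead of flushing each one synchronously.  Roughly (an
 * illustrative sketch, not code from this file):
 *
 *      arch_enter_lazy_mmu_mode();
 *      ...pte tear-downs feeding tlb_batch_add()...
 *      arch_leave_lazy_mmu_mode();     <- drains via flush_tlb_pending()
 */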
void arch_enter_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

        tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}
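
/*
 * Queue one address for a deferred flush.  The address is page
 * aligned, so its low bit is free; it is borrowed to flag executable
 * mappings, which the demap code uses to know that instruction-side
 * translations must be invalidated as well.  A pending batch for a
 * different mm is drained first, and the batch is also drained as
 * soon as this entry fills it.  Outside of lazy MMU mode the flush
 * happens immediately instead of being queued.
 */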
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (!tb->active) {
                global_flush_tlb_page(mm, vaddr);
                flush_tsb_user_page(mm, vaddr);
                goto out;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

out:
        put_cpu_var(tlb_batch);
}
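
/*
 * Called from the TLB gather path as a PTE is torn down.  The pre-sun4v
 * chips (hence the tlb_type != hypervisor check) have virtually-indexed
 * D-caches, so a dirty page whose kernel and user mappings disagree in
 * bit 13 -- the alias bit with 8KB pages -- can hold stale aliased
 * lines; the D-cache is flushed before the translation disappears.
 */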
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm)
{
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
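
/*
 * When set_pmd_at() replaces a pmd that still pointed at a regular pte
 * page, every valid 8KB translation beneath it must go.  Walk the
 * HPAGE_SIZE range one base page at a time and queue a flush for each
 * live entry.
 */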
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd, bool exec)
{
        unsigned long end;
        pte_t *pte;

        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
                if (pte_val(*pte) & _PAGE_VALID)
                        tlb_batch_add_one(mm, vaddr, exec);
                pte++;
                vaddr += PAGE_SIZE;
        }
        pte_unmap(pte);
}
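
/*
 * Install a new pmd and keep the per-mm count of huge mappings in
 * sync; that count is what later triggers allocation of the huge-page
 * TSB (on the first hugepage TLB miss, per the comment below).  The
 * old translation is flushed through the batch: one entry covers a
 * huge mapping, otherwise the underlying pte page is scanned.
 */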
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;

        if (mm == &init_mm)
                return;

        if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
                if (pmd_val(pmd) & PMD_ISHUGE)
                        mm->context.huge_pte_count++;
                else
                        mm->context.huge_pte_count--;

                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
                 * and thus we'll end up doing a GFP_KERNEL allocation
                 * in an atomic context.
                 *
                 * Instead, we let the first TLB miss on a hugepage
                 * take care of this.
                 */
        }

        if (!pmd_none(orig)) {
                bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

                addr &= HPAGE_MASK;
                if (pmd_val(orig) & PMD_ISHUGE)
                        tlb_batch_add_one(mm, addr, exec);
                else
                        tlb_batch_pmd_scan(mm, addr, orig, exec);
        }
}
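
/*
 * THP deposits a preallocated pte page table alongside each huge pmd
 * so that a later split cannot fail.  The deposited pages are strung
 * together on a list threaded through the pages themselves: the
 * struct list_head overlays the first two pte slots, which is why
 * pgtable_trans_huge_withdraw() zeroes those two entries before
 * handing the page back.
 */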
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) mm->pmd_huge_pte);
        mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */