pgtable.c

/*
 * This file contains the common routines for freeing page tables,
 * along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
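
/*
 * PTE_FREELIST_SIZE is simply how many pgtable_free_t slots fit in the
 * rest of the page that holds the batch header.  As a rough, illustrative
 * figure (assuming a 64-bit build with 4K pages): the header takes about
 * 24 bytes, leaving room for roughly (4096 - 24) / 8 = 509 entries per
 * batch.
 */
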
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}
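
/*
 * The empty cross-call above is the synchronization: smp_call_function()
 * with wait=1 does not return until every other CPU has taken the IPI,
 * and since page table walkers (e.g. the hash miss code) run with
 * interrupts disabled, no CPU can still be traversing the table by the
 * time the call returns.
 */
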
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
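
/*
 * call_rcu() defers pte_free_rcu_callback() until a full grace period
 * has elapsed, i.e. every CPU has passed through a quiescent state and
 * so can no longer hold a reference to any table in the batch; only
 * then are the tables (and the batch page itself) actually freed.
 */
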
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
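
/*
 * Note the three outcomes above: free immediately when no other CPU can
 * be walking this mm (single user, or it has only ever run on the local
 * CPU); fall back to the forced synchronous free when the GFP_ATOMIC
 * batch allocation fails; otherwise queue into the per-CPU batch and
 * submit it to RCU once it fills up.
 */
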
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
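
/*
 * pte_free_finish() submits whatever partial batch is pending on this
 * CPU; it is meant to run at the end of an mmu_gather sequence (from the
 * arch tlb_flush path), while preemption is still disabled.
 */
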
/*
 * Handle i/d cache flushing, called from set_pte_at() or
 * ptep_set_access_flags()
 */
static pte_t do_dcache_icache_coherency(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return pte;
	page = pfn_to_page(pfn);

	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
		pr_debug("do_dcache_icache_coherency... flushing\n");
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	} else
		pr_debug("do_dcache_icache_coherency... already clean\n");

	return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
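
/*
 * PG_arch_1 serves as an "icache is clean" marker here: once a page has
 * been flushed, further mappings of it skip the flush until the bit is
 * cleared again (when the page's contents change), so repeatedly mapping
 * the same clean page stays cheap.
 */
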
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}
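
/*
 * TRAP() == 0x400 corresponds to the instruction storage interrupt
 * vector, i.e. the fault was taken on an instruction fetch rather than
 * a data access.
 */
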
/* We only try to do i/d cache coherency on PTEs that look reasonably
 * "normal". We currently require a PTE to be present and we avoid
 * _PAGE_SPECIAL and _PAGE_NO_CACHE
 */
static inline int pte_looks_normal(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
		(_PAGE_PRESENT);
}
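
/*
 * In other words: mask out everything but the three bits of interest
 * and require that _PAGE_PRESENT is set while _PAGE_SPECIAL and
 * _PAGE_NO_CACHE are both clear.
 */
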
#if defined(CONFIG_PPC_STD_MMU)
/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). Else, we always flush
 * valid PTEs in set_pte.
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return set_pte && pte_looks_normal(pte) &&
		!(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
		  cpu_has_feature(CPU_FTR_NOEXECUTE));
}
#elif _PAGE_HWEXEC == 0
/* Embedded-type MMU without HW exec support (8xx only so far): we flush
 * the cache for any present PTE
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return set_pte && pte_looks_normal(pte);
}
#else
/* Other embedded CPUs with HW exec support per-page: we flush on exec
 * fault if HWEXEC is not set
 */
static inline int pte_need_exec_flush(pte_t pte, int set_pte)
{
	return pte_looks_normal(pte) && is_exec_fault() &&
		!(pte_val(pte) & _PAGE_HWEXEC);
}
#endif
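
/*
 * To summarize the three variants: classic hash MMUs flush eagerly at
 * set_pte time unless the CPU has a coherent icache or per-page
 * no-execute support; 8xx-style parts with no HW exec bit flush every
 * normal PTE at set_pte time; the remaining embedded parts flush
 * lazily, only on an actual exec fault to a page lacking _PAGE_HWEXEC.
 */
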
/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(*ptep));
#endif
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_need_exec_flush(pte, 1))
		pte = do_dcache_icache_coherency(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
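
/*
 * Clearing _PAGE_HPTEFLAGS above strips the hash-PTE bookkeeping bits
 * from the incoming PTE; those bits describe a hash table entry that
 * does not yet exist for this new mapping, so they must not be carried
 * over from a recycled PTE value.
 */
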
/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	if (!dirty && pte_need_exec_flush(entry, 0))
		entry = do_dcache_icache_coherency(entry);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(ptep, entry);
		flush_tlb_page_nohash(vma, address);
	}
	return changed;
}
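
/*
 * A non-zero return tells the generic fault code that the PTE was
 * actually updated (and its TLB entry flushed); a zero return means
 * the new value matched what was already there, so nothing was done.
 */
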
#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	BUG_ON(!spin_is_locked(pte_lockptr(mm, pmd)));
}
#endif /* CONFIG_DEBUG_VM */
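
/*
 * assert_pte_locked() walks the hierarchy by hand (pgd -> pud -> pmd)
 * and then checks the split page-table spinlock covering the PMD; with
 * CONFIG_DEBUG_VM off, callers presumably see an empty inline stub from
 * the headers instead, so the unconditional call above costs nothing.
 */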