tlb_hash64.c

/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
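
/*
 * For orientation only (not part of the original file): the per-CPU batch
 * declared above is defined in <asm/tlbflush.h>.  Based on how this file
 * uses it, it is roughly shaped like the sketch below; exact field types
 * and ordering may differ in the real header.
 */
#if 0
struct ppc64_tlb_batch {
	int		active;		/* set while in lazy MMU mode */
	unsigned long	index;		/* number of entries currently queued */
	struct mm_struct *mm;		/* all queued entries belong to one mm */
	real_pte_t	pte[PPC64_TLB_BATCH_NR];   /* queued (real) PTE values */
	unsigned long	vaddr[PPC64_TLB_BATCH_NR]; /* matching hash virtual addrs */
	unsigned int	psize;		/* single page size per batch */
	int		ssize;		/* single segment size per batch */
};
#endif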

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
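
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * PTE-modifying path is expected to feed hpte_need_flush().  The helper
 * name example_wrprotect_pte() is hypothetical; the real callers are the
 * pte_update()-style helpers in the pgtable headers, which run under the
 * PTE lock, so the "non-preempt region" requirement above is already met.
 */
#if 0
static void example_wrprotect_pte(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	unsigned long old = pte_val(*ptep);

	/* Clear _PAGE_RW in the linux PTE (the real helpers do this
	 * atomically with ldarx/stdcx.) */
	*ptep = __pte(old & ~_PAGE_RW);

	/* Only hashed PTEs have a hash table entry to invalidate */
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, 0);
}
#endif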

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
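
/*
 * Illustrative sketch (not part of the original file): batch->active is
 * driven by the lazy MMU mode hooks, whose real versions live in
 * <asm/tlbflush.h>.  The example_* names below are hypothetical and only
 * show the expected shape -- entering lazy MMU mode arms the batch,
 * leaving it drains any pending entries through __flush_tlb_pending().
 */
#if 0
static inline void example_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void example_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}
#endif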

void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	/* Push out batch of freed page tables */
	pte_free_finish();
}
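
/*
 * Illustrative sketch (not part of the original file): tlb_flush() above is
 * reached from the generic mmu_gather teardown.  The hypothetical caller
 * below only shows the shape of that flow under the mmu_gather API of this
 * kernel generation -- PTE clearing feeds the batch via hpte_need_flush(),
 * and tlb_finish_mmu() ends up in tlb_flush() before the gathered pages can
 * be reused.
 */
#if 0
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	/* ... clear the PTEs for [start, end), gathering the pages ... */

	tlb_finish_mmu(tlb, start, end);	/* drains the batch via tlb_flush() */
}
#endif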

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG */
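
/*
 * Illustrative usage sketch (not part of the original file): an IO hotplug
 * path of the kind described in the kerneldoc above might flush the hash
 * entries covering a bridge window like this.  The helper name and the
 * io_start/io_size parameters are hypothetical; real callers pass the
 * mapped range of the device being removed and generally use &init_mm.
 */
#if 0
static void example_remove_bridge_window(unsigned long io_start,
					 unsigned long io_size)
{
	/* Kill the HPTEs for the window but keep the linux PTEs intact,
	 * since neighbouring bridges may share the same 64K linux pages. */
	__flush_hash_table_range(&init_mm, io_start, io_start + io_size);
}
#endif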