tlb_nohash.c

/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares its implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

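/*
 * Flush a single (vmaddr, tsize, ind) translation for @mm on the local
 * CPU only. A NULL @mm is treated as PID 0.
 */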
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(tlbivax_lock);

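/*
 * Return non-zero when @mm has only been active on hardware threads of
 * the current core, in which case no cross-CPU invalidation is needed.
 */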
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

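/* Parameter block passed to the TLB flush IPI handlers below */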
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

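/*
 * IPI handlers: these run on the remote CPUs named in the cpumask handed
 * to smp_call_function_many() and perform the invalidation locally there.
 * A NULL param to do_flush_tlb_mm_ipi is treated as PID 0.
 */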
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
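/*
 * Flush all TLB entries for @mm: invalidate on the local CPU and, when the
 * mm has been active on other CPUs, send an IPI so they do the same.
 */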
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

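/*
 * Flush a single page translation for @mm everywhere it may be cached,
 * preferring a broadcast tlbivax when the hardware supports it and
 * falling back to IPIs otherwise.
 */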
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 0 /* tsize unused for now */, 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way (see the sketch after flush_tlb_range below
 * for what a threshold-based variant might look like).
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
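
/*
 * A minimal sketch of the threshold-based range flush suggested in the
 * comment above. This is NOT part of the original file and is kept under
 * "#if 0": TLB_RANGE_FLUSH_THRESHOLD and the overall structure are
 * illustrative assumptions only; a real implementation would want to tune
 * the cut-off and batch tlbivax/tlbsync where the hardware allows it.
 */
#if 0
#define TLB_RANGE_FLUSH_THRESHOLD	32	/* pages; hypothetical cut-off */

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	unsigned long addr;

	if (nr_pages > TLB_RANGE_FLUSH_THRESHOLD) {
		/* Large range: cheaper to nuke the whole context */
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Small range: invalidate page by page */
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		flush_tlb_page(vma, addr);
}
#endif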