/* tlb_32.c */

#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>

#include <asm/tlbflush.h>

DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
                        ____cacheline_aligned = { &init_mm, 0, };

/* must come after the send_IPI functions above for inlining */
#include <mach_ipi.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_var_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
void leave_mm(int cpu)
{
        BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
        cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
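
/*
 * Typical consequence (a sketch summarizing the code below, not new
 * behaviour): a CPU that only holds an mm lazily and receives a flush
 * for it simply drops the mm instead of flushing, roughly
 *
 *	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
 *		leave_mm(smp_processor_id());
 *
 * See smp_invalidate_interrupt() and do_flush_tlb_all() below.
 */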

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt() ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now smp_invalidate_interrupt() won't call leave_mm() if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm() was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
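
/*
 * For orientation, the 1b) steps above correspond roughly to the
 * following (paraphrased from switch_mm(); a sketch, not the
 * authoritative definition):
 *
 *	x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
 *	if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
 *		load_cr3(next->pgd);	<-- bit was clear: leave_mm() was
 *					    called here, so reload %cr3
 */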

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpumask_test_cpu(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the Intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
                if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpumask_clear_cpu(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
        inc_irq_stat(irq_tlb_count);
}
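
/*
 * Sender side of the handshake above: publish flush_mm/flush_va under
 * tlbstate_lock, IPI the CPUs that still have the mm loaded, then spin
 * until every target has cleared its own bit in flush_cpumask from
 * smp_invalidate_interrupt().
 */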
void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long va)
{
        /*
         * - mask must exist :)
         */
        BUG_ON(cpumask_empty(cpumask));
        BUG_ON(!mm);

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * AK: x86-64 has a faster method that could be ported.
         */
        spin_lock(&tlbstate_lock);

        cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
#ifdef CONFIG_HOTPLUG_CPU
        /* If a CPU which we ran on has gone down, OK. */
        cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
        if (unlikely(cpumask_empty(flush_cpumask))) {
                spin_unlock(&tlbstate_lock);
                return;
        }
#endif
        flush_mm = mm;
        flush_va = va;

        /*
         * Make the above memory operations globally visible before
         * sending the IPI.
         */
        smp_mb();

        /*
         * We have to send the IPI only to the CPUs affected.
         */
        send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpumask_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                cpu_relax();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
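
/*
 * Note: the helpers below reach this function through flush_tlb_others(),
 * which normally maps to native_flush_tlb_others() (a paravirtualized
 * kernel may route it to a hypervisor-specific implementation instead).
 */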

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        local_flush_tlb();
        if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
}
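
/*
 * In the two helpers below, a non-NULL current->mm means this CPU is
 * actively running user space on the mm being flushed, so it flushes its
 * own TLB; a NULL current->mm means a kernel thread only holds the mm
 * lazily, so leave_mm() drops it instead of flushing.
 */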
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(&mm->cpu_vm_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
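
/*
 * Usage sketch (illustrative only, not taken from this file): a caller
 * that has just rewritten a single PTE would typically follow up with
 *
 *	flush_tlb_page(vma, address);
 *
 * which flushes that one entry locally and IPIs only the CPUs recorded
 * in mm->cpu_vm_mask.
 */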

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        /* A lazy-TLB cpu drops its borrowed mm so later flushes can skip it. */
        if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

void reset_lazy_tlbstate(void)
{
        int cpu = raw_smp_processor_id();

        per_cpu(cpu_tlbstate, cpu).state = 0;
        per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}

static int init_flush_cpumask(void)
{
        alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
        return 0;
}
early_initcall(init_flush_cpumask);