context.c

/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
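
/*
 * mm->context.id is split in two: the lower ASID_BITS hold the hardware
 * ASID that is programmed into the CPU, and the upper bits hold a
 * "generation" (version) number. cpu_last_asid tracks the most recently
 * allocated value; an mm whose generation differs from that of
 * cpu_last_asid must be given a fresh ASID before it may run.
 */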
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
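
/*
 * Both variants of cpu_set_reserved_ttbr0() below point TTBR0 at page
 * tables containing only global entries, so that no ASID-tagged (user)
 * translations can be speculatively loaded while the ASID is changed.
 */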
#ifdef CONFIG_ARM_LPAE
void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif
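
/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written
 * to the upper bits of CONTEXTIDR on every context switch, so that
 * external debug and trace tools can identify the running process. The
 * lower ASID_BITS are left untouched for the ASID.
 */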
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
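	/*
	 * context.id 0 lies in generation 0, which never matches the
	 * generation of cpu_last_asid (initialised to ASID_FIRST_VERSION),
	 * so this mm is guaranteed a fresh ASID on first use.
	 */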
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}
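
/*
 * flush_context() runs on each CPU taking part in an ASID rollover:
 * switch to page tables with only global entries, invalidate the local
 * TLB and, on VIVT ASID-tagged I-caches, flush the I-cache as well,
 * since its lines are tagged with ASIDs that are about to be recycled.
 */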
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
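
/*
 * During a rollover each CPU takes cpu_last_asid + smp_processor_id() + 1
 * for its active mm: the initiating CPU computes this directly in
 * __new_context() and every other CPU does the same in reset_context(),
 * so all CPUs end up with distinct ASIDs. The initiator then advances
 * cpu_last_asid by NR_CPUS to reserve the whole range.
 */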
/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}
#else
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}
#endif
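
/*
 * Allocate a new ASID for 'mm'. Called when an mm whose ASID belongs to
 * an older generation than cpu_last_asid (or which has no ASID at all)
 * is about to run on this CPU.
 */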
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
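	/*
	 * cpu_last_asid is a full 32-bit counter; if the increment wraps
	 * it to zero, restart at ASID_FIRST_VERSION (generation 1, ASID 0).
	 */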
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}