/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier *mn;
        int id;

        /*
         * srcu_read_lock() here will block synchronize_srcu() in
         * mmu_notifier_unregister() until all registered
         * ->release() callouts this function makes have
         * returned.
         */
        id = srcu_read_lock(&srcu);
        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
                /*
                 * Unlink. This will prevent mmu_notifier_unregister()
                 * from also making the ->release() callout.
                 */
                hlist_del_init_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);

                /*
                 * Clear sptes. (see 'release' description in mmu_notifier.h)
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);

                spin_lock(&mm->mmu_notifier_mm->lock);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
        /*
         * All callouts to ->release() which we have done are complete.
         * Allow synchronize_srcu() in mmu_notifier_unregister() to complete.
         */
        srcu_read_unlock(&srcu, id);

        /*
         * mmu_notifier_unregister() may have unlinked a notifier and may
         * still be calling out to it. Additionally, other notifiers
         * may have been active via vmtruncate() et al. Block here
         * to ensure that all notifier callouts for this mm have been
         * completed and the sptes are really cleaned up before returning
         * to exit_mmap().
         */
        synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                     unsigned long address)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_flush_young)
                        young |= mn->ops->clear_flush_young(mn, mm, address);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}
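
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above __mmu_notifier_clear_flush_young(), a secondary MMU whose page
 * tables have no hardware young/accessed bit could implement
 * ->clear_flush_young by unmapping the page and reporting whether a
 * mapping previously existed. All "my_*" names below are hypothetical,
 * with my_unmap_one() standing in for the driver's own teardown helper.
 *
 *        static int my_clear_flush_young(struct mmu_notifier *mn,
 *                                        struct mm_struct *mm,
 *                                        unsigned long address)
 *        {
 *                bool was_mapped = my_unmap_one(mn, address);
 *
 *                return was_mapped ? 1 : 0;
 *        }
 */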
int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->test_young) {
                        young = mn->ops->test_young(mn, mm, address);
                        if (young)
                                break;
                }
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
        }
        srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                    unsigned long address)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_page)
                        mn->ops->invalidate_page(mn, mm, address);
        }
        srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                           unsigned long start, unsigned long end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start)
                        mn->ops->invalidate_range_start(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
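
/*
 * Illustrative sketch (not part of the original file): callers in core
 * mm code bracket an invalidation of a virtual address range with the
 * start/end pair, via the inline wrappers in mmu_notifier.h, roughly:
 *
 *        mmu_notifier_invalidate_range_start(mm, start, end);
 *        ... clear and flush the primary-MMU ptes for [start, end) ...
 *        mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * Between the two callouts a notifier user must not establish new
 * secondary-MMU mappings for pages in the range.
 */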
static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    int take_mmap_sem)
{
        struct mmu_notifier_mm *mmu_notifier_mm;
        int ret;

        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        /*
         * Verify that mmu_notifier_init() has already run and the global
         * srcu is initialized.
         */
        BUG_ON(!srcu.per_cpu_ref);

        ret = -ENOMEM;
        mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
        if (unlikely(!mmu_notifier_mm))
                goto out;

        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_clean;

        if (!mm_has_notifiers(mm)) {
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);
                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
        atomic_inc(&mm->mm_count);

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
        hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        mm_drop_all_locks(mm);
out_clean:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
        kfree(mmu_notifier_mm);
out:
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
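
/*
 * Illustrative sketch (not part of the original file): a minimal
 * hypothetical registration against current->mm, wiring up only the
 * ->release callout. All "my_*" names are made up; my_drop_all_sptes()
 * stands in for whatever tears down the driver's secondary-MMU
 * mappings for this mm.
 *
 *        static void my_release(struct mmu_notifier *mn,
 *                               struct mm_struct *mm)
 *        {
 *                my_drop_all_sptes(mn);
 *        }
 *
 *        static const struct mmu_notifier_ops my_ops = {
 *                .release = my_release,
 *        };
 *
 *        static struct mmu_notifier my_notifier = {
 *                .ops = &my_ops,
 *        };
 *
 *        int err = mmu_notifier_register(&my_notifier, current->mm);
 *        if (err)
 *                return err;
 *
 * Failure modes here are -ENOMEM from the kmalloc above, or an error
 * (e.g. -EINTR) propagated from mm_take_all_locks().
 */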
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
        kfree(mm->mmu_notifier_mm);
        mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        spin_lock(&mm->mmu_notifier_mm->lock);
        if (!hlist_unhashed(&mn->hlist)) {
                int id;

                /*
                 * Ensure we synchronize up with __mmu_notifier_release().
                 */
                id = srcu_read_lock(&srcu);

                hlist_del_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);

                if (mn->ops->release)
                        mn->ops->release(mn, mm);

                /*
                 * Allow __mmu_notifier_release() to complete.
                 */
                srcu_read_unlock(&srcu, id);
        } else
                spin_unlock(&mm->mmu_notifier_mm->lock);

        /*
         * Wait for any running method to finish, including ->release() if it
         * was run by __mmu_notifier_release() instead of us.
         */
        synchronize_srcu(&srcu);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
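
/*
 * Illustrative pairing (not part of the original file): the
 * hypothetical user sketched above mmu_notifier_register() would
 * eventually tear down with:
 *
 *        mmu_notifier_unregister(&my_notifier, mm);
 *
 * which makes the ->release callout itself if __mmu_notifier_release()
 * has not already done so, and drops via mmdrop() the mm_count pin
 * that registration took.
 */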
static int __init mmu_notifier_init(void)
{
        return init_srcu_struct(&srcu);
}
module_init(mmu_notifier_init);