rmap.c

/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
 *         anon_vma->mutex
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_wb_list_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->mutex      (memory_failure, collect_procs_anon)
 *     pte map lock
 */
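
/*
 * Illustrative sketch, not part of rmap.c: how a typical file-rmap walk
 * nests the locks listed above (i_mmap_mutex above the pte lock). The
 * function name and parameters here are hypothetical.
 */
#if 0
static void lock_order_example(struct address_space *mapping,
			       struct mm_struct *mm, pmd_t *pmd,
			       unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	mutex_lock(&mapping->i_mmap_mutex);	/* taken before the pte lock */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	/* ... inspect or modify the pte ... */
	pte_unmap_unlock(pte, ptl);
	mutex_unlock(&mapping->i_mmap_mutex);	/* released in reverse order */
}
#endif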
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>

#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * mutex_trylock() from page_lock_anon_vma(). This orders:
	 *
	 * page_lock_anon_vma()		VS	put_anon_vma()
	 *   mutex_trylock()			  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  mutex_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	if (mutex_is_locked(&anon_vma->root->mutex)) {
		anon_vma_lock(anon_vma);
		anon_vma_unlock(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc();
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

out_enomem_free_avc:
	anon_vma_chain_free(avc);
out_enomem:
	return -ENOMEM;
}
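
/*
 * Illustrative sketch, not part of rmap.c: the usual calling pattern from
 * an anonymous fault path, loosely simplified from do_anonymous_page().
 */
#if 0
	if (unlikely(anon_vma_prepare(vma)))	/* mmap_sem held for read */
		return VM_FAULT_OOM;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	/* ... set up the pte under the pte lock ... */
	page_add_new_anon_rmap(page, vma, address);
#endif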
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	anon_vma_lock(anon_vma);
	/*
	 * It's critical to add new vmas to the tail of the anon_vma,
	 * see comment in huge_memory.c:__split_huge_page().
	 */
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
	anon_vma_unlock(anon_vma);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc();
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_chain_link(vma, avc, anon_vma);

	return 0;

out_error_free_anon_vma:
	put_anon_vma(anon_vma);
out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
	int empty;

	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
	if (!anon_vma)
		return;

	anon_vma_lock(anon_vma);
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	anon_vma_unlock(anon_vma);

	if (empty)
		put_anon_vma(anon_vma);
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	mutex_init(&anon_vma->mutex);
	atomic_set(&anon_vma->refcount, 0);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (mutex_trylock(&root_anon_vma->mutex)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see __put_anon_vma().
		 */
		if (!page_mapped(page)) {
			mutex_unlock(&root_anon_vma->mutex);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto out;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock() recursion.
		 */
		anon_vma_unlock(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	anon_vma_unlock(anon_vma);
}
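
/*
 * Illustrative sketch, not part of rmap.c: the lock/unlock pairing used
 * by the anon rmap walkers below, e.g. page_referenced_anon().
 */
#if 0
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;	/* page no longer anon, or anon_vma is gone */
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		/* ... visit each vma that may map the page ... */
	}
	page_unlock_anon_vma(anon_vma);
#endif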
/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by @vma.
 */
inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
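
/*
 * Worked example (illustrative, assuming 4KB pages): a vma with
 * vm_start = 0x400000 and vm_pgoff = 16 maps file offset 16 << PAGE_SHIFT
 * at 0x400000, so a page with page->index = 20 is expected at
 * 0x400000 + ((20 - 16) << PAGE_SHIFT) = 0x404000.
 */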
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			    unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		pte = huge_pte_offset(mm, address);
		ptl = &mm->page_table_lock;
		goto check;
	}

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_trans_huge(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int referenced = 0;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		spin_lock(&mm->page_table_lock);
		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG);
		if (!pmd) {
			spin_unlock(&mm->page_table_lock);
			goto out;
		}

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(&mm->page_table_lock);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(&mm->page_table_lock);
	} else {
		pte_t *pte;
		spinlock_t *ptl;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			goto out;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such. If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!VM_SequentialReadHint(vma)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}
static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	BUG_ON(!PageLocked(page));

	mutex_lock(&mapping->i_mmap_mutex);

	/*
	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	mutex_unlock(&mapping->i_mmap_mutex);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	if (page_test_and_clear_young(page_to_pfn(page)))
		referenced++;

	return referenced;
}
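
/*
 * Illustrative sketch, not part of rmap.c: how vmscan consults
 * page_referenced() when deciding a page's fate, loosely modelled on
 * page_check_references() in mm/vmscan.c.
 */
#if 0
	unsigned long vm_flags;
	int referenced = page_referenced(page, 1, mem_cont, &vm_flags);

	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;		/* let the mlock path cull it */
	if (referenced)
		return PAGEREF_ACTIVATE;	/* keep the page around */
	return PAGEREF_RECLAIM;			/* candidate for eviction */
#endif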
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
				ret = 1;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
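
/*
 * Illustrative sketch, not part of rmap.c: page_mkclean() is used when a
 * dirty shared page is about to be written back, as in mm/page-writeback.c's
 * clear_page_dirty_for_io(). Write-protecting the ptes makes any later
 * store re-fault and re-dirty the page.
 */
#if 0
	if (page_mkclean(page))
		set_page_dirty(page);	/* pte dirty bits moved to the page */
#endif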
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		if (!PageTransHuge(page))
			__inc_zone_page_state(page, NR_ANON_PAGES);
		else
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (!PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_PAGES);
	else
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	}
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page. We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) &&
	    page_test_and_clear_dirty(page_to_pfn(page), 1))
		set_page_dirty(page);

	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 */
	if (unlikely(PageHuge(page)))
		return;
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		if (!PageTransHuge(page))
			__dec_zone_page_state(page, NR_ANON_PAGES);
		else
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking: otherwise the VM_LOCKED check is racy
	 * and gives an unstable result. We can't wait for the lock here
	 * because we now hold anon_vma->mutex or mapping->i_mmap_mutex.
	 * If the trylock fails, the page remains on the evictable lru and
	 * vmscan can later retry moving it to the unevictable lru if the
	 * page is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking. If vma locked, mlock the pages in the cluster,
 * rather than unmapping them. If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}
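
/*
 * Worked example (illustrative, x86-64 with 4KB pages): CLUSTER_SIZE is
 * min(32 * 4KB, 2MB) = 128KB (0x20000). For a vma with vm_start = 0x400000
 * and a cursor of 0x21000, the scan window is
 * (0x400000 + 0x21000) & ~0x1ffff = 0x420000 up to 0x440000.
 */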
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}
/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address;

		/*
		 * During exec, a temporary VMA is setup and later moved.
		 * The VMA is moved under the anon_vma lock but not the
		 * page tables leading to a race where migration cannot
		 * find the migration ptes. Rather than increasing the
		 * locking requirements of exec(), migration skips
		 * temporary VMAs until after exec() completes.
		 */
		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
				is_vma_temporary_stack(vma))
			continue;

		address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}
/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly. Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
							vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}
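
/*
 * Illustrative sketch (editor's addition, not part of rmap.c): the
 * nonlinear pass above is a cooperative, resumable scan.  Each nonlinear
 * vma reuses ->vm_private_data as a byte cursor, and each call advances
 * every vma's cursor in CLUSTER_SIZE steps until the page is unmapped or
 * the largest vma has been fully covered.  The stand-alone user-space
 * model below (hypothetical names: toy_vma, unmap_cluster,
 * scan_nonlinear; CLUSTER_SIZE value is illustrative only) demonstrates
 * just that cursor arithmetic, with unmap_cluster() standing in for
 * try_to_unmap_cluster().
 */
#if 0	/* stand-alone model; lift out and build with any C compiler */
#include <stdio.h>

#define CLUSTER_SIZE	(16 * 4096UL)		/* illustrative value */
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

struct toy_vma {
	unsigned long size;	/* models vm_end - vm_start */
	unsigned long cursor;	/* models vm_private_data */
};

static void unmap_cluster(struct toy_vma *v, unsigned long cursor)
{
	printf("scan vma %p at offset %#lx\n", (void *)v, cursor);
}

static void scan_nonlinear(struct toy_vma *vmas, int n)
{
	unsigned long max_nl_cursor = 0, max_nl_size = 0;
	int i;

	/* first pass: find the furthest cursor and the largest vma */
	for (i = 0; i < n; i++) {
		if (vmas[i].cursor > max_nl_cursor)
			max_nl_cursor = vmas[i].cursor;
		if (vmas[i].size > max_nl_size)
			max_nl_size = vmas[i].size;
	}
	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	/* second pass: advance all cursors one window at a time */
	do {
		for (i = 0; i < n; i++) {
			unsigned long cursor = vmas[i].cursor;

			while (cursor < max_nl_cursor &&
			       cursor < vmas[i].size) {
				unmap_cluster(&vmas[i], cursor);
				cursor += CLUSTER_SIZE;
				vmas[i].cursor = cursor;
			}
			vmas[i].cursor = max_nl_cursor;
		}
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);
}

int main(void)
{
	struct toy_vma vmas[2] = {
		{ 5 * CLUSTER_SIZE, 0 },
		{ 2 * CLUSTER_SIZE, 0 },
	};

	scan_nonlinear(vmas, 2);
	return 0;
}
#endif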

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
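
/*
 * Usage sketch (editor's addition): the pageout path consumes these
 * return values roughly as below.  This is a simplified rendering modeled
 * on the caller in mm/vmscan.c (shrink_page_list), not a verbatim copy;
 * the labels are that caller's, elided here.
 */
#if 0
	if (page_mapped(page) && mapping) {
		switch (try_to_unmap(page, TTU_UNMAP)) {
		case SWAP_FAIL:
			goto activate_locked;	/* keep page, reactivate it */
		case SWAP_AGAIN:
			goto keep_locked;	/* retry on a later pass */
		case SWAP_MLOCK:
			goto cull_mlocked;	/* move to unevictable list */
		case SWAP_SUCCESS:
			; /* all ptes gone: fall through and try to free */
		}
	}
#endif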

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}
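
/*
 * Usage sketch (editor's addition): a simplified, hypothetical rendering
 * of how the munlock path (munlock_vma_page() in mm/mlock.c) uses this;
 * statistics and error handling are elided.
 */
#if 0
	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			/*
			 * try_to_munlock() re-sets PG_mlocked (returning
			 * SWAP_MLOCK) iff some other vma still holds the
			 * page mlocked; otherwise the flag stays cleared
			 * and the page becomes evictable again.
			 */
			try_to_munlock(page);
			putback_lru_page(page);
		}
	}
#endif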

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);

	anon_vma_free(anon_vma);
}
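
/*
 * Context (editor's addition): __put_anon_vma() is the slow path taken
 * when an anon_vma's refcount drops to zero; it also releases the root
 * anon_vma's reference that every non-root anon_vma holds.  The
 * fast-path wrapper lives in include/linux/rmap.h and, in kernels of
 * this vintage, looks roughly like the sketch below (reproduced from
 * memory, so treat as a sketch rather than the authoritative source).
 */
#if 0
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
#endif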

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.  Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	anon_vma_lock(anon_vma);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	anon_vma_unlock(anon_vma);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
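
/*
 * Usage sketch (editor's addition): migration's main user of this walk,
 * remove_migration_ptes() in mm/migrate.c, is essentially a one-liner;
 * remove_migration_pte() is its per-vma callback.  Sketched from memory,
 * so treat as illustrative rather than a verbatim quote.
 */
#if 0
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}
#endif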
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
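
/*
 * Usage sketch (editor's addition): a simplified, hypothetical rendering
 * of the hugetlb COW break in mm/hugetlb.c, where a freshly copied
 * private hugepage is wired into the rmap (pte update and locking
 * elided).  hugepage_add_anon_rmap() is instead used when re-mapping an
 * already-anonymous hugepage, e.g. on migration.
 */
#if 0
	/* in hugetlb_cow(), after copying old_page into new_page */
	page_remove_rmap(old_page);
	hugepage_add_new_anon_rmap(new_page, vma, address);
	/* ...install the new huge pte for 'address' here... */
#endif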
#endif /* CONFIG_HUGETLB_PAGE */