/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation.  At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}
		spin_lock(&anon_vma->lock);

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		spin_unlock(&anon_vma->lock);

		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
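
/*
 * Editor's example (not part of the original file): a minimal sketch of
 * how a fault path is expected to call anon_vma_prepare() before
 * installing a new anonymous page, so that page_add_new_anon_rmap()
 * later finds vma->anon_vma already set.  Loosely modelled on
 * do_anonymous_page(); example_handle_anon_fault() is a hypothetical
 * name and the pte setup is elided.
 */
#if 0	/* illustrative only, never compiled */
static int example_handle_anon_fault(struct vm_area_struct *vma,
		unsigned long address)
{
	struct page *page;

	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;	/* may sleep: mmap_sem held for read */
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		return VM_FAULT_OOM;
	/* ... install the pte under the pte lock, then: */
	page_add_new_anon_rmap(page, vma, address);
	return 0;
}
#endif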

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
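
/*
 * Editor's example (not part of the original file): the canonical
 * pairing of the two helpers above.  Every anon rmap walk in this file
 * (page_referenced_anon, try_to_unmap_anon) follows this shape; the
 * loop body here is just a placeholder and the function name is
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void example_walk_anon_mappings(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;

	anon_vma = page_lock_anon_vma(page);	/* takes rcu_read_lock too */
	if (!anon_vma)
		return;		/* page is not (or no longer) anon-mapped */
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/* inspect or modify the mapping of page in this vma */
	}
	page_unlock_anon_vma(anon_vma);		/* drops lock and rcu */
}
#endif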

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
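
/*
 * Editor's worked example (not part of the original file), assuming
 * PAGE_CACHE_SHIFT == PAGE_SHIFT and 4K pages: for a vma with
 * vm_start == 0x40000000 and vm_pgoff == 0x10, a page with index 0x13
 * yields 0x40000000 + ((0x13 - 0x10) << 12) == 0x40003000, which lies
 * inside [vm_start, vm_end) and is returned; an index below 0x10 would
 * fall below vm_start and return -EFAULT instead.
 */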

/*
 * At what user virtual address is page expected in vma?
 * Checks that the page matches the vma: currently only used on anon
 * pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
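
/*
 * Editor's example (not part of the original file): the caller pattern
 * page_check_address() expects.  On success the pte comes back mapped
 * and locked, so the caller must finish with pte_unmap_unlock().
 * example_pte_is_writable() is a hypothetical name.
 */
#if 0	/* illustrative only, never compiled */
static int example_pte_is_writable(struct page *page, struct mm_struct *mm,
		unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;
	int writable;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;	/* not mapped here, or lost a race */
	writable = pte_write(*pte);
	pte_unmap_unlock(pte, ptl);
	return writable;
}
#endif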

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
		struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}
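
/*
 * Editor's example (not part of the original file): roughly how the
 * reclaim path (cf. shrink_page_list()) is expected to consult
 * page_referenced() on a locked page - a non-zero count means recently
 * used, so the page is a poor eviction candidate.  The helper name is
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int example_recently_used(struct page *page)
{
	/* page is locked here, so is_locked == 1; no cgroup filtering */
	return page_referenced(page, 1, NULL) > 0;
}
#endif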

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
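
/*
 * Editor's example (not part of the original file): a sketch of how
 * writeback (cf. clear_page_dirty_for_io()) can use page_mkclean() to
 * write-protect every pte mapping a shared file page, so that any
 * store after this point re-dirties the page through a fresh fault.
 * Error handling and dirty accounting are elided; the helper name is
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void example_clean_for_writeback(struct page *page)
{
	BUG_ON(!PageLocked(page));
	if (page_mkclean(page))
		set_page_dirty(page);	/* a pte was dirty: keep page queued */
}
#endif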

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}
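
/*
 * Editor's worked example (not part of the original file): _mapcount
 * starts at -1 for an unmapped page.  The first pte's
 * atomic_inc_and_test() takes it to 0 and returns true, so
 * __page_set_anon_rmap() runs exactly once; the second and later ptes
 * take it to 1, 2, ... and only get the debug check.  page_mapcount()
 * reads _mapcount + 1, i.e. the number of ptes mapping the page.
 */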

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0);	/* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 * @vma: the vm area being duplicated
 * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		/*
		 * Now that the last pte has gone, s390 must transfer dirty
		 * flag from storage key to struct page.  We can usually skip
		 * this if the page is anon, so about to be freed; but perhaps
		 * not if it's in swapcache - there might be another pte slot
		 * containing the swap entry, but page not yet written to swap.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		if (PageAnon(page))
			mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration) {
		if (vma->vm_flags & VM_LOCKED) {
			ret = SWAP_MLOCK;
			goto out_unmap;
		}
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte.  do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If the vma is locked, mlock the pages in the
 * cluster, rather than unmapping them.  If we encounter the "check_page" that
 * vmscan is trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
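
/*
 * Editor's worked example (not part of the original file), assuming 4K
 * pages and 2M pmds: CLUSTER_SIZE == min(32 * 4K, 2M) == 128K (0x20000)
 * and CLUSTER_MASK == ~0x1ffff, so for cursor == 0x23456 the scan below
 * starts at (vm_start + 0x23456) rounded down to a 128K boundary and
 * covers one 128K cluster of the nonlinear vma per call.
 */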

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * MLOCK_PAGES => feature is configured.
	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

/*
 * common handling for pages mapped in VM_LOCKED vmas
 */
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			mlocked++;	/* really mlocked the page */
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
 * rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, int unlock, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned int mlocked = 0;
	int ret = SWAP_AGAIN;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;	/* must visit all unlocked vmas */
			ret = SWAP_MLOCK;	/* saw at least one mlocked vma */
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				break;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	page_unlock_anon_vma(anon_vma);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */

	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, int unlock, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;
	unsigned int mlocked = 0;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				goto out;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	if (mlocked)
		goto out;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
			goto out;		/* no need to look further */
		}
		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (!MLOCK_PAGES && !migration &&
			    (vma->vm_flags & VM_LOCKED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(cursor, &mapcount,
								vma, page);
				if (ret == SWAP_MLOCK)
					mlocked = 2;	/* to return below */
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, 0, migration);
	else
		ret = try_to_unmap_file(page, 0, migration);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
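
/*
 * Editor's example (not part of the original file): how the pageout
 * path (cf. shrink_page_list() in mm/vmscan.c) is expected to act on
 * try_to_unmap()'s verdict; the goto labels shown are the ones used
 * there.
 */
#if 0	/* illustrative only, never compiled */
	switch (try_to_unmap(page, 0)) {
	case SWAP_FAIL:
		goto activate_locked;	/* unswappable: keep it active */
	case SWAP_AGAIN:
		goto keep_locked;	/* missed a mapping: retry later */
	case SWAP_MLOCK:
		goto cull_mlocked;	/* mlocked: head for unevictable list */
	case SWAP_SUCCESS:
		break;			/* all mappings gone: proceed to pageout */
	}
#endif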

#ifdef CONFIG_UNEVICTABLE_LRU
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS	- no vmas hold the page mlocked
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (PageAnon(page))
		return try_to_unmap_anon(page, 1, 0);
	else
		return try_to_unmap_file(page, 1, 0);
}
#endif