/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
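
/*
 * Note (added, not in the original source): page_cluster is an order,
 * not a page count.  Swap readahead operates on 1 << page_cluster pages
 * at a time, so the default of 3 set in swap_setup() below means
 * 8-page clusters.  It is tunable at runtime via
 * /proc/sys/vm/page-cluster.
 */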

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = page->first_page;
		smp_rmb();
		/*
		 * If PageTail is still set after the smp_rmb() we can be sure
		 * that the page->first_page we read wasn't a dangling pointer.
		 * See __split_huge_page_refcount() smp_wmb().
		 */
		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * Verify that our page_head wasn't converted
			 * to a regular page before we got a
			 * reference on it.
			 */
			if (unlikely(!PageHead(page_head))) {
				/* PageHead is cleared after PageTail */
				smp_rmb();
				VM_BUG_ON(PageTail(page));
				goto out_put_head;
			}
			/*
			 * Only run compound_lock on a valid PageHead,
			 * after having it pinned with
			 * get_page_unless_zero() above.
			 */
			smp_mb();
			/* page_head wasn't a dangling pointer */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
			out_put_head:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on the
			 * compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(atomic_read(&page->_count) <= 0);
			atomic_dec(&page->_count);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
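
/*
 * Illustrative sketch (added; hand_off_page() is hypothetical, not a
 * real kernel function): the usual get_page()/put_page() pairing.
 * Whoever takes an extra reference must drop it, and the final
 * put_page() frees the page:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	get_page(page);		take a reference for the consumer
 *	hand_off_page(page);	consumer calls put_page() when done
 *	put_page(page);		drop our own reference
 */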

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
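
/*
 * Illustrative sketch (added, not in the original source): callers
 * thread pages together through page->lru and hand the whole list
 * here, e.g. to unwind after a partial readahead failure:
 *
 *	LIST_HEAD(pages);
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (page)
 *		list_add(&page->lru, &pages);
 *	...
 *	put_pages_list(&pages);		releases every page on the list
 */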

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
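
/*
 * Note (added, not in the original source): pagevec_add() returns the
 * number of free slots remaining after the add, so the
 * "if (!pagevec_add(pvec, page))" idiom above drains the pagevec
 * exactly when it becomes full.  The same pattern appears in
 * __lru_cache_add() below.
 */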

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced   -> active,unreferenced
 * active,unreferenced   -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
	    PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
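
/*
 * Illustrative note (added, not in the original source): two calls in
 * a row walk a cold LRU page up the ladder documented above:
 *
 *	mark_page_accessed(page);  inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);  inactive,referenced   -> active,unreferenced
 *
 * This two-touch rule is what keeps single-use streaming I/O from
 * flooding the active list.
 */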

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
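
/*
 * Illustrative note (an assumption about headers outside this file):
 * most callers go through thin static-inline wrappers in
 * <linux/swap.h> rather than passing an lru value directly, e.g.:
 *
 *	lru_cache_add_anon(page);	__lru_cache_add(page, LRU_INACTIVE_ANON)
 *	lru_cache_add_file(page);	__lru_cache_add(page, LRU_INACTIVE_FILE)
 */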

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
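
/*
 * Illustrative note (added, not in the original source): callers such
 * as page migration and mlock drain all CPUs before isolating pages,
 * so that pages parked in per-cpu pagevecs reach the LRU where they
 * can be found:
 *
 *	lru_add_drain_all();
 *	... isolate_lru_page() can now see recently added pages ...
 */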

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
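
/*
 * Illustrative sketch (an assumption about <linux/pagevec.h>, not code
 * from this file): the usual batching pattern pairs pagevec_add() with
 * pagevec_release(), a thin wrapper around __pagevec_release():
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	if (!pagevec_add(&pvec, page))	pagevec full: release the batch
 *		pagevec_release(&pvec);
 *	...
 *	pagevec_release(&pvec);		release any leftover pages
 */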

/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone *zone,
		       struct page *page, struct page *page_tail)
{
	int active;
	enum lru_list lru;
	const int file = 0;
	struct list_head *head;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
		if (likely(PageLRU(page)))
			head = page->lru.prev;
		else
			head = &zone->lru[lru].list;
		__add_page_to_lru_list(zone, page_tail, lru, head);
	} else {
		SetPageUnevictable(page_tail);
		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
	}
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
			pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
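
/*
 * Illustrative sketch (added, not in the original source): the common
 * iteration pattern over a mapping, releasing each batch when done:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;
 *			... process page ...
 *		}
 *		pagevec_release(&pvec);
 *	}
 */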

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
			    pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
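
/*
 * Illustrative note (added, not in the original source): unlike
 * pagevec_lookup(), this variant advances @index past the last page it
 * returned, so writeback-style loops need no manual bookkeeping:
 *
 *	pgoff_t index = 0;
 *
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		... process the dirty pages in pvec ...
 *		pagevec_release(&pvec);
 *	}
 */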

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
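	/*
	 * Note (added, not in the original source): PAGE_SHIFT is the
	 * log2 of the page size, so shifting the page count right by
	 * (20 - PAGE_SHIFT) converts pages to megabytes; with 4 KiB
	 * pages (PAGE_SHIFT == 12) that is a shift by 8, i.e. 256
	 * pages per megabyte.
	 */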

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
}