swap.c

/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			/*
			 * THP can not break up slab pages so avoid taking
			 * compound_lock().  Slab performs non-atomic bit ops
			 * on page->flags for better performance.  In
			 * particular slab_unlock() in slub used to be a hot
			 * path.  It is still hot on arches that do not support
			 * this_cpu_cmpxchg_double().
			 */
			if (PageSlab(page_head)) {
				if (PageTail(page)) {
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);

					atomic_dec(&page->_mapcount);
					goto skip_lock_tail;
				} else
					goto skip_lock;
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
skip_lock:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
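
/*
 * Illustrative sketch, not part of the original file: the usual
 * get_page()/put_page() pairing used by callers that need to keep a page
 * alive across a sleep or a lock drop.  The helper name hold_page_briefly()
 * is hypothetical; get_page() and put_page() are the real APIs.
 */
#if 0
static void hold_page_briefly(struct page *page)
{
	get_page(page);		/* extra reference: the page cannot be freed */
	/* ... use the page while no other lock pins it ... */
	put_page(page);		/* drop the reference; frees the page if it was the last */
}
#endif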

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {

		/* Ref to put_compound_page() comment. */
		if (PageSlab(page_head)) {
			if (likely(PageTail(page))) {
				__get_page_tail_foll(page, false);
				return true;
			} else {
				put_page(page_head);
				return false;
			}
		}

		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
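
/*
 * Illustrative sketch, not part of the original file: collecting pages on a
 * local list via page->lru and releasing them in one call.  The helper name
 * drop_collected_pages() and the caller-supplied array are hypothetical;
 * put_pages_list() itself is the API defined above.
 */
#if 0
static void drop_collected_pages(struct page **pages, int nr)
{
	LIST_HEAD(tmp);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &tmp);	/* thread the pages on page->lru */

	put_pages_list(&tmp);			/* drops one reference per page */
}
#endif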

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced   -> active,unreferenced
 * active,unreferenced   -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
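
/*
 * Illustrative sketch, not part of the original file: how repeated accesses
 * walk a page up the aging ladder described in the comment above.  Assumes
 * "page" starts on the inactive list with PG_referenced clear; the helper
 * name touch_twice() is hypothetical.
 */
#if 0
static void touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced */
}
#endif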

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
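
/*
 * Illustrative sketch, not part of the original file: the common pattern of
 * inserting a freshly allocated page cache page and then putting it on the
 * LRU.  add_to_page_cache_lru() in mm/filemap.c does essentially this; the
 * helper below is a hypothetical, simplified stand-in.
 */
#if 0
static int add_and_lru(struct page *page, struct address_space *mapping,
		       pgoff_t index, gfp_t gfp)
{
	int ret = add_to_page_cache(page, mapping, index, gfp);

	if (ret == 0)
		/* start life on the inactive file list */
		lru_cache_add_lru(page, LRU_INACTIVE_FILE);
	return ret;
}
#endif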

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback, it can be
 * reclaimed as soon as possible via PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback.
		 * It can make readahead confusing, but the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it sat in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
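
/*
 * Illustrative sketch, not part of the original file: callers that must see
 * pages on the LRU lists (rather than sitting in per-cpu pagevecs) drain
 * first, as paths such as mlock and memory compaction do.  The helper name
 * prepare_for_lru_scan() is hypothetical.
 */
#if 0
static int prepare_for_lru_scan(void)
{
	/* flush every CPU's pending pagevecs onto the LRU lists */
	return lru_add_drain_all();
}
#endif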

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
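
/*
 * Illustrative sketch, not part of the original file: the canonical
 * pagevec_lookup() loop used by truncate/invalidate style code.  Each batch
 * of up to PAGEVEC_SIZE pages carries a reference that pagevec_release()
 * drops.  walk_mapping() and the process_page() callback are hypothetical.
 */
#if 0
static void walk_mapping(struct address_space *mapping,
			 void (*process_page)(struct page *))
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;	/* resume after this page */
			process_page(page);
		}
		pagevec_release(&pvec);	/* drop the references taken by pagevec_lookup() */
		cond_resched();
	}
}
#endif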

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}