/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru_list(zone, page, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
}
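
/*
 * Drop a page's LRU state (if any) and hand it back to the page
 * allocator's per-cpu hot/cold lists.
 */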
static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, 0);
}
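
/*
 * Free a compound page by invoking its destructor, after releasing any
 * LRU state held by the head page.
 */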
static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}
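
/*
 * Drop a reference on a compound page.  Tail pages of transparent huge
 * pages need care here: __split_huge_page_refcount() may be splitting
 * the page under us, so the compound_lock is taken to stabilise the
 * head/tail relationship before the refcounts are adjusted.
 */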
static void put_compound_page(struct page *page)
{
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
                struct page *page_head = compound_trans_head(page);

                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
                         * we obtain the lock. That is ok as long as it
                         * can't be freed from under us.
                         */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
                        }
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
                         * get_page_unless_zero() now that
                         * __split_huge_page_refcount() is blocked on
                         * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
                        VM_BUG_ON(page_mapcount(page) <= 0);
                        atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);

                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
                                else
                                        __put_single_page(page_head);
                        }
                } else {
                        /* page_head is a dangling pointer */
                        VM_BUG_ON(PageTail(page));
                        goto out_put_single;
                }
        } else if (put_page_testzero(page)) {
                if (PageHead(page))
                        __put_compound_page(page);
                else
                        __put_single_page(page);
        }
}
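
/* Drop a reference on @page and free it when the refcount reaches zero. */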
void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
        /*
         * This takes care of get_page() if run on a tail page
         * returned by one of the get_user_pages/follow_page variants.
         * get_user_pages/follow_page itself doesn't need the compound
         * lock because it runs __get_page_tail_foll() under the
         * proper PT lock that already serializes against
         * split_huge_page().
         */
        unsigned long flags;
        bool got = false;
        struct page *page_head = compound_trans_head(page);

        if (likely(page != page_head && get_page_unless_zero(page_head))) {
                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
                 * we obtain the lock. That is ok as long as it
                 * can't be freed from under us.
                 */
                flags = compound_lock_irqsave(page_head);
                /* here __split_huge_page_refcount won't run anymore */
                if (likely(PageTail(page))) {
                        __get_page_tail_foll(page, false);
                        got = true;
                }
                compound_unlock_irqrestore(page_head, flags);
                if (unlikely(!got))
                        put_page(page_head);
        }
        return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
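
/*
 * Run @move_fn on every page in @pvec while holding the owning zone's
 * lru_lock, re-taking the lock only when crossing a zone boundary, then
 * release the pages and reinitialise the pagevec.
 */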
static void pagevec_lru_move_fn(struct pagevec *pvec,
                                void (*move_fn)(struct page *page, void *arg),
                                void *arg)
{
        int i;
        struct zone *zone = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                (*move_fn)(page, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}
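
/*
 * Move an evictable, inactive LRU page to the tail of its inactive list
 * and count it in *pgmoved (used for PGROTATED accounting).
 */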
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
                struct lruvec *lruvec;

                lruvec = mem_cgroup_lru_move_lists(page_zone(page),
                                                   page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}
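
/*
 * Bump the zone's (and, if the page belongs to a memcg, the memcg's)
 * recent_scanned and recent_rotated reclaim statistics for this LRU type.
 */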
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
        struct zone_reclaim_stat *memcg_reclaim_stat;

        memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;

        if (!memcg_reclaim_stat)
                return;

        memcg_reclaim_stat->recent_scanned[file]++;
        if (rotated)
                memcg_reclaim_stat->recent_rotated[file]++;
}
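
/* Move a page from its inactive LRU list to the corresponding active list. */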
static void __activate_page(struct page *page, void *arg)
{
        struct zone *zone = page_zone(page);

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);

                del_page_from_lru_list(zone, page, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);

                __count_vm_event(PGACTIVATE);
                update_page_reclaim_stat(zone, page, file, 1);
        }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}
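
/*
 * Mark @page active: queue it on the per-cpu activate_page_pvecs pagevec
 * and move the whole batch to the active lists once the pagevec fills up.
 */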
void activate_page(struct page *page)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        __activate_page(page, NULL);
        spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}
EXPORT_SYMBOL(mark_page_accessed);
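
/*
 * Queue @page for addition to the given LRU list via the per-cpu
 * lru_add_pvecs pagevec; the batch is flushed when the pagevec is full.
 */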
void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped but is dirty or under writeback, it can be
 * reclaimed ASAP with the help of PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
        int lru, file;
        bool active;
        struct zone *zone = page_zone(page);

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(zone, page, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(zone, page, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * Setting PG_reclaim can race with end_page_writeback(),
                 * which can confuse readahead.  But the race window is
                 * _really_ small and it is a non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                struct lruvec *lruvec;
                /*
                 * The page's writeback ended while it sat in the pagevec,
                 * so move it to the tail of the inactive list.
                 */
                lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        __pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as one making
         * heavy use of mprotect), deactivating unevictable pages to
         * accelerate reclaim is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
}
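
/* Drain the calling CPU's pagevecs. */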
void lru_add_drain(void)
{
        lru_add_drain_cpu(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                               flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru_list(zone, page, page_off_lru(page));
                }

                list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone *zone,
                       struct page *page, struct page *page_tail)
{
        int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;

        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

        SetPageLRU(page_tail);

        if (page_evictable(page_tail, NULL)) {
                if (PageActive(page)) {
                        SetPageActive(page_tail);
                        active = 1;
                        lru = LRU_ACTIVE_ANON;
                } else {
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
        } else {
                SetPageUnevictable(page_tail);
                lru = LRU_UNEVICTABLE;
        }

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
                add_page_to_lru_list(zone, page_tail, lru);
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }

        if (!PageUnevictable(page))
                update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
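
/*
 * Per-page callback for __pagevec_lru_add(): set the LRU (and, where
 * appropriate, active) flags and link the page onto the requested list.
 */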
static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);

        VM_BUG_ON(PageActive(page));
        VM_BUG_ON(PageUnevictable(page));
        VM_BUG_ON(PageLRU(page));

        SetPageLRU(page);
        if (active)
                SetPageActive(page);
        add_page_to_lru_list(zone, page, lru);
        update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        VM_BUG_ON(is_unevictable_lru(lru));

        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
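
/*
 * Like pagevec_lookup(), but only returns pages tagged with @tag in the
 * mapping's radix tree, advancing *index past the last page returned.
 */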
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                      nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}