swap.c

/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
			out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
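
/*
 * Illustrative sketch (not part of the original file, left inside #if 0):
 * a typical caller pairs get_page() with put_page().  The helper name
 * below is hypothetical.
 */
#if 0
static void example_hold_page(struct page *page)
{
	get_page(page);		/* take an extra reference */
	/* ... use the page while the reference is held ... */
	put_page(page);		/* drop it; this may free the page */
}
#endif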

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
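
/*
 * Illustrative sketch (hypothetical helper, not part of the original file,
 * left inside #if 0): pages strung together on page->lru can be built up
 * with list_add() and released in one call to put_pages_list().
 */
#if 0
static void example_put_pages_list(void)
{
	LIST_HEAD(pages);
	struct page *page = alloc_page(GFP_KERNEL);

	if (page)
		list_add(&page->lru, &pages);
	/* walks the list and drops one reference per page */
	put_pages_list(&pages);
}
#endif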

static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
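
/*
 * Illustrative sketch (hypothetical helper, not part of the original file,
 * left inside #if 0): two calls are needed to promote an inactive page -
 * the first only sets PG_referenced, the second moves it to the active list.
 */
#if 0
static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced  */
}
#endif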

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the inactive list
 * to speed up its reclaim.  It is moved to the head of the list, rather
 * than the tail, to give the flusher threads some time to write it out,
 * as this is much more effective than the single-page writeout from
 * reclaim.
 *
 * If the page is not mapped and is dirty or under writeback, it can be
 * reclaimed ASAP by setting PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback, which can
		 * confuse readahead.  But the race window is _really_ small
		 * and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback ended while it was on the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
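
/*
 * Illustrative sketch (hypothetical caller, not part of the original file,
 * left inside #if 0): code that needs recently-added pages to really be on
 * the LRU (for example before isolating them) drains the per-cpu pagevecs
 * first.
 */
#if 0
static int example_flush_pagevecs(void)
{
	lru_add_drain();		/* drain this CPU only; cheap */
	return lru_add_drain_all();	/* drain every CPU; may sleep */
}
#endif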

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
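
/*
 * Illustrative sketch (hypothetical helper, not part of the original file,
 * left inside #if 0): dropping the references on a whole batch of pages in
 * one call, for example pages previously pinned with get_user_pages().
 */
#if 0
static void example_release_batch(struct page **pages, int nr)
{
	release_pages(pages, nr, 0);	/* 0 == treat the pages as cache-hot */
}
#endif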

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
		       struct page *page, struct page *page_tail)
{
	int active;
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void ____pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	update_page_reclaim_stat(zone, page, file, active);
	add_page_to_lru_list(zone, page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
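
/*
 * Illustrative sketch (hypothetical helper, not part of the original file,
 * left inside #if 0): the usual way to walk a mapping with pagevec_lookup()
 * is to look up a batch, process it, release it, and continue from the index
 * after the last page seen.
 */
#if 0
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = page->index + 1;	/* resume after this page */
			/* ... inspect or process the page here ... */
		}
		pagevec_release(&pvec);	/* drop the references taken by the lookup */
	}
}
#endif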

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
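
/*
 * Illustrative note (not part of the original file): page_cluster is the
 * log2 of the swap readahead window, so the defaults above correspond to
 * 1 << 2 = 4 pages per swap readahead on machines with less than 16 MB of
 * RAM and 1 << 3 = 8 pages otherwise.  It is tunable at runtime via
 * /proc/sys/vm/page-cluster.
 */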