/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
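
/*
 * The per-CPU pagevecs above batch LRU manipulations: pages are
 * buffered locally, and zone->lru_lock is then taken once per batch
 * of up to PAGEVEC_SIZE pages instead of once per page.  See
 * drain_cpu_pagevecs() below for how the batches are flushed.
 */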

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
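
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * the usual pairing takes a reference with get_page() and drops it with
 * put_page(); the final put frees the page.
 *
 *	get_page(page);		- take an extra reference
 *	...use the page...
 *	put_page(page);		- drop it; the last reference frees
 */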

/**
 * put_pages_list(): release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
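
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * the caller owns one reference per page and threads the pages on a
 * local list head before handing the whole list over.
 *
 *	LIST_HEAD(pages);
 *	...
 *	list_add(&page->lru, &pages);	- once per referenced page
 *	...
 *	put_pages_list(&pages);		- drops every reference
 */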

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page)) {
			list_move_tail(&page->lru, &zone->inactive_list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
	struct pagevec *pvec;
	unsigned long flags;

	if (PageLocked(page))
		return 1;
	if (PageDirty(page))
		return 1;
	if (PageActive(page))
		return 1;
	if (!PageLRU(page))
		return 1;

	page_cache_get(page);
	local_irq_save(flags);
	pvec = &__get_cpu_var(lru_rotate_pvecs);
	if (!pagevec_add(pvec, page))
		pagevec_move_tail(pvec);
	local_irq_restore(flags);

	if (!test_clear_page_writeback(page))
		BUG();

	return 0;
}
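
/*
 * Illustrative note (an assumption about the caller, not stated in this
 * file): the expected caller is end_page_writeback(), which attempts the
 * rotation when PG_reclaim was set on the page and, on a non-zero
 * return, clears PG_writeback itself.
 */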

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		__count_vm_event(PGACTIVATE);
		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
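
/*
 * Illustrative note (not part of the original file): the state table
 * above implements a "second chance" scheme - a page must be touched
 * twice before it is promoted from the inactive to the active list.
 *
 *	mark_page_accessed(page);	- first touch: sets PG_referenced
 *	mark_page_accessed(page);	- second touch: activates the page
 */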

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}

/*
 * As lru_cache_add(), but the page is queued for the active list.
 */
void lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}
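
/*
 * Illustrative note (an assumption about the callers, not stated in
 * this file): freshly faulted-in anonymous pages are typically queued
 * via lru_cache_add_active(), while pages entering the page cache go
 * through lru_cache_add() and start life on the inactive list.
 */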

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvec;

	pvec = &per_cpu(lru_add_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		VM_BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && !TestSetPageLocked(page)) {
			/*
			 * Recheck now that the page is locked: another
			 * path may have dropped the buffers meanwhile.
			 */
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

/*
 * Like pagevec_lookup(), but only returns pages tagged with @tag;
 * @index is advanced to just past the last page returned.
 */
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
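
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * walking every dirty page of a mapping a pagevec at a time, as
 * writeback paths typically do.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *			PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		...process pvec.pages[0..pagevec_count(&pvec) - 1]...
 *		pagevec_release(&pvec);
 *	}
 */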

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}
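
/*
 * A minimal usage sketch (assuming the vm_unacct_memory() wrapper from
 * include/linux/swap.h, which simply calls vm_acct_memory() with a
 * negated argument):
 *
 *	vm_acct_memory(len >> PAGE_SHIFT);	- charge an allocation
 *	...
 *	vm_unacct_memory(len >> PAGE_SHIFT);	- undo the charge
 */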

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		atomic_add(*committed, &vm_committed_space);
		*committed = 0;
		drain_cpu_pagevecs((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
#endif
}
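
/*
 * Illustrative note (not part of the original file): page_cluster is a
 * power-of-two exponent, so the swap readahead window chosen above is
 * 1 << 2 = 4 pages on small machines and 1 << 3 = 8 pages otherwise.
 * It is tunable at runtime via /proc/sys/vm/page-cluster.
 */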