/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static void put_compound_page(struct page *page)
{
	/* All refcounting on a compound page is done against the head
	 * page, which the tail pages' ->private points at. */
	page = (struct page *)page_private(page);
	if (put_page_testzero(page)) {
		void (*dtor)(struct page *page);

		/* The compound-page destructor is stashed in the second
		 * page's lru.next field. */
		dtor = (void (*)(struct page *))page[1].lru.next;
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
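
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * builds pages on a local list and hands the whole list back on an error
 * path.  The function and list names here are hypothetical.
 */
static void example_abort_readahead(struct list_head *page_pool)
{
	/* Drops the reference each page acquired at allocation and
	 * unlinks it from @page_pool, leaving the list empty. */
	put_pages_list(page_pool);
}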

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to the
 * tail of the inactive list.  The page still has PageWriteback set, which
 * will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid placing the page at the tail of the LRU while PG_writeback is
 * still set, this function will clear PG_writeback before performing the
 * page motion.  Do that inside the lru lock, because once PG_writeback is
 * cleared we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
	struct zone *zone;
	unsigned long flags;

	if (PageLocked(page))
		return 1;
	if (PageDirty(page))
		return 1;
	if (PageActive(page))
		return 1;
	if (!PageLRU(page))
		return 1;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lru_lock, flags);
	if (PageLRU(page) && !PageActive(page)) {
		list_move_tail(&page->lru, &zone->inactive_list);
		__count_vm_event(PGROTATED);
	}
	if (!test_clear_page_writeback(page))
		BUG();
	spin_unlock_irqrestore(&zone->lru_lock, flags);
	return 0;
}
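
/*
 * Sketch of the intended caller pattern, modelled on end_page_writeback()
 * and not part of the original file: only when the page was marked
 * PG_reclaim and the rotate succeeded (returned zero, i.e. it cleared
 * PG_writeback itself) may the caller skip clearing the writeback bit.
 */
static void example_end_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	/* ...the real caller would wake PG_writeback waiters here... */
}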

/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		__count_vm_event(PGACTIVATE);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
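
/*
 * Illustrative example, not part of the original file: two touches promote
 * a page.  The first call sets PG_referenced; the second, seeing the page
 * inactive+referenced, moves it to the active list and clears the flag.
 */
static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced   -> active,unreferenced */
}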

/**
 * lru_cache_add - add a page to the page lists
 * @page: the page to add
 */
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

void fastcall lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}

void fastcall lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}
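
/*
 * Hedged sketch, not part of the original file: how a fault handler might
 * publish a freshly allocated page directly onto the active list (cf. the
 * anonymous-fault path), so a just-touched page is not an immediate
 * reclaim candidate.
 */
static void example_publish_new_page(struct page *page)
{
	lru_cache_add_active(page);	/* takes its own reference via page_cache_get() */
}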

static void __lru_add_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

	/* CPU is dead, so no locking needed. */
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
	__lru_add_drain(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(void *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}
EXPORT_SYMBOL(__page_cache_release);

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}
			BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
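
/*
 * Illustrative caller, not part of the original file: dropping the
 * references taken by a prior gang lookup (e.g. get_user_pages()) in one
 * batched call instead of nr individual page_cache_release()s.
 */
static void example_put_gang(struct page **pages, int nr)
{
	release_pages(pages, nr, 0);	/* 0: pages are not known to be cache-cold */
}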

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && !TestSetPageLocked(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages
 * pages in the mapping.  The pages are placed in @pvec.  pagevec_lookup()
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indices.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
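
/*
 * Illustrative iteration pattern (cf. truncate_inode_pages()), not part of
 * the original file: walk a mapping in PAGEVEC_SIZE batches, releasing the
 * gang reference after each round.
 */
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			next = page->index + 1;	/* resume after this page */
			/* ...inspect @page here... */
		}
		pagevec_release(&pvec);	/* drops the gang references */
	}
}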

/*
 * Like pagevec_lookup(), but only returns pages with @tag set; @index is
 * advanced past the last page looked up, ready for the next search.
 */
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}
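
/*
 * Sketch of the accounting pattern (cf. __vm_enough_memory()), not part of
 * the original file; vm_unacct_memory() is just vm_acct_memory(-pages).
 * The @allowed parameter stands in for a real overcommit-policy check.
 */
static int example_charge(long pages, int allowed)
{
	vm_acct_memory(pages);			/* optimistically charge */
	if (!allowed) {
		vm_unacct_memory(pages);	/* roll the charge back */
		return -ENOMEM;
	}
	return 0;
}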

#ifdef CONFIG_HOTPLUG_CPU
/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD) {
		atomic_add(*committed, &vm_committed_space);
		*committed = 0;
		__lru_add_drain((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
	hotcpu_notifier(cpu_swap_callback, 0);
}
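
/*
 * Hypothetical helper, for illustration only and not part of the original
 * file: swap read-ahead brings in 1 << page_cluster pages at a time, so the
 * values chosen above mean a window of 4 pages on small machines and 8
 * pages otherwise (cf. swapin_readahead()).
 */
static unsigned long example_readahead_window(void)
{
	return 1UL << page_cluster;
}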