/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>  /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
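
/*
 * A compound page's reference count lives in its head page: page_private()
 * of each sub-page points back at the head, and the head keeps its
 * destructor in page[1].lru.next.  Dropping a reference therefore always
 * operates on the head page's count.
 */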
static void put_compound_page(struct page *page)
{
        page = (struct page *)page_private(page);
        if (put_page_testzero(page)) {
                void (*dtor)(struct page *page);

                dtor = (void (*)(struct page *))page[1].lru.next;
                (*dtor)(page);
        }
}

void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
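
/*
 * Usage sketch (illustrative only, not part of this file): a short-lived
 * reference is taken with get_page() and dropped with put_page(); when the
 * count reaches zero the page is handed back through __page_cache_release().
 * The helper name example_peek_page() is hypothetical.
 */
static void example_peek_page(struct page *page)
{
        get_page(page);                 /* pin the page for the duration */
        /* ... read or copy data from the page ... */
        put_page(page);                 /* drop the pin; may free the page */
}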

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.  The page still has PageWriteback set, which will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid leaving the page at the tail of the LRU with PG_writeback still
 * set, this function clears PG_writeback itself.  That is done inside the
 * lru lock, because once PG_writeback is cleared we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
        struct zone *zone;
        unsigned long flags;

        if (PageLocked(page))
                return 1;
        if (PageDirty(page))
                return 1;
        if (PageActive(page))
                return 1;
        if (!PageLRU(page))
                return 1;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
                list_move_tail(&page->lru, &zone->inactive_list);
                __count_vm_event(PGROTATED);
        }
        if (!test_clear_page_writeback(page))
                BUG();
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        return 0;
}
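
/*
 * Illustrative sketch, not part of this file: rotate_reclaimable_page()
 * returns non-zero only when it did not clear PG_writeback, so a caller on
 * the writeback-completion path (assumed here; the helper name
 * example_end_writeback() is hypothetical) must clear the flag itself in
 * that case.
 */
static void example_end_writeback(struct page *page)
{
        if (rotate_reclaimable_page(page)) {
                /* rotation was skipped, so end writeback by hand */
                if (!test_clear_page_writeback(page))
                        BUG();
        }
}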

/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        if (PageLRU(page) && !PageActive(page)) {
                del_page_from_inactive_list(zone, page);
                SetPageActive(page);
                add_page_to_active_list(zone, page);
                __count_vm_event(PGACTIVATE);
        }
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}
EXPORT_SYMBOL(mark_page_accessed);
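
/*
 * Illustrative sketch, not part of this file: two accesses while a page
 * sits unreferenced on the inactive LRU list are enough to promote it to
 * the active list.  The helper name example_touch_twice() is hypothetical.
 */
static void example_touch_twice(struct page *page)
{
        mark_page_accessed(page);       /* inactive,unreferenced -> inactive,referenced */
        mark_page_accessed(page);       /* inactive,referenced -> active,unreferenced */
}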

static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

/**
 * lru_cache_add - add a page to the page lists
 * @page: the page to add
 */
void fastcall lru_cache_add(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec);
        put_cpu_var(lru_add_pvecs);
}
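
/*
 * Like lru_cache_add(), but the page is queued for the zone's active list.
 */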
void fastcall lru_cache_add_active(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add_active(pvec);
        put_cpu_var(lru_add_active_pvecs);
}

static void __lru_add_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

        /* Called for a dead CPU, or with the local CPU pinned, so no locking needed. */
        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);
        pvec = &per_cpu(lru_add_active_pvecs, cpu);
        if (pagevec_count(pvec))
                __pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
        __lru_add_drain(get_cpu());
        put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(void *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        lru_add_drain();
        return 0;
}
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru(zone, page);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
        free_hot_page(page);
}
EXPORT_SYMBOL(__page_cache_release);

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irq(&zone->lru_lock);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irq(&zone->lru_lock);
                                zone = pagezone;
                                spin_lock_irq(&zone->lru_lock);
                        }
                        BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
                }

                if (!pagevec_add(&pages_to_free, page)) {
                        if (zone) {
                                spin_unlock_irq(&zone->lru_lock);
                                zone = NULL;
                        }
                        __pagevec_free(&pages_to_free);
                        pagevec_reinit(&pages_to_free);
                }
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);

        pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known not to be on the LRU.
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
        int i;
        struct pagevec pages_to_free;

        pagevec_init(&pages_to_free, pvec->cold);
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                BUG_ON(PageLRU(page));
                if (put_page_testzero(page))
                        pagevec_add(&pages_to_free, page);
        }
        pagevec_free(&pages_to_free);
        pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                BUG_ON(PageLRU(page));
                SetPageLRU(page);
                add_page_to_inactive_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_lru_add);
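
/*
 * Same as __pagevec_lru_add(), except that the pages go onto the zone's
 * active list rather than the inactive list.
 */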
void __pagevec_lru_add_active(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                BUG_ON(PageLRU(page));
                SetPageLRU(page);
                BUG_ON(PageActive(page));
                SetPageActive(page);
                add_page_to_active_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (PagePrivate(page) && !TestSetPageLocked(page)) {
                        if (PagePrivate(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
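
/*
 * Like pagevec_lookup(), but only pages carrying the given radix-tree @tag
 * are returned, and @index is advanced past the last page found so the next
 * call continues where this one left off.
 */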
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);
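
/*
 * Usage sketch (illustrative only, not part of this file): a caller walks an
 * address_space in PAGEVEC_SIZE batches with pagevec_lookup() and drops the
 * gang-lookup references with pagevec_release().  The helper name
 * example_walk_mapping() is hypothetical.
 */
static void example_walk_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
                int i;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* ... inspect the page ... */
                        index = page->index + 1;        /* resume after this page */
                }
                pagevec_release(&pvec);
        }
}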

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD  max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;
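
/*
 * Add @pages to this CPU's cached delta of committed address space; once the
 * local delta grows beyond ACCT_THRESHOLD in either direction it is folded
 * into the global vm_committed_space counter.
 */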
void vm_acct_memory(long pages)
{
        long *local;

        preempt_disable();
        local = &__get_cpu_var(committed_space);
        *local += pages;
        if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
                atomic_add(*local, &vm_committed_space);
                *local = 0;
        }
        preempt_enable();
}
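
/*
 * Usage sketch (illustrative only, not part of this file): callers charge
 * committed address space when a mapping is set up and give it back on
 * teardown; vm_unacct_memory() from <linux/swap.h> is simply
 * vm_acct_memory(-pages).  The helper name example_account_mapping() is
 * hypothetical.
 */
static void example_account_mapping(unsigned long len)
{
        long pages = len >> PAGE_SHIFT;

        vm_acct_memory(pages);          /* charge the new mapping */
        /* ... mapping lives here ... */
        vm_unacct_memory(pages);        /* uncharge on teardown */
}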

#ifdef CONFIG_HOTPLUG_CPU
/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        long *committed;

        committed = &per_cpu(committed_space, (long)hcpu);
        if (action == CPU_DEAD) {
                atomic_add(*committed, &vm_committed_space);
                *committed = 0;
                __lru_add_drain((long)hcpu);
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
        hotcpu_notifier(cpu_swap_callback, 0);
}