/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h> /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
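
/*
 * A compound (higher-order) page keeps its reference count on the head
 * page: page_private() of each constituent page points back at the head,
 * and the compound destructor is stashed in page[1].lru.next.  Drop one
 * reference on the head page and invoke the destructor when the count
 * reaches zero.
 */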
static void put_compound_page(struct page *page)
{
        page = (struct page *)page_private(page);
        if (put_page_testzero(page)) {
                void (*dtor)(struct page *page);

                dtor = (void (*)(struct page *))page[1].lru.next;
                (*dtor)(page);
        }
}
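
/*
 * Drop a reference to a page, freeing it via __page_cache_release() when
 * the count reaches zero.  Compound pages are handed to put_compound_page()
 * so the whole higher-order unit is torn down through its destructor.
 */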
void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.  The page still has PageWriteback set, which will pin it.
 *
 * We don't expect many pages to come through here, so don't bother batching
 * things up.
 *
 * To avoid placing the page at the tail of the LRU while PG_writeback is still
 * set, this function will clear PG_writeback before performing the page
 * motion.  Do that inside the lru lock because once PG_writeback is cleared
 * we may not touch the page.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
        struct zone *zone;
        unsigned long flags;

        if (PageLocked(page))
                return 1;
        if (PageDirty(page))
                return 1;
        if (PageActive(page))
                return 1;
        if (!PageLRU(page))
                return 1;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
                list_del(&page->lru);
                list_add_tail(&page->lru, &zone->inactive_list);
                inc_page_state(pgrotated);
        }
        if (!test_clear_page_writeback(page))
                BUG();
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        return 0;
}

/*
 * FIXME: speed this up?
 */
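/*
 * Move an inactive page onto its zone's active list and mark it
 * PageActive, holding the zone's lru_lock with interrupts disabled.
 */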
void fastcall activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        if (PageLRU(page) && !PageActive(page)) {
                del_page_from_inactive_list(zone, page);
                SetPageActive(page);
                add_page_to_active_list(zone, page);
                inc_page_state(pgactivate);
        }
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced   -> active,unreferenced
 * active,unreferenced   -> active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}
EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };

void fastcall lru_cache_add(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec);
        put_cpu_var(lru_add_pvecs);
}
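
/*
 * Like lru_cache_add(), but queue the page for addition to the zone's
 * active list instead of the inactive list.
 */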
void fastcall lru_cache_add_active(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add_active(pvec);
        put_cpu_var(lru_add_active_pvecs);
}
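
/*
 * Flush @cpu's deferred-LRU pagevecs onto the inactive and active lists.
 * The caller must ensure the pagevecs cannot be used concurrently: @cpu is
 * either the local CPU (with preemption disabled) or a CPU that has gone
 * offline.
 */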
static void __lru_add_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

        /* CPU is dead, so no locking needed. */
        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);
        pvec = &per_cpu(lru_add_active_pvecs, cpu);
        if (pagevec_count(pvec))
                __pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
        __lru_add_drain(get_cpu());
        put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(void *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        lru_add_drain();
        return 0;
}
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        if (TestClearPageLRU(page))
                del_page_from_lru(zone, page);
        if (page_count(page) != 0)
                page = NULL;
        spin_unlock_irqrestore(&zone->lru_lock, flags);
        if (page)
                free_hot_page(page);
}
EXPORT_SYMBOL(__page_cache_release);

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
                struct zone *pagezone;

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irq(&zone->lru_lock);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                pagezone = page_zone(page);
                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestClearPageLRU(page))
                        del_page_from_lru(zone, page);
                if (page_count(page) == 0) {
                        if (!pagevec_add(&pages_to_free, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_free(&pages_to_free);
                                pagevec_reinit(&pages_to_free);
                                zone = NULL;    /* No lock is held */
                        }
                }
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);

        pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
        int i;
        struct pagevec pages_to_free;

        pagevec_init(&pages_to_free, pvec->cold);
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                BUG_ON(PageLRU(page));
                if (put_page_testzero(page))
                        pagevec_add(&pages_to_free, page);
        }
        pagevec_free(&pages_to_free);
        pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestSetPageLRU(page))
                        BUG();
                add_page_to_inactive_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_lru_add);
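
/*
 * As __pagevec_lru_add(), but the pages are marked PageActive and placed
 * on the zone's active list rather than the inactive list.
 */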
void __pagevec_lru_add_active(struct pagevec *pvec)
{
        int i;
        struct zone *zone = NULL;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
                if (TestSetPageLRU(page))
                        BUG();
                if (TestSetPageActive(page))
                        BUG();
                add_page_to_active_list(zone, page);
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (PagePrivate(page) && !TestSetPageLocked(page)) {
                        if (PagePrivate(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
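
/*
 * Like pagevec_lookup(), but only returns pages carrying the radix-tree
 * tag @tag.  find_get_pages_tag() advances @index past the last page
 * returned, so repeated calls can continue the scan.
 */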
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD  max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;
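
/*
 * Account @pages (which may be negative) towards the systemwide committed
 * VM total.  The delta is accumulated in a per-CPU counter and only folded
 * into the global atomic vm_committed_space once it exceeds ACCT_THRESHOLD,
 * keeping the common case free of cross-CPU traffic.
 */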
void vm_acct_memory(long pages)
{
        long *local;

        preempt_disable();
        local = &__get_cpu_var(committed_space);
        *local += pages;
        if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
                atomic_add(*local, &vm_committed_space);
                *local = 0;
        }
        preempt_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        long *committed;

        committed = &per_cpu(committed_space, (long)hcpu);
        if (action == CPU_DEAD) {
                atomic_add(*committed, &vm_committed_space);
                *committed = 0;
                __lru_add_drain((long)hcpu);
        }
        return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
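/*
 * Apply @amount to a distributed counter.  Small changes stay in the
 * calling CPU's local count; once the local value reaches +/- FBC_BATCH it
 * is folded into fbc->count under fbc->lock, bounding the error seen by
 * percpu_counter_read() while keeping updates cheap.
 */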
void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
        long count;
        long *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
long percpu_counter_sum(struct percpu_counter *fbc)
{
        long ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_cpu(cpu) {
                long *pcount = per_cpu_ptr(fbc->counters, cpu);

                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);
#endif

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
        hotcpu_notifier(cpu_swap_callback, 0);
}