/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};
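
/*
 * Note: pages in the swap cache are indexed in swapper_space.page_tree by the
 * raw swp_entry_t value, and the entry itself is stashed in page_private()
 * rather than in page->index.  An illustrative lookup (not code from this
 * file) therefore looks like:
 *
 *      swp_entry_t entry = { .val = page_private(page) };
 *      struct page *p = find_get_page(&swapper_space, entry.val);
 *
 * which is exactly what lookup_swap_cache() below does.
 */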

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages);
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        spin_lock_irq(&swapper_space.tree_lock);
        error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
        if (likely(!error)) {
                total_swapcache_pages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&swapper_space.tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(), so add_to_swap_cache()
                 * doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}
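
/*
 * add_to_swap_cache() is the sleeping wrapper around __add_to_swap_cache():
 * radix_tree_preload() allocates the radix-tree node(s) up front under
 * gfp_mask, so the insertion itself never has to allocate memory while
 * holding swapper_space.tree_lock with interrupts disabled.
 */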
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page(page))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator.  __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
                return 0;
        }
}
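
/*
 * Illustrative sketch, not part of this file: the typical add_to_swap()
 * caller is the anonymous-page branch of shrink_page_list() in mm/vmscan.c,
 * which (roughly, details vary by kernel version) does
 *
 *      if (PageAnon(page) && !PageSwapCache(page)) {
 *              if (!add_to_swap(page))
 *                      goto activate_locked;
 *      }
 *
 * with the page already locked, as required above.
 */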

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swapcache_free(entry, page);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}
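
/*
 * Illustrative note, not a complete list of callers: the single-page form
 * above serves code that drops one mapping at a time, while the batched
 * free_pages_and_swap_cache() is what the mmu_gather machinery
 * (tlb_flush_mmu() and friends) uses when tearing down whole ranges, so the
 * swap cache is trimmed as the accumulated pages are released.
 */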

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}
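
/*
 * Illustrative sketch, not part of this file: the page-fault path in
 * mm/memory.c's do_swap_page() uses this pair of helpers roughly as
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page)
 *              page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *                                      vma, address);
 *
 * so a cache hit is counted here, and a miss falls back to readahead plus a
 * synchronous read of the target entry.  Exact details vary by version.
 */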

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {   /* seems racy */
                        radix_tree_preload_end();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
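
/*
 * Illustrative note: the size of the readahead window above comes from
 * valid_swaphandles(), which clusters reads around the target entry in
 * blocks of up to (1 << page_cluster) pages; page_cluster is the tunable
 * exposed as /proc/sys/vm/page_cluster.  This comment is explanatory only.
 */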