/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= RW_LOCK_UNLOCKED,
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

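/*
 * Illustration (hypothetical helper, not part of this file):
 * swapper_space keys its radix tree by swp_entry_t.val rather than
 * by file offset, so a swap-cache lookup is just a find_get_page()
 * on swapper_space with the entry value used as the index.
 */
#if 0	/* example only */
static struct page *swap_cache_peek(swp_entry_t entry)
{
	/* entry.val packs the swap type and offset into one index */
	return find_get_page(&swapper_space, entry.val);
}
#endif
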
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

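/*
 * Sample output (hypothetical numbers).  nr_swap_pages and
 * total_swap_pages count pages; the << (PAGE_SHIFT - 10) converts
 * pages to kilobytes (with 4096-byte pages, PAGE_SHIFT is 12, so
 * this is a << 2, i.e. 4kB per page):
 *
 *	Swap cache: add 15842, delete 15320, find 9231/12904, race 3+17
 *	Free swap  = 1048572kB
 *	Total swap = 2097144kB
 */
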
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

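/*
 * Note on the pattern above: radix_tree_preload() fills a per-cpu
 * pool of tree nodes up front (the allocation may sleep, depending
 * on gfp_mask) and returns with preemption disabled, so the
 * radix_tree_insert() done under the write-locked, irq-disabled
 * tree_lock never has to allocate.  radix_tree_preload_end() simply
 * re-enables preemption.
 */
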
static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU; we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	if (!PageLocked(page))
		BUG();

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}

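/*
 * Sketch of the expected caller (modelled on vmscan's shrink_list;
 * simplified and hypothetical, other reclaim steps omitted).  On
 * success the page is dirty and in the swap cache, so the normal
 * pageout path writes it out via swap_aops.writepage:
 */
#if 0	/* example only */
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page))
			goto activate_locked;	/* no swap slot available */
	}
#endif
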
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

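/*
 * TestSetPageLocked() is a trylock: it atomically sets PG_locked and
 * returns the previous value, so the body runs only when we took the
 * page lock without sleeping.  If someone else holds the lock we just
 * skip the free; this is an opportunistic optimisation, not a
 * correctness requirement.
 */
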
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	int chunk = 16;
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(chunk, nr);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

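/*
 * The chunking to 16 pages at a time bounds how much work each
 * release_pages() call batches under the zone LRU lock it takes
 * internally, presumably to keep lock hold times reasonable when the
 * caller hands us an arbitrarily large array.
 */
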
/*
 * Lookup a swap entry in the swap cache.  A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);
	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
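
/*
 * Typical use (hypothetical, simplified from a do_swap_page-style
 * fault handler): try the swap cache first, then fall back to the
 * asynchronous read.  A page returned from a freshly initiated read
 * stays locked until the I/O completion handler unlocks it, so the
 * lock_page() below effectively waits for the read to finish.
 */
#if 0	/* example only */
	page = lookup_swap_cache(entry);
	if (!page) {
		page = read_swap_cache_async(entry, vma, address);
		if (!page)
			return VM_FAULT_OOM;	/* or: entry was freed, retry */
	}
	lock_page(page);	/* waits for swap_readpage() I/O */
#endif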