frontswap.c (9.0 KB)
  1. /*
  2. * Frontswap frontend
  3. *
  4. * This code provides the generic "frontend" layer to call a matching
  5. * "backend" driver implementation of frontswap. See
  6. * Documentation/vm/frontswap.txt for more information.
  7. *
  8. * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
  9. * Author: Dan Magenheimer
  10. *
  11. * This work is licensed under the terms of the GNU GPL, version 2.
  12. */
  13. #include <linux/mm.h>
  14. #include <linux/mman.h>
  15. #include <linux/swap.h>
  16. #include <linux/swapops.h>
  17. #include <linux/proc_fs.h>
  18. #include <linux/security.h>
  19. #include <linux/capability.h>
  20. #include <linux/module.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/frontswap.h>
  24. #include <linux/swapfile.h>
/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops frontswap_ops __read_mostly;

/*
 * This global enablement flag reduces overhead on systems where frontswap_ops
 * has not been registered, so is preferred to the slower alternative: a
 * function call that checks a non-global.
 */
bool frontswap_enabled __read_mostly;
EXPORT_SYMBOL(frontswap_enabled);

/*
 * If enabled, frontswap_store will return failure even on success. As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache. In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;
#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured). These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;		/* successful loads from the backend */
static u64 frontswap_succ_stores;	/* stores accepted by the backend */
static u64 frontswap_failed_stores;	/* stores rejected by the backend */
static u64 frontswap_invalidates;	/* explicit page invalidations */

/* Plain non-atomic increments; races only skew the debug counts. */
static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
/* Without debugfs the counter hooks compile away to nothing. */
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
  74. /*
  75. * Register operations for frontswap, returning previous thus allowing
  76. * detection of multiple backends and possible nesting.
  77. */
  78. struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
  79. {
  80. struct frontswap_ops old = frontswap_ops;
  81. frontswap_ops = *ops;
  82. frontswap_enabled = true;
  83. return old;
  84. }
  85. EXPORT_SYMBOL(frontswap_register_ops);
  86. /*
  87. * Enable/disable frontswap writethrough (see above).
  88. */
  89. void frontswap_writethrough(bool enable)
  90. {
  91. frontswap_writethrough_enabled = enable;
  92. }
  93. EXPORT_SYMBOL(frontswap_writethrough);
  94. /*
  95. * Called when a swap device is swapon'd.
  96. */
  97. void __frontswap_init(unsigned type)
  98. {
  99. struct swap_info_struct *sis = swap_info[type];
  100. BUG_ON(sis == NULL);
  101. if (sis->frontswap_map == NULL)
  102. return;
  103. if (frontswap_enabled)
  104. frontswap_ops.init(type);
  105. }
  106. EXPORT_SYMBOL(__frontswap_init);
  107. /*
  108. * "Store" data from a page to frontswap and associate it with the page's
  109. * swaptype and offset. Page must be locked and in the swap cache.
  110. * If frontswap already contains a page with matching swaptype and
  111. * offset, the frontswap implmentation may either overwrite the data and
  112. * return success or invalidate the page from frontswap and return failure.
  113. */
  114. int __frontswap_store(struct page *page)
  115. {
  116. int ret = -1, dup = 0;
  117. swp_entry_t entry = { .val = page_private(page), };
  118. int type = swp_type(entry);
  119. struct swap_info_struct *sis = swap_info[type];
  120. pgoff_t offset = swp_offset(entry);
  121. BUG_ON(!PageLocked(page));
  122. BUG_ON(sis == NULL);
  123. if (frontswap_test(sis, offset))
  124. dup = 1;
  125. ret = frontswap_ops.store(type, offset, page);
  126. if (ret == 0) {
  127. frontswap_set(sis, offset);
  128. inc_frontswap_succ_stores();
  129. if (!dup)
  130. atomic_inc(&sis->frontswap_pages);
  131. } else if (dup) {
  132. /*
  133. failed dup always results in automatic invalidate of
  134. the (older) page from frontswap
  135. */
  136. frontswap_clear(sis, offset);
  137. atomic_dec(&sis->frontswap_pages);
  138. inc_frontswap_failed_stores();
  139. } else {
  140. inc_frontswap_failed_stores();
  141. }
  142. if (frontswap_writethrough_enabled)
  143. /* report failure so swap also writes to swap device */
  144. ret = -1;
  145. return ret;
  146. }
  147. EXPORT_SYMBOL(__frontswap_store);
  148. /*
  149. * "Get" data from frontswap associated with swaptype and offset that were
  150. * specified when the data was put to frontswap and use it to fill the
  151. * specified page with data. Page must be locked and in the swap cache.
  152. */
  153. int __frontswap_load(struct page *page)
  154. {
  155. int ret = -1;
  156. swp_entry_t entry = { .val = page_private(page), };
  157. int type = swp_type(entry);
  158. struct swap_info_struct *sis = swap_info[type];
  159. pgoff_t offset = swp_offset(entry);
  160. BUG_ON(!PageLocked(page));
  161. BUG_ON(sis == NULL);
  162. if (frontswap_test(sis, offset))
  163. ret = frontswap_ops.load(type, offset, page);
  164. if (ret == 0)
  165. inc_frontswap_loads();
  166. return ret;
  167. }
  168. EXPORT_SYMBOL(__frontswap_load);
  169. /*
  170. * Invalidate any data from frontswap associated with the specified swaptype
  171. * and offset so that a subsequent "get" will fail.
  172. */
  173. void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
  174. {
  175. struct swap_info_struct *sis = swap_info[type];
  176. BUG_ON(sis == NULL);
  177. if (frontswap_test(sis, offset)) {
  178. frontswap_ops.invalidate_page(type, offset);
  179. atomic_dec(&sis->frontswap_pages);
  180. frontswap_clear(sis, offset);
  181. inc_frontswap_invalidates();
  182. }
  183. }
  184. EXPORT_SYMBOL(__frontswap_invalidate_page);
  185. /*
  186. * Invalidate all data from frontswap associated with all offsets for the
  187. * specified swaptype.
  188. */
  189. void __frontswap_invalidate_area(unsigned type)
  190. {
  191. struct swap_info_struct *sis = swap_info[type];
  192. BUG_ON(sis == NULL);
  193. if (sis->frontswap_map == NULL)
  194. return;
  195. frontswap_ops.invalidate_area(type);
  196. atomic_set(&sis->frontswap_pages, 0);
  197. memset(sis->frontswap_map, 0, sis->max / sizeof(long));
  198. }
  199. EXPORT_SYMBOL(__frontswap_invalidate_area);
  200. static unsigned long __frontswap_curr_pages(void)
  201. {
  202. int type;
  203. unsigned long totalpages = 0;
  204. struct swap_info_struct *si = NULL;
  205. assert_spin_locked(&swap_lock);
  206. for (type = swap_list.head; type >= 0; type = si->next) {
  207. si = swap_info[type];
  208. totalpages += atomic_read(&si->frontswap_pages);
  209. }
  210. return totalpages;
  211. }
/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to attempt to -- subject to memory
 * constraints -- reduce the number of pages in frontswap to the
 * number given in the parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages = 0, total_pages_to_unuse;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;
	bool locked = false;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	/* 'locked' tracks whether the out: path still owns swap_lock */
	locked = true;
	total_pages = __frontswap_curr_pages();
	/* already at or below the target: nothing to shrink */
	if (total_pages <= target_pages)
		goto out;
	total_pages_to_unuse = total_pages - target_pages;
	/* pick the first device we can satisfy the shrink request from */
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			/* partial unuse of this device covers the target */
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages))
			continue;
		/* accounting charge was only a probe; release it again */
		vm_unacct_memory(pages);
		break;
	}
	/* loop exhausted without finding a usable device */
	if (type < 0)
		goto out;
	/* drop swap_lock before the potentially very long try_to_unuse */
	locked = false;
	spin_unlock(&swap_lock);
	try_to_unuse(type, true, pages_to_unuse);
out:
	if (locked)
		spin_unlock(&swap_lock);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
  265. /*
  266. * Count and return the number of frontswap pages across all
  267. * swap devices. This is exported so that backend drivers can
  268. * determine current usage without reading debugfs.
  269. */
  270. unsigned long frontswap_curr_pages(void)
  271. {
  272. unsigned long totalpages = 0;
  273. spin_lock(&swap_lock);
  274. totalpages = __frontswap_curr_pages();
  275. spin_unlock(&swap_lock);
  276. return totalpages;
  277. }
  278. EXPORT_SYMBOL(frontswap_curr_pages);
  279. static int __init init_frontswap(void)
  280. {
  281. #ifdef CONFIG_DEBUG_FS
  282. struct dentry *root = debugfs_create_dir("frontswap", NULL);
  283. if (root == NULL)
  284. return -ENXIO;
  285. debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
  286. debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
  287. debugfs_create_u64("failed_stores", S_IRUGO, root,
  288. &frontswap_failed_stores);
  289. debugfs_create_u64("invalidates", S_IRUGO,
  290. root, &frontswap_invalidates);
  291. #endif
  292. return 0;
  293. }
  294. module_init(init_frontswap);