frontswap.c

/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap. See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops frontswap_ops __read_mostly;

/*
 * This global enablement flag reduces overhead on systems where frontswap_ops
 * has not been registered, so is preferred to the slower alternative: a
 * function call that checks a non-global.
 */
bool frontswap_enabled __read_mostly;
EXPORT_SYMBOL(frontswap_enabled);

/*
 * If enabled, frontswap_store will return failure even on success. As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache. In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured). These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Register operations for frontswap, returning previous thus allowing
 * detection of multiple backends and possible nesting.
 */
struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops old = frontswap_ops;

	frontswap_ops = *ops;
	frontswap_enabled = true;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
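
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * backend driver might register its callbacks at module init time. The
 * names example_ops, example_init, example_store, example_load,
 * example_invalidate_page, example_invalidate_area and
 * example_backend_init are hypothetical; a real backend (e.g. zcache)
 * supplies its own functions with the signatures declared in
 * linux/frontswap.h and may also opt into writethrough mode via
 * frontswap_writethrough().
 *
 *	static struct frontswap_ops example_ops = {
 *		.init			= example_init,
 *		.store			= example_store,
 *		.load			= example_load,
 *		.invalidate_page	= example_invalidate_page,
 *		.invalidate_area	= example_invalidate_area,
 *	};
 *
 *	static int __init example_backend_init(void)
 *	{
 *		struct frontswap_ops old = frontswap_register_ops(&example_ops);
 *
 *		if (old.init != NULL)
 *			pr_warn("frontswap backend already registered\n");
 *		frontswap_writethrough(false);
 *		return 0;
 *	}
 *	module_init(example_backend_init);
 */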

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (sis->frontswap_map == NULL)
		return;
	if (frontswap_enabled)
		(*frontswap_ops.init)(type);
}
EXPORT_SYMBOL(__frontswap_init);

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset. Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset))
		dup = 1;
	ret = (*frontswap_ops.store)(type, offset, page);
	if (ret == 0) {
		frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else if (dup) {
		/*
		 * failed dup always results in automatic invalidate of
		 * the (older) page from frontswap
		 */
		frontswap_clear(sis, offset);
		atomic_dec(&sis->frontswap_pages);
		inc_frontswap_failed_stores();
	} else
		inc_frontswap_failed_stores();
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
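
/*
 * Illustrative sketch (not part of the original file): how the swap
 * writeout path is expected to consume the return value, via the
 * frontswap_store() inline wrapper from linux/frontswap.h. A zero
 * return means the page is safely held by the backend and the device
 * write can be skipped; any other value (including the forced -1 in
 * writethrough mode) means the page must still be written to the swap
 * device. Paraphrased from the swap_writepage() caller; the fall-through
 * bio submission and error handling are elided.
 *
 *	if (frontswap_store(page) == 0) {
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		end_page_writeback(page);
 *		goto out;
 *	}
 *	... otherwise submit the page to the swap device as usual ...
 */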

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data. Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset))
		ret = (*frontswap_ops.load)(type, offset, page);
	if (ret == 0)
		inc_frontswap_loads();
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);
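
/*
 * Illustrative sketch (not part of the original file): the mirror of the
 * store path above. The swap read path calls the frontswap_load() inline
 * wrapper; a zero return means the page was filled from the backend and
 * no device read is needed. Paraphrased from the swap_readpage() caller;
 * the fall-through device read is elided.
 *
 *	if (frontswap_load(page) == 0) {
 *		SetPageUptodate(page);
 *		unlock_page(page);
 *		goto out;
 *	}
 *	... otherwise read the page from the swap device as usual ...
 */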

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset)) {
		(*frontswap_ops.invalidate_page)(type, offset);
		atomic_dec(&sis->frontswap_pages);
		frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (sis->frontswap_map == NULL)
		return;
	(*frontswap_ops.invalidate_area)(type);
	atomic_set(&sis->frontswap_pages, 0);
	memset(sis->frontswap_map, 0, sis->max / sizeof(long));
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to unuse enough
 * frontswap pages to reduce -- subject to memory constraints -- the
 * number of pages in frontswap to the number given in the parameter
 * target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages = 0, total_pages_to_unuse;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;
	bool locked = false;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	locked = true;
	total_pages = 0;
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		total_pages += atomic_read(&si->frontswap_pages);
	}
	if (total_pages <= target_pages)
		goto out;
	total_pages_to_unuse = total_pages - target_pages;
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages)
			pages = pages_to_unuse = total_pages_to_unuse;
		else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages))
			continue;
		vm_unacct_memory(pages);
		break;
	}
	if (type < 0)
		goto out;
	locked = false;
	spin_unlock(&swap_lock);
	try_to_unuse(type, true, pages_to_unuse);
out:
	if (locked)
		spin_unlock(&swap_lock);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);

/*
 * Count and return the number of frontswap pages across all
 * swap devices. This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	int type;
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	spin_lock(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		totalpages += atomic_read(&si->frontswap_pages);
	}
	spin_unlock(&swap_lock);
	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);
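
/*
 * Illustrative sketch (not part of the original file): a backend might
 * combine frontswap_curr_pages() and frontswap_shrink() to shed, say,
 * an eighth of its frontswap pages when it comes under memory pressure.
 * The function name example_backend_under_pressure is hypothetical.
 *
 *	static void example_backend_under_pressure(void)
 *	{
 *		unsigned long cur = frontswap_curr_pages();
 *
 *		if (cur)
 *			frontswap_shrink(cur - cur / 8);
 *	}
 */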

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);

	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
			   &frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
			   root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);