/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/bitmap.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops frontswap_ops __read_mostly;

/*
 * This global enablement flag reduces overhead on systems where frontswap_ops
 * has not been registered, so is preferred to the slower alternative: a
 * function call that checks a non-global.
 */
bool frontswap_enabled __read_mostly;
EXPORT_SYMBOL(frontswap_enabled);

/*
 * If enabled, frontswap_store will return failure even on success.  As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache.  In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not
 * protected against increment races.
 */
static u64 frontswap_loads;		/* successful loads from backend */
static u64 frontswap_succ_stores;	/* stores accepted by the backend */
static u64 frontswap_failed_stores;	/* stores rejected by the backend */
static u64 frontswap_invalidates;	/* single-page invalidations */

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
/* Without debugfs the counters are dead weight; compile them away. */
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif
  76. /*
  77. * Register operations for frontswap, returning previous thus allowing
  78. * detection of multiple backends and possible nesting.
  79. */
  80. struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
  81. {
  82. struct frontswap_ops old = frontswap_ops;
  83. frontswap_ops = *ops;
  84. frontswap_enabled = true;
  85. return old;
  86. }
  87. EXPORT_SYMBOL(frontswap_register_ops);
  88. /*
  89. * Enable/disable frontswap writethrough (see above).
  90. */
  91. void frontswap_writethrough(bool enable)
  92. {
  93. frontswap_writethrough_enabled = enable;
  94. }
  95. EXPORT_SYMBOL(frontswap_writethrough);
  96. /*
  97. * Enable/disable frontswap exclusive gets (see above).
  98. */
  99. void frontswap_tmem_exclusive_gets(bool enable)
  100. {
  101. frontswap_tmem_exclusive_gets_enabled = enable;
  102. }
  103. EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
/*
 * Called when a swap device is swapon'd.  Gives the backend a chance to
 * set up per-device state, but only if the device actually has a
 * frontswap bitmap allocated.
 *
 * NOTE(review): frontswap_ops.init is called without a NULL check;
 * presumably callers are gated on frontswap_enabled (set only by
 * frontswap_register_ops) so ops.init is valid here — confirm against
 * the wrappers in include/linux/frontswap.h.
 */
void __frontswap_init(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	/* No bitmap means this device does not participate in frontswap. */
	if (sis->frontswap_map == NULL)
		return;
	frontswap_ops.init(type);
}
EXPORT_SYMBOL(__frontswap_init);
  116. static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
  117. {
  118. frontswap_clear(sis, offset);
  119. atomic_dec(&sis->frontswap_pages);
  120. }
/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 *
 * Returns 0 when the backend accepted the page, -1 otherwise — and
 * always -1 in writethrough mode, so the swap subsystem still writes
 * the page to the real swap device.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/* Remember whether this (type, offset) is already in frontswap. */
	if (frontswap_test(sis, offset))
		dup = 1;
	ret = frontswap_ops.store(type, offset, page);
	if (ret == 0) {
		frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
		/* Count the page only once, even for an overwriting store. */
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else {
		/*
		 * failed dup always results in automatic invalidate of
		 * the (older) page from frontswap
		 */
		inc_frontswap_failed_stores();
		if (dup)
			__frontswap_clear(sis, offset);
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data.  Page must be locked and in the swap cache.
 *
 * Returns 0 on a successful load, -1 if the page was not in frontswap
 * or the backend load failed.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/* Only ask the backend for pages the bitmap says it holds. */
	if (frontswap_test(sis, offset))
		ret = frontswap_ops.load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			/*
			 * The backend discarded its copy on the get, so the
			 * page must be marked dirty (to be rewritten if it
			 * is evicted again) and cleared from the bitmap.
			 */
			SetPageDirty(page);
			frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);
/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.  No-op if the page was
 * never stored in frontswap.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset)) {
		frontswap_ops.invalidate_page(type, offset);
		__frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);
  201. /*
  202. * Invalidate all data from frontswap associated with all offsets for the
  203. * specified swaptype.
  204. */
  205. void __frontswap_invalidate_area(unsigned type)
  206. {
  207. struct swap_info_struct *sis = swap_info[type];
  208. BUG_ON(sis == NULL);
  209. if (sis->frontswap_map == NULL)
  210. return;
  211. frontswap_ops.invalidate_area(type);
  212. atomic_set(&sis->frontswap_pages, 0);
  213. memset(sis->frontswap_map, 0, sis->max / sizeof(long));
  214. }
  215. EXPORT_SYMBOL(__frontswap_invalidate_area);
/*
 * Sum frontswap_pages over every active swap device by walking
 * swap_list.  Caller must hold swap_lock (the walk follows si->next
 * links that swapon/swapoff can change).
 */
static unsigned long __frontswap_curr_pages(void)
{
	int type;
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		totalpages += atomic_read(&si->frontswap_pages);
	}
	return totalpages;
}
/*
 * Pick one swap device to partially (or fully) unuse in order to pull
 * roughly @total pages out of frontswap.  On success (return 0),
 * *swapid is the chosen swap type and *unused is the pages_to_unuse
 * argument for try_to_unuse (0 meaning "unuse everything on that
 * device").  Returns -EINVAL if no device was found, -ENOMEM if no
 * device had enough accountable RAM to absorb the fetched pages.
 * Caller must hold swap_lock.
 *
 * NOTE(review): the loop breaks after the first device that passes the
 * memory check, so at most one device is shrunk per call, and
 * total_pages_to_unuse is never reduced across iterations — the -ENOMEM
 * "continue" path retries the full total on the next device.  Presumably
 * intentional best-effort behavior; confirm against callers.
 */
static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			/* This device alone can satisfy the request. */
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		/* Only a capacity probe — give the accounting charge back. */
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = type;
		ret = 0;
		break;
	}
	return ret;
}
/*
 * Used to check whether it is necessary and feasible to unuse pages.
 * Returns 1 when there is nothing to do, 0 when pages need to be
 * shrunk (unused), or an error code when there is an error.
 */
  265. static int __frontswap_shrink(unsigned long target_pages,
  266. unsigned long *pages_to_unuse,
  267. int *type)
  268. {
  269. unsigned long total_pages = 0, total_pages_to_unuse;
  270. assert_spin_locked(&swap_lock);
  271. total_pages = __frontswap_curr_pages();
  272. if (total_pages <= target_pages) {
  273. /* Nothing to do */
  274. *pages_to_unuse = 0;
  275. return 1;
  276. }
  277. total_pages_to_unuse = total_pages - target_pages;
  278. return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
  279. }
/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages to attempt to -- subject to memory
 * constraints -- reduce the number of pages in frontswap to the
 * number given in the parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	/* type is only read when ret == 0, where __frontswap_shrink set it */
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	/* ret == 0: a device was chosen; unuse it outside the lock. */
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
  305. /*
  306. * Count and return the number of frontswap pages across all
  307. * swap devices. This is exported so that backend drivers can
  308. * determine current usage without reading debugfs.
  309. */
  310. unsigned long frontswap_curr_pages(void)
  311. {
  312. unsigned long totalpages = 0;
  313. spin_lock(&swap_lock);
  314. totalpages = __frontswap_curr_pages();
  315. spin_unlock(&swap_lock);
  316. return totalpages;
  317. }
  318. EXPORT_SYMBOL(frontswap_curr_pages);
/*
 * Module init: expose the (optional) debugfs counters under
 * /sys/kernel/debug/frontswap.  Returns -ENXIO if the directory
 * cannot be created, 0 otherwise.
 */
static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);

	if (root == NULL)
		return -ENXIO;
	/* Read-only u64 counters; see the declarations above. */
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
				&frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);