@@ -11,15 +11,11 @@
* This work is licensed under the terms of the GNU GPL, version 2.
*/

-#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
-#include <linux/proc_fs.h>
#include <linux/security.h>
-#include <linux/capability.h>
#include <linux/module.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
@@ -110,16 +106,21 @@ void __frontswap_init(unsigned type)
BUG_ON(sis == NULL);
if (sis->frontswap_map == NULL)
return;
- if (frontswap_enabled)
- (*frontswap_ops.init)(type);
+ frontswap_ops.init(type);
}
EXPORT_SYMBOL(__frontswap_init);

+static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+ frontswap_clear(sis, offset);
+ atomic_dec(&sis->frontswap_pages);
+}
+
/*
* "Store" data from a page to frontswap and associate it with the page's
* swaptype and offset. Page must be locked and in the swap cache.
* If frontswap already contains a page with matching swaptype and
- * offset, the frontswap implmentation may either overwrite the data and
+ * offset, the frontswap implementation may either overwrite the data and
* return success or invalidate the page from frontswap and return failure.
*/
int __frontswap_store(struct page *page)
@@ -134,22 +135,21 @@ int __frontswap_store(struct page *page)
BUG_ON(sis == NULL);
if (frontswap_test(sis, offset))
dup = 1;
- ret = (*frontswap_ops.store)(type, offset, page);
+ ret = frontswap_ops.store(type, offset, page);
if (ret == 0) {
frontswap_set(sis, offset);
inc_frontswap_succ_stores();
if (!dup)
atomic_inc(&sis->frontswap_pages);
- } else if (dup) {
+ } else {
/*
failed dup always results in automatic invalidate of
the (older) page from frontswap
*/
- frontswap_clear(sis, offset);
- atomic_dec(&sis->frontswap_pages);
- inc_frontswap_failed_stores();
- } else
inc_frontswap_failed_stores();
+ if (dup)
+ __frontswap_clear(sis, offset);
+ }
if (frontswap_writethrough_enabled)
/* report failure so swap also writes to swap device */
ret = -1;
@@ -173,7 +173,7 @@ int __frontswap_load(struct page *page)
BUG_ON(!PageLocked(page));
BUG_ON(sis == NULL);
if (frontswap_test(sis, offset))
- ret = (*frontswap_ops.load)(type, offset, page);
+ ret = frontswap_ops.load(type, offset, page);
if (ret == 0)
inc_frontswap_loads();
return ret;
@@ -190,9 +190,8 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)

BUG_ON(sis == NULL);
if (frontswap_test(sis, offset)) {
- (*frontswap_ops.invalidate_page)(type, offset);
- atomic_dec(&sis->frontswap_pages);
- frontswap_clear(sis, offset);
+ frontswap_ops.invalidate_page(type, offset);
+ __frontswap_clear(sis, offset);
inc_frontswap_invalidates();
}
}
@@ -209,67 +208,102 @@ void __frontswap_invalidate_area(unsigned type)
BUG_ON(sis == NULL);
if (sis->frontswap_map == NULL)
return;
- (*frontswap_ops.invalidate_area)(type);
+ frontswap_ops.invalidate_area(type);
atomic_set(&sis->frontswap_pages, 0);
memset(sis->frontswap_map, 0, sis->max / sizeof(long));
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

-/*
- * Frontswap, like a true swap device, may unnecessarily retain pages
- * under certain circumstances; "shrink" frontswap is essentially a
- * "partial swapoff" and works by calling try_to_unuse to attempt to
- * unuse enough frontswap pages to attempt to -- subject to memory
- * constraints -- reduce the number of pages in frontswap to the
- * number given in the parameter target_pages.
- */
-void frontswap_shrink(unsigned long target_pages)
+static unsigned long __frontswap_curr_pages(void)
{
- struct swap_info_struct *si = NULL;
- int si_frontswap_pages;
- unsigned long total_pages = 0, total_pages_to_unuse;
- unsigned long pages = 0, pages_to_unuse = 0;
int type;
- bool locked = false;
+ unsigned long totalpages = 0;
+ struct swap_info_struct *si = NULL;

- /*
- * we don't want to hold swap_lock while doing a very
- * lengthy try_to_unuse, but swap_list may change
- * so restart scan from swap_list.head each time
- */
- spin_lock(&swap_lock);
- locked = true;
- total_pages = 0;
+ assert_spin_locked(&swap_lock);
for (type = swap_list.head; type >= 0; type = si->next) {
si = swap_info[type];
- total_pages += atomic_read(&si->frontswap_pages);
+ totalpages += atomic_read(&si->frontswap_pages);
}
- if (total_pages <= target_pages)
- goto out;
- total_pages_to_unuse = total_pages - target_pages;
+ return totalpages;
+}
+
+static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+ int *swapid)
+{
+ int ret = -EINVAL;
+ struct swap_info_struct *si = NULL;
+ int si_frontswap_pages;
+ unsigned long total_pages_to_unuse = total;
+ unsigned long pages = 0, pages_to_unuse = 0;
+ int type;
+
+ assert_spin_locked(&swap_lock);
for (type = swap_list.head; type >= 0; type = si->next) {
si = swap_info[type];
si_frontswap_pages = atomic_read(&si->frontswap_pages);
- if (total_pages_to_unuse < si_frontswap_pages)
+ if (total_pages_to_unuse < si_frontswap_pages) {
pages = pages_to_unuse = total_pages_to_unuse;
- else {
+ } else {
pages = si_frontswap_pages;
pages_to_unuse = 0; /* unuse all */
}
/* ensure there is enough RAM to fetch pages from frontswap */
- if (security_vm_enough_memory_mm(current->mm, pages))
+ if (security_vm_enough_memory_mm(current->mm, pages)) {
+ ret = -ENOMEM;
continue;
+ }
vm_unacct_memory(pages);
+ *unused = pages_to_unuse;
+ *swapid = type;
+ ret = 0;
break;
}
- if (type < 0)
- goto out;
- locked = false;
+
+ return ret;
+}
+
+static int __frontswap_shrink(unsigned long target_pages,
+ unsigned long *pages_to_unuse,
+ int *type)
+{
+ unsigned long total_pages = 0, total_pages_to_unuse;
+
+ assert_spin_locked(&swap_lock);
+
+ total_pages = __frontswap_curr_pages();
+ if (total_pages <= target_pages) {
+ /* Nothing to do */
+ *pages_to_unuse = 0;
+ return 0;
+ }
+ total_pages_to_unuse = total_pages - target_pages;
+ return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
+}
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff" and works by calling try_to_unuse to attempt to
+ * unuse enough frontswap pages to attempt to -- subject to memory
+ * constraints -- reduce the number of pages in frontswap to the
+ * number given in the parameter target_pages.
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+ unsigned long pages_to_unuse = 0;
+ int type, ret;
+
+ /*
+ * we don't want to hold swap_lock while doing a very
+ * lengthy try_to_unuse, but swap_list may change
+ * so restart scan from swap_list.head each time
+ */
+ spin_lock(&swap_lock);
+ ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
spin_unlock(&swap_lock);
- try_to_unuse(type, true, pages_to_unuse);
-out:
- if (locked)
- spin_unlock(&swap_lock);
+ if (ret == 0 && pages_to_unuse)
+ try_to_unuse(type, true, pages_to_unuse);
return;
}
EXPORT_SYMBOL(frontswap_shrink);
@@ -281,16 +315,12 @@ EXPORT_SYMBOL(frontswap_shrink);
*/
unsigned long frontswap_curr_pages(void)
{
- int type;
unsigned long totalpages = 0;
- struct swap_info_struct *si = NULL;

spin_lock(&swap_lock);
- for (type = swap_list.head; type >= 0; type = si->next) {
- si = swap_info[type];
- totalpages += atomic_read(&si->frontswap_pages);
- }
+ totalpages = __frontswap_curr_pages();
spin_unlock(&swap_lock);
+
return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);