|
@@ -44,6 +44,13 @@ EXPORT_SYMBOL(frontswap_enabled);
|
|
|
*/
|
|
|
static bool frontswap_writethrough_enabled __read_mostly;
|
|
|
|
|
|
+/*
|
|
|
+ * If enabled, the underlying tmem implementation is capable of doing
|
|
|
+ * exclusive gets, so frontswap_load, on a successful tmem_get must
|
|
|
+ * mark the page as no longer in frontswap AND mark it dirty.
|
|
|
+ */
|
|
|
+static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
|
|
|
+
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
/*
|
|
|
* Counters available via /sys/kernel/debug/frontswap (if debugfs is
|
|
@@ -96,6 +103,15 @@ void frontswap_writethrough(bool enable)
|
|
|
}
|
|
|
EXPORT_SYMBOL(frontswap_writethrough);
|
|
|
|
|
|
+/*
|
|
|
+ * Enable/disable frontswap exclusive gets (see above).
|
|
|
+ */
|
|
|
+void frontswap_tmem_exclusive_gets(bool enable)
|
|
|
+{
|
|
|
+ frontswap_tmem_exclusive_gets_enabled = enable;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
|
|
|
+
|
|
|
/*
|
|
|
* Called when a swap device is swapon'd.
|
|
|
*/
|
|
@@ -174,8 +190,13 @@ int __frontswap_load(struct page *page)
|
|
|
BUG_ON(sis == NULL);
|
|
|
if (frontswap_test(sis, offset))
|
|
|
ret = frontswap_ops.load(type, offset, page);
|
|
|
- if (ret == 0)
|
|
|
+ if (ret == 0) {
|
|
|
inc_frontswap_loads();
|
|
|
+ if (frontswap_tmem_exclusive_gets_enabled) {
|
|
|
+ SetPageDirty(page);
|
|
|
+ frontswap_clear(sis, offset);
|
|
|
+ }
|
|
|
+ }
|
|
|
return ret;
|
|
|
}
|
|
|
EXPORT_SYMBOL(__frontswap_load);
|
|
@@ -263,6 +284,11 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Used to check if it's necessary and feasible to unuse pages.
|
|
|
+ * Return 1 when nothing to do, 0 when pages need to be shrunk,
|
|
|
+ * error code when there is an error.
|
|
|
+ */
|
|
|
static int __frontswap_shrink(unsigned long target_pages,
|
|
|
unsigned long *pages_to_unuse,
|
|
|
int *type)
|
|
@@ -275,7 +301,7 @@ static int __frontswap_shrink(unsigned long target_pages,
|
|
|
if (total_pages <= target_pages) {
|
|
|
/* Nothing to do */
|
|
|
*pages_to_unuse = 0;
|
|
|
- return 0;
|
|
|
+ return 1;
|
|
|
}
|
|
|
total_pages_to_unuse = total_pages - target_pages;
|
|
|
return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
|
|
@@ -292,7 +318,7 @@ static int __frontswap_shrink(unsigned long target_pages,
|
|
|
void frontswap_shrink(unsigned long target_pages)
|
|
|
{
|
|
|
unsigned long pages_to_unuse = 0;
|
|
|
- int type, ret;
|
|
|
+ int uninitialized_var(type), ret;
|
|
|
|
|
|
/*
|
|
|
* we don't want to hold swap_lock while doing a very
|
|
@@ -302,7 +328,7 @@ void frontswap_shrink(unsigned long target_pages)
|
|
|
spin_lock(&swap_lock);
|
|
|
ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
|
|
|
spin_unlock(&swap_lock);
|
|
|
- if (ret == 0 && pages_to_unuse)
|
|
|
+ if (ret == 0)
|
|
|
try_to_unuse(type, true, pages_to_unuse);
|
|
|
return;
|
|
|
}
|