@@ -11,11 +11,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -24,6 +20,34 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+	tmem_enabled = true;
+	return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL 0
 #define TMEM_NEW_POOL 1
 #define TMEM_DESTROY_POOL 2
@@ -129,16 +153,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
 	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-	tmem_enabled = true;
-	return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +244,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-	disable_cleancache = true;
-	return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
 	.put_page = tmem_cleancache_put_page,
 	.get_page = tmem_cleancache_get_page,
@@ -361,20 +361,6 @@ static void tmem_frontswap_init(unsigned ignored)
 	xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-	disable_frontswap = true;
-	return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
 	.store = tmem_frontswap_store,
 	.load = tmem_frontswap_load,
@@ -382,8 +368,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
 };
-#else /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,7 +375,7 @@ static int xen_tmem_init(void)
 	if (!xen_domain())
 		return 0;
 #ifdef CONFIG_FRONTSWAP
-	if (tmem_enabled && !disable_frontswap) {
+	if (tmem_enabled && frontswap) {
 		char *s = "";
 		struct frontswap_ops *old_ops =
 			frontswap_register_ops(&tmem_frontswap_ops);
@@ -408,7 +392,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
 	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-	if (tmem_enabled && !disable_cleancache) {
+	if (tmem_enabled && cleancache) {
 		char *s = "";
 		struct cleancache_ops *old_ops =
 			cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +403,15 @@ static int xen_tmem_init(void)
 	}
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-	xen_selfballoon_init(!disable_selfballooning,
-			     !disable_frontswap_selfshrinking);
+	/*
+	 * There is no point in driving pages to the swap system if they
+	 * aren't going anywhere in the tmem universe.
+	 */
+	if (!frontswap) {
+		selfshrinking = false;
+		selfballooning = false;
+	}
+	xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
 	return 0;
 }
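
For reference, the positive-logic knob pattern this patch converges on (a bool module_param defaulting to true, with a __setup() hook kept for the built-in case) looks roughly like the standalone sketch below. This is a minimal illustration against a hypothetical "demo" driver, not part of the patch; the names demo, feature, demo_enabled and enable_demo are invented for the example.

/*
 * Minimal sketch (hypothetical "demo" driver, not from this patch):
 * a positive-logic bool parameter that defaults to on, readable via
 * /sys/module/demo/parameters/feature, plus a kernel command line
 * switch for the built-in case, mirroring the tmem knobs above.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/stat.h>

static bool feature __read_mostly = true;
module_param(feature, bool, S_IRUGO);	/* "feature=0" turns it off */

#ifndef MODULE
static bool demo_enabled __read_mostly;

/* Built in: enable with "demo" on the kernel command line. */
static int __init enable_demo(char *s)
{
	demo_enabled = true;
	return 1;
}
__setup("demo", enable_demo);
#endif

static int __init demo_init(void)
{
	pr_info("demo: feature %s\n", feature ? "enabled" : "disabled");
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");

Built as a module, the parameter is set at load time (e.g. modprobe demo feature=0); built in, it can be given on the kernel command line as demo.feature=0. That dual behaviour is what lets the patch drop the old disable_* booleans and the nocleancache/nofrontswap __setup() handlers in favour of one positively named parameter per feature.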