tmem.c

/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif
#ifdef CONFIG_CLEANCACHE
static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);
#endif
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool disable_frontswap __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);
#endif
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#endif
#endif /* CONFIG_XEN_SELFBALLOONING */
#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
#define TMEM_SPEC_VERSION          1

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};
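
/*
 * Note: a tmem object id is 192 bits (3 * u64), the same size as a
 * struct cleancache_filekey; xen_tmem_init() below BUG_ONs if the two
 * ever differ, because the cleancache key is reinterpreted in place
 * as an oid.
 */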
#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST	1
#define TMEM_POOL_SHARED	2

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
	u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
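
/*
 * Worked example for xen_tmem_new_pool() above: with pagesize == 4096
 * the loop leaves pageshift == 12, so bits 4..7 of the flags encode
 * (12 - 12) == 0 (one standard 4K page per tmem page) and the spec
 * version lands in bits 24..31, e.g. for a persistent private pool:
 *
 *	flags = TMEM_POOL_PERSIST
 *		| (0 << TMEM_POOL_PAGESIZE_SHIFT)
 *		| (TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT);
 */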
/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
	u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}
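
/*
 * Note on the two helpers above: a PV guest must hand the hypervisor
 * a machine frame number, while an HVM guest's guest frame number is
 * simply its pseudo-physical pfn, so pfn_to_mfn() is applied only in
 * the PV case.
 */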
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}
/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
	pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
	pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
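
/*
 * In tmem_cleancache_init_shared_fs() above, the 16-byte filesystem
 * uuid supplied by cleancache (e.g. an ocfs2 volume uuid) is simply
 * reinterpreted as two native-endian u64 words: bytes 0..7 become
 * uuid_lo and bytes 8..15 become uuid_hi. Guests sharing the
 * filesystem thus derive the same pool uuid and attach to the same
 * shared tmem pool.
 */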
static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif
#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
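
/*
 * Example: with SWIZ_BITS == 4, swap type 1 and page offset 0x12345
 * map to object id _oswiz(1, 0x12345) == (1 << 4) | 0x5 == 0x15 and
 * per-object index iswiz(0x12345) == 0x1234, so consecutive swap
 * offsets fan out across 16 distinct tmem objects per swap type.
 */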
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}
/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
	struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}
/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
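
/*
 * Note: because oswiz() spreads each swap type's pages over
 * 2^SWIZ_BITS (here 16) tmem objects, invalidating a whole swap area
 * means flushing every one of those objects, as the loop above does.
 */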
static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
			xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
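
/*
 * tmem_frontswap_poolid is forced to -1 in xen_tmem_init() before the
 * ops are registered, so the first init() callback creates the single
 * persistent pool; later callbacks for additional swap devices reuse
 * it.
 */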
static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif
static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && !disable_frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && !disable_cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);

		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
			"Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(!disable_selfballooning,
			     !disable_frontswap_selfshrinking);
#endif
	return 0;
}
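
/*
 * Usage note: in a built-in (non-modular) configuration the shim is
 * activated by booting with the "tmem" kernel parameter, and the
 * individual backends can be turned off with "nocleancache" and
 * "nofrontswap"; when built as a module, the disable_* module
 * parameters declared above provide the same knobs.
 */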
module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");