tmem.c

/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif
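
/*
 * When tmem support is built into the kernel, it is enabled with the "tmem"
 * boot parameter; the "nocleancache" and "nofrontswap" boot parameters below
 * disable the individual shims.  When built as a module, the equivalent
 * knobs are the disable_* module parameters.
 */
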
#ifdef CONFIG_CLEANCACHE
static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
	disable_cleancache = true;
	return 1;
}
__setup("nocleancache", no_cleancache);
#endif
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool disable_frontswap __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
	disable_frontswap = true;
	return 1;
}
__setup("nofrontswap", no_frontswap);
#endif
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_FRONTSWAP
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#else
#define disable_frontswap_selfshrinking 1
#endif
#endif /* CONFIG_FRONTSWAP */
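
/*
 * Subcommand numbers passed to HYPERVISOR_tmem_op() in tmem_op.cmd; only
 * NEW_POOL, DESTROY_POOL, PUT_PAGE, GET_PAGE, FLUSH_PAGE and FLUSH_OBJECT
 * are used by this shim.
 */
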
#define TMEM_CONTROL		0
#define TMEM_NEW_POOL		1
#define TMEM_DESTROY_POOL	2
#define TMEM_NEW_PAGE		3
#define TMEM_PUT_PAGE		4
#define TMEM_GET_PAGE		5
#define TMEM_FLUSH_PAGE		6
#define TMEM_FLUSH_OBJECT	7
#define TMEM_READ		8
#define TMEM_WRITE		9
#define TMEM_XCHG		10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST		1
#define TMEM_POOL_SHARED		2
#define TMEM_POOL_PAGESIZE_SHIFT	4
#define TMEM_VERSION_SHIFT		24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* flags for tmem_ops.new_pool */
#define TMEM_POOL_PERSIST	1
#define TMEM_POOL_SHARED	2

/* xen tmem foundation ops/hypercalls */
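
/*
 * xen_tmem_op() marshals one request into a struct tmem_op and issues it
 * with the HYPERVISOR_tmem_op() hypercall.  The (pool, oid, index) triple
 * names a single page-sized object in the hypervisor; gmfn identifies the
 * guest page frame involved in the operation.
 */
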
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
			      u32 index, unsigned long gmfn, u32 tmem_offset,
			      u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
			     u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
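
/*
 * Worked example (illustrative): for a pool of PAGE_SIZE == 4096 bytes the
 * loop above yields pageshift == 12, so (pageshift - 12) contributes 0 at
 * TMEM_POOL_PAGESIZE_SHIFT, and TMEM_SPEC_VERSION occupies the bits at
 * TMEM_VERSION_SHIFT and above.
 */
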
/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
			   gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
			   gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
			   0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
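
/*
 * Cleancache shim: the cleancache_filekey handed down by the VFS is
 * reinterpreted as a struct tmem_oid (the two sizes are checked with a
 * BUG_ON in xen_tmem_init()), so each (pool, file key, page index) maps
 * to one ephemeral tmem page.
 */
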
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif
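
/*
 * Frontswap shim: unlike cleancache, swap pages go into a single persistent
 * pool (TMEM_POOL_PERSIST, created in tmem_frontswap_init()), so a page
 * that was stored successfully is expected to remain available until it is
 * explicitly flushed.
 */
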
#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
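
/*
 * Worked example (illustrative, not from the original source): with
 * SWIZ_BITS == 4, a page at swap offset 0x37 in swap type 2 is stored as
 * oid.oid[0] == _oswiz(2, 0x37) == (2 << 4) | 0x7 == 0x27, at page index
 * iswiz(0x37) == 3, so consecutive offsets are spread across 16 objects.
 */
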
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
			       struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif
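
/*
 * Init entry point: when running in a Xen domain with tmem enabled, register
 * the frontswap and/or cleancache ops defined above, then optionally start
 * selfballooning.
 */
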
static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && !disable_frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops =
			frontswap_register_ops(&tmem_frontswap_ops);

		tmem_frontswap_poolid = -1;
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
		       "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && !disable_cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);

		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
		       "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	xen_selfballoon_init(!disable_selfballooning,
			     !disable_frontswap_selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");