virtio_balloon.c

/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>

/*
 * Balloon device works in 4K page units.  So each page is pointed to by
 * multiple balloon pages.  All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256

struct virtio_balloon
{
        struct virtio_device *vdev;
        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;

        /* Where the ballooning thread waits for config to change. */
        wait_queue_head_t config_change;

        /* The thread servicing the balloon. */
        struct task_struct *thread;

        /* Waiting for host to ack the pages we released. */
        wait_queue_head_t acked;

        /* Number of balloon pages we've told the Host we're not using. */
        unsigned int num_pages;
        /*
         * The pages we've told the Host we're not using are enqueued
         * at vb_dev_info->pages list.
         * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
         * to num_pages above.
         */
        struct balloon_dev_info *vb_dev_info;

        /* Synchronize access/update to this struct virtio_balloon elements */
        struct mutex balloon_lock;

        /* The array of pfns we tell the Host about. */
        unsigned int num_pfns;
        u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

        /* Memory statistics */
        int need_stats_update;
        struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
};

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
        /* Convert pfn from Linux page size to balloon page size. */
        return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

static struct page *balloon_pfn_to_page(u32 pfn)
{
        BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
        return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
}

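/*
 * Callback for the inflate/deflate virtqueues: the host has consumed the
 * buffer we posted, so wake up whoever is waiting in tell_host().
 */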
static void balloon_ack(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        wake_up(&vb->acked);
}

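/*
 * Hand the current pfn array to the host on the given virtqueue, kick it,
 * and sleep until the host acknowledges the buffer via balloon_ack().
 */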
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
        struct scatterlist sg;
        unsigned int len;

        sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

        /* We should always be able to add one buffer to an empty queue. */
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);

        /* When host has read buffer, this completes via balloon_ack */
        wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static void set_page_pfns(u32 pfns[], struct page *page)
{
        unsigned int i;

        /* Set balloon pfns pointing at this page.
         * Note that the first pfn points at start of the page. */
        for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
                pfns[i] = page_to_balloon_pfn(page) + i;
}

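/*
 * Inflate the balloon by up to @num balloon pages: take pages away from the
 * guest, queue them on the balloon's page list and tell the host their pfns
 * on the inflate queue.
 */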
static void fill_balloon(struct virtio_balloon *vb, size_t num)
{
        struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                struct page *page = balloon_page_enqueue(vb_dev_info);

                if (!page) {
                        dev_info_ratelimited(&vb->vdev->dev,
                                             "Out of puff! Can't get %u pages\n",
                                             VIRTIO_BALLOON_PAGES_PER_PAGE);
                        /* Sleep for at least 1/5 of a second before retry. */
                        msleep(200);
                        break;
                }
                set_page_pfns(vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
                totalram_pages--;
        }

        /* Did we get any? */
        if (vb->num_pfns != 0)
                tell_host(vb, vb->inflate_vq);
        mutex_unlock(&vb->balloon_lock);
}

static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
{
        unsigned int i;

        /* Find pfns pointing at start of each page, get pages and free them. */
        for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                balloon_page_free(balloon_pfn_to_page(pfns[i]));
                totalram_pages++;
        }
}

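/*
 * Deflate the balloon by up to @num balloon pages: dequeue pages from the
 * balloon's page list, tell the host about them on the deflate queue, then
 * give the pages back to the guest.
 */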
static void leak_balloon(struct virtio_balloon *vb, size_t num)
{
        struct page *page;
        struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;

        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));

        mutex_lock(&vb->balloon_lock);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
                page = balloon_page_dequeue(vb_dev_info);
                if (!page)
                        break;
                set_page_pfns(vb->pfns + vb->num_pfns, page);
                vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
        }

        /*
         * Note that if
         * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
         * is true, we *have* to do it in this order
         */
        tell_host(vb, vb->deflate_vq);
        mutex_unlock(&vb->balloon_lock);
        release_pages_by_pfn(vb->pfns, vb->num_pfns);
}

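/* Fill in one slot of the stats array that gets handed to the host. */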
static inline void update_stat(struct virtio_balloon *vb, int idx,
                               u16 tag, u64 val)
{
        BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
        vb->stats[idx].tag = tag;
        vb->stats[idx].val = val;
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

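/* Refresh the stats array from the guest's VM event counters and meminfo. */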
static void update_balloon_stats(struct virtio_balloon *vb)
{
        unsigned long events[NR_VM_EVENT_ITEMS];
        struct sysinfo i;
        int idx = 0;

        all_vm_events(events);
        si_meminfo(&i);

        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
                    pages_to_bytes(events[PSWPIN]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
                    pages_to_bytes(events[PSWPOUT]));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
                    pages_to_bytes(i.freeram));
        update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
                    pages_to_bytes(i.totalram));
}

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we notify our kthread which does the actual work via stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        vb->need_stats_update = 1;
        wake_up(&vb->config_change);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
        struct virtqueue *vq;
        struct scatterlist sg;
        unsigned int len;

        vb->need_stats_update = 0;
        update_balloon_stats(vb);

        vq = vb->stats_vq;
        if (!virtqueue_get_buf(vq, &len))
                return;
        sg_init_one(&sg, vb->stats, sizeof(vb->stats));
        if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
                BUG();
        virtqueue_kick(vq);
}

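/* Config-change callback: wake the balloon thread so it re-reads the target. */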
static void virtballoon_changed(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        wake_up(&vb->config_change);
}

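/*
 * Read the target balloon size from the device's config space and return
 * how far we are from it, in balloon pages (positive means inflate,
 * negative means deflate).
 */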
static inline s64 towards_target(struct virtio_balloon *vb)
{
        __le32 v;
        s64 target;

        vb->vdev->config->get(vb->vdev,
                              offsetof(struct virtio_balloon_config, num_pages),
                              &v, sizeof(v));
        target = le32_to_cpu(v);
        return target - vb->num_pages;
}

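/* Report the current balloon size back to the host via the "actual" field. */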
static void update_balloon_size(struct virtio_balloon *vb)
{
        __le32 actual = cpu_to_le32(vb->num_pages);

        vb->vdev->config->set(vb->vdev,
                              offsetof(struct virtio_balloon_config, actual),
                              &actual, sizeof(actual));
}

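/*
 * The balloon servicing kthread: sleep until the target changes, a stats
 * request arrives or we are asked to stop or freeze, then inflate or deflate
 * towards the target and report the new size to the host.
 */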
static int balloon(void *_vballoon)
{
        struct virtio_balloon *vb = _vballoon;

        set_freezable();
        while (!kthread_should_stop()) {
                s64 diff;

                try_to_freeze();
                wait_event_interruptible(vb->config_change,
                                         (diff = towards_target(vb)) != 0
                                         || vb->need_stats_update
                                         || kthread_should_stop()
                                         || freezing(current));
                if (vb->need_stats_update)
                        stats_handle_request(vb);
                if (diff > 0)
                        fill_balloon(vb, diff);
                else if (diff < 0)
                        leak_balloon(vb, -diff);
                update_balloon_size(vb);
        }
        return 0;
}

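/*
 * Find the inflate and deflate virtqueues (plus the optional stats queue)
 * and, if the stats queue is present, prime it with a single buffer.
 */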
static int init_vqs(struct virtio_balloon *vb)
{
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
        const char *names[] = { "inflate", "deflate", "stats" };
        int err, nvqs;

        /*
         * We expect two virtqueues: inflate and deflate, and
         * optionally stat.
         */
        nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
        err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
        if (err)
                return err;

        vb->inflate_vq = vqs[0];
        vb->deflate_vq = vqs[1];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
                vb->stats_vq = vqs[2];

                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
                 * use it to signal us later.
                 */
                sg_init_one(&sg, vb->stats, sizeof vb->stats);
                if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
                    < 0)
                        BUG();
                virtqueue_kick(vb->stats_vq);
        }
        return 0;
}

static const struct address_space_operations virtio_balloon_aops;
#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *                           a compaction thread.      (called under page lock)
 * @mapping: the page->mapping which will be assigned to the new migrated page.
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
int virtballoon_migratepage(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
        struct virtio_balloon *vb;
        unsigned long flags;

        BUG_ON(!vb_dev_info);

        vb = vb_dev_info->balloon_device;

        /*
         * In order to avoid lock contention while migrating pages concurrently
         * to leak_balloon() or fill_balloon() we just give up the balloon_lock
         * this turn, as it is easier to retry the page migration later.
         * This also prevents fill_balloon() getting stuck into a mutex
         * recursion in the case it ends up triggering memory compaction
         * while it is attempting to inflate the balloon.
         */
        if (!mutex_trylock(&vb->balloon_lock))
                return -EAGAIN;

        /* balloon's page migration 1st step -- inflate "newpage" */
        spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
        balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
        vb_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb->pfns, newpage);
        tell_host(vb, vb->inflate_vq);

        /*
         * balloon's page migration 2nd step -- deflate "page"
         *
         * It's safe to delete page->lru here because this page is at
         * an isolated migration list, and this step is expected to happen here
         */
        balloon_page_delete(page);
        vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
        set_page_pfns(vb->pfns, page);
        tell_host(vb, vb->deflate_vq);

        mutex_unlock(&vb->balloon_lock);

        return MIGRATEPAGE_BALLOON_SUCCESS;
}

/* define the balloon_mapping->a_ops callback to allow balloon page migration */
static const struct address_space_operations virtio_balloon_aops = {
        .migratepage = virtballoon_migratepage,
};
#endif /* CONFIG_BALLOON_COMPACTION */

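/*
 * Device probe: allocate the balloon state, set up the balloon mapping used
 * for page migration (when compaction is available), find the virtqueues and
 * start the servicing kthread.
 */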
static int virtballoon_probe(struct virtio_device *vdev)
{
        struct virtio_balloon *vb;
        struct address_space *vb_mapping;
        struct balloon_dev_info *vb_devinfo;
        int err;

        vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
        if (!vb) {
                err = -ENOMEM;
                goto out;
        }

        vb->num_pages = 0;
        mutex_init(&vb->balloon_lock);
        init_waitqueue_head(&vb->config_change);
        init_waitqueue_head(&vb->acked);
        vb->vdev = vdev;
        vb->need_stats_update = 0;

        vb_devinfo = balloon_devinfo_alloc(vb);
        if (IS_ERR(vb_devinfo)) {
                err = PTR_ERR(vb_devinfo);
                goto out_free_vb;
        }

        vb_mapping = balloon_mapping_alloc(vb_devinfo,
                                           (balloon_compaction_check()) ?
                                           &virtio_balloon_aops : NULL);
        if (IS_ERR(vb_mapping)) {
                /*
                 * PTR_ERR(vb_mapping) == -EOPNOTSUPP simply means
                 * !CONFIG_BALLOON_COMPACTION, which is not an error;
                 * any other error is fatal, so bail out.
                 */
                err = PTR_ERR(vb_mapping);
                if (err != -EOPNOTSUPP)
                        goto out_free_vb_devinfo;
        }

        vb->vb_dev_info = vb_devinfo;

        err = init_vqs(vb);
        if (err)
                goto out_free_vb_mapping;

        vb->thread = kthread_run(balloon, vb, "vballoon");
        if (IS_ERR(vb->thread)) {
                err = PTR_ERR(vb->thread);
                goto out_del_vqs;
        }

        return 0;

out_del_vqs:
        vdev->config->del_vqs(vdev);
out_free_vb_mapping:
        balloon_mapping_free(vb_mapping);
out_free_vb_devinfo:
        balloon_devinfo_free(vb_devinfo);
out_free_vb:
        kfree(vb);
out:
        return err;
}

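/*
 * Teardown shared by remove and freeze: return every ballooned page to the
 * guest, then reset the device and delete its virtqueues.
 */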
static void remove_common(struct virtio_balloon *vb)
{
        /* There might be pages left in the balloon: free them. */
        while (vb->num_pages)
                leak_balloon(vb, vb->num_pages);
        update_balloon_size(vb);

        /* Now we reset the device so we can clean up the queues. */
        vb->vdev->config->reset(vb->vdev);

        vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        kthread_stop(vb->thread);
        remove_common(vb);
        balloon_mapping_free(vb->vb_dev_info->mapping);
        balloon_devinfo_free(vb->vb_dev_info);
        kfree(vb);
}

#ifdef CONFIG_PM
static int virtballoon_freeze(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;

        /*
         * The kthread is already frozen by the PM core before this
         * function is called.
         */
        remove_common(vb);
        return 0;
}

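/*
 * On restore the virtqueues were torn down by freeze, so rebuild them and
 * re-inflate the balloon towards the host's last requested target.
 */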
static int virtballoon_restore(struct virtio_device *vdev)
{
        struct virtio_balloon *vb = vdev->priv;
        int ret;

        ret = init_vqs(vdev->priv);
        if (ret)
                return ret;

        fill_balloon(vb, towards_target(vb));
        update_balloon_size(vb);
        return 0;
}
#endif

static unsigned int features[] = {
        VIRTIO_BALLOON_F_MUST_TELL_HOST,
        VIRTIO_BALLOON_F_STATS_VQ,
};

static struct virtio_driver virtio_balloon_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtballoon_probe,
        .remove = virtballoon_remove,
        .config_changed = virtballoon_changed,
#ifdef CONFIG_PM
        .freeze = virtballoon_freeze,
        .restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");