vhost.c

/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>

#include <net/sock.h>

#include "vhost.h"

enum {
        VHOST_MEMORY_MAX_NREGIONS = 64,
        VHOST_MEMORY_F_LOG = 0x1,
};

static struct workqueue_struct *vhost_workqueue;
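
/* Registered via init_poll_funcptr(): the backing file's poll method calls
 * this to hand us its wait queue head, where we add our wait entry. */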
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;
        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}
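
/* Wait queue callback: if the wakeup event matches the mask we care about,
 * queue the poll's work item on the vhost workqueue. */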
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll;
        poll = container_of(wait, struct vhost_poll, wait);
        if (!((unsigned long)key & poll->mask))
                return 0;
        queue_work(vhost_workqueue, &poll->work);
        return 0;
}

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
                     unsigned long mask)
{
        INIT_WORK(&poll->work, func);
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
}

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        unsigned long mask;
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        remove_wait_queue(poll->wqh, &poll->wait);
}

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        flush_work(&poll->work);
}

void vhost_poll_queue(struct vhost_poll *poll)
{
        queue_work(vhost_workqueue, &poll->work);
}
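
/* Restore a virtqueue to its pristine, unconfigured state. Called both when
 * the device is initialized and when it is cleaned up. */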
static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->hdr_size = 0;
        vq->private_data = NULL;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->error = NULL;
        vq->kick = NULL;
        vq->call_ctx = NULL;
        vq->call = NULL;
}
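
/* Set up device and per-virtqueue state. The kick poller is initialized here
 * for virtqueues that have a handle_kick callback, but polling only starts
 * once VHOST_SET_VRING_KICK supplies an eventfd. */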
long vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue *vqs, int nvqs)
{
        int i;
        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->log_file = NULL;
        dev->memory = NULL;
        dev->mm = NULL;
        for (i = 0; i < dev->nvqs; ++i) {
                dev->vqs[i].dev = dev;
                mutex_init(&dev->vqs[i].mutex);
                vhost_vq_reset(dev, dev->vqs + i);
                if (dev->vqs[i].handle_kick)
                        vhost_poll_init(&dev->vqs[i].poll,
                                        dev->vqs[i].handle_kick,
                                        POLLIN);
        }
        return 0;
}

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}

/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
        /* Is there an owner already? */
        if (dev->mm)
                return -EBUSY;
        /* No owner, become one */
        dev->mm = get_task_mm(current);
        return 0;
}

/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
        struct vhost_memory *memory;

        /* Restore memory to default empty mapping. */
        memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
        if (!memory)
                return -ENOMEM;

        vhost_dev_cleanup(dev);

        memory->nregions = 0;
        dev->memory = memory;
        return 0;
}

/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
        int i;
        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
                        vhost_poll_stop(&dev->vqs[i].poll);
                        vhost_poll_flush(&dev->vqs[i].poll);
                }
                if (dev->vqs[i].error_ctx)
                        eventfd_ctx_put(dev->vqs[i].error_ctx);
                if (dev->vqs[i].error)
                        fput(dev->vqs[i].error);
                if (dev->vqs[i].kick)
                        fput(dev->vqs[i].kick);
                if (dev->vqs[i].call_ctx)
                        eventfd_ctx_put(dev->vqs[i].call_ctx);
                if (dev->vqs[i].call)
                        fput(dev->vqs[i].call);
                vhost_vq_reset(dev, dev->vqs + i);
        }
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        if (dev->log_file)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
        kfree(dev->memory);
        dev->memory = NULL;
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
}
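
/* Check that the userspace dirty-log bitmap at log_base is addressable for
 * guest addresses up to addr + sz. One bit in the log covers one
 * VHOST_PAGE_SIZE page. */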
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;
        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return -EFAULT;

        return access_ok(VERIFY_WRITE, log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
                               int log_all)
{
        int i;
        for (i = 0; i < mem->nregions; ++i) {
                struct vhost_memory_region *m = mem->regions + i;
                unsigned long a = m->userspace_addr;
                if (m->memory_size > ULONG_MAX)
                        return 0;
                else if (!access_ok(VERIFY_WRITE, (void __user *)a,
                                    m->memory_size))
                        return 0;
                else if (log_all && !log_access_ok(log_base,
                                                   m->guest_phys_addr,
                                                   m->memory_size))
                        return 0;
        }
        return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
                            int log_all)
{
        int i;
        for (i = 0; i < d->nvqs; ++i) {
                int ok;
                mutex_lock(&d->vqs[i].mutex);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i].private_data)
                        ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
                                                 log_all);
                else
                        ok = 1;
                mutex_unlock(&d->vqs[i].mutex);
                if (!ok)
                        return 0;
        }
        return 1;
}

static int vq_access_ok(unsigned int num,
                        struct vring_desc __user *desc,
                        struct vring_avail __user *avail,
                        struct vring_used __user *used)
{
        return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
               access_ok(VERIFY_READ, avail,
                         sizeof *avail + num * sizeof *avail->ring) &&
               access_ok(VERIFY_WRITE, used,
                         sizeof *used + num * sizeof *used->ring);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
        return memory_access_ok(dev, dev->memory, 1);
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
        return vq_memory_access_ok(log_base, vq->dev->memory,
                                   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
               (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                               sizeof *vq->used +
                                               vq->num * sizeof *vq->used->ring));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
        return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
               vq_log_access_ok(vq, vq->log_base);
}
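
/* Install a new guest memory table supplied by userspace. The old table is
 * only freed after synchronize_rcu(), so lock-free readers in
 * translate_desc() never see freed memory. */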
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
        struct vhost_memory mem, *newmem, *oldmem;
        unsigned long size = offsetof(struct vhost_memory, regions);
        long r;
        r = copy_from_user(&mem, m, size);
        if (r)
                return r;
        if (mem.padding)
                return -EOPNOTSUPP;
        if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
                return -E2BIG;
        newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
        if (!newmem)
                return -ENOMEM;

        memcpy(newmem, &mem, size);
        r = copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions);
        if (r) {
                kfree(newmem);
                return r;
        }

        if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
                kfree(newmem);
                return -EFAULT;
        }
        oldmem = d->memory;
        rcu_assign_pointer(d->memory, newmem);
        synchronize_rcu();
        kfree(oldmem);
        return 0;
}
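
/* Publish our used-ring flags to the guest-visible ring and read back the
 * current used index. */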
static int init_used(struct vhost_virtqueue *vq,
                     struct vring_used __user *used)
{
        int r = put_user(vq->used_flags, &used->flags);
        if (r)
                return r;
        return get_user(vq->last_used_idx, &used->idx);
}
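
/* Handle the per-virtqueue ioctls: ring size and base, ring addresses, and
 * the kick/call/error eventfds. The first u32 of the argument selects the
 * virtqueue. */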
static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL,
                    *pollstart = NULL, *pollstop = NULL;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        struct vhost_vring_addr a;
        u32 idx;
        long r;

        r = get_user(idx, idxp);
        if (r < 0)
                return r;
        if (idx >= d->nvqs)
                return -ENOBUFS;

        vq = d->vqs + idx;

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_NUM:
                /* Resizing ring with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                r = copy_from_user(&s, argp, sizeof s);
                if (r < 0)
                        break;
                if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
                        r = -EINVAL;
                        break;
                }
                vq->num = s.num;
                break;
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                r = copy_from_user(&s, argp, sizeof s);
                if (r < 0)
                        break;
                if (s.num > 0xffff) {
                        r = -EINVAL;
                        break;
                }
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
                s.num = vq->last_avail_idx;
                r = copy_to_user(argp, &s, sizeof s);
                break;
        case VHOST_SET_VRING_ADDR:
                r = copy_from_user(&a, argp, sizeof a);
                if (r < 0)
                        break;
                if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
                        r = -EOPNOTSUPP;
                        break;
                }
                /* For 32bit, verify that the top 32bits of the user
                   data are set to zero. */
                if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
                    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
                    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
                        r = -EFAULT;
                        break;
                }
                if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
                    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
                    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
                        r = -EINVAL;
                        break;
                }

                /* We only verify access here if backend is configured.
                 * If it is not, we don't as size might not have been setup.
                 * We will verify when backend is configured. */
                if (vq->private_data) {
                        if (!vq_access_ok(vq->num,
                                (void __user *)(unsigned long)a.desc_user_addr,
                                (void __user *)(unsigned long)a.avail_user_addr,
                                (void __user *)(unsigned long)a.used_user_addr)) {
                                r = -EINVAL;
                                break;
                        }

                        /* Also validate log access for used ring if enabled. */
                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
                            !log_access_ok(vq->log_base, a.log_guest_addr,
                                           sizeof *vq->used +
                                           vq->num * sizeof *vq->used->ring)) {
                                r = -EINVAL;
                                break;
                        }
                }

                r = init_used(vq, (struct vring_used __user *)(unsigned long)
                              a.used_user_addr);
                if (r)
                        break;
                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
                vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
                vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
                vq->log_addr = a.log_guest_addr;
                vq->used = (void __user *)(unsigned long)a.used_user_addr;
                break;
        case VHOST_SET_VRING_KICK:
                r = copy_from_user(&f, argp, sizeof f);
                if (r < 0)
                        break;
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->kick) {
                        pollstop = filep = vq->kick;
                        pollstart = vq->kick = eventfp;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_CALL:
                r = copy_from_user(&f, argp, sizeof f);
                if (r < 0)
                        break;
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->call) {
                        filep = vq->call;
                        ctx = vq->call_ctx;
                        vq->call = eventfp;
                        vq->call_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_ERR:
                r = copy_from_user(&f, argp, sizeof f);
                if (r < 0)
                        break;
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->error) {
                        filep = vq->error;
                        vq->error = eventfp;
                        ctx = vq->error_ctx;
                        vq->error_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                break;
        default:
                r = -ENOIOCTLCMD;
        }

        if (pollstop && vq->handle_kick)
                vhost_poll_stop(&vq->poll);

        if (ctx)
                eventfd_ctx_put(ctx);
        if (filep)
                fput(filep);

        if (pollstart && vq->handle_kick)
                vhost_poll_start(&vq->poll, vq->kick);

        mutex_unlock(&vq->mutex);

        if (pollstop && vq->handle_kick)
                vhost_poll_flush(&vq->poll);
        return r;
}

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct file *eventfp, *filep = NULL;
        struct eventfd_ctx *ctx = NULL;
        u64 p;
        long r;
        int i, fd;

        /* If you are not the owner, you can become one */
        if (ioctl == VHOST_SET_OWNER) {
                r = vhost_dev_set_owner(d);
                goto done;
        }

        /* You must be the owner to do anything else */
        r = vhost_dev_check_owner(d);
        if (r)
                goto done;

        switch (ioctl) {
        case VHOST_SET_MEM_TABLE:
                r = vhost_set_memory(d, argp);
                break;
        case VHOST_SET_LOG_BASE:
                r = copy_from_user(&p, argp, sizeof p);
                if (r < 0)
                        break;
                if ((u64)(unsigned long)p != p) {
                        r = -EFAULT;
                        break;
                }
                for (i = 0; i < d->nvqs; ++i) {
                        struct vhost_virtqueue *vq;
                        void __user *base = (void __user *)(unsigned long)p;
                        vq = d->vqs + i;
                        mutex_lock(&vq->mutex);
                        /* If ring is inactive, will check when it's enabled. */
                        if (vq->private_data && !vq_log_access_ok(vq, base))
                                r = -EFAULT;
                        else
                                vq->log_base = base;
                        mutex_unlock(&vq->mutex);
                }
                break;
        case VHOST_SET_LOG_FD:
                r = get_user(fd, (int __user *)argp);
                if (r < 0)
                        break;
                eventfp = fd == -1 ? NULL : eventfd_fget(fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != d->log_file) {
                        filep = d->log_file;
                        ctx = d->log_ctx;
                        d->log_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
                } else
                        filep = eventfp;
                for (i = 0; i < d->nvqs; ++i) {
                        mutex_lock(&d->vqs[i].mutex);
                        d->vqs[i].log_ctx = d->log_ctx;
                        mutex_unlock(&d->vqs[i].mutex);
                }
                if (ctx)
                        eventfd_ctx_put(ctx);
                if (filep)
                        fput(filep);
                break;
        default:
                r = vhost_set_vring(d, ioctl, argp);
                break;
        }
done:
        return r;
}
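
/* Find the memory region containing a given guest physical address, if any. */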
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
{
        struct vhost_memory_region *reg;
        int i;
        /* linear search is not brilliant, but we really have on the order of 6
         * regions in practice */
        for (i = 0; i < mem->nregions; ++i) {
                reg = mem->regions + i;
                if (reg->guest_phys_addr <= addr &&
                    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
                        return reg;
        }
        return NULL;
}

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
        unsigned long log = (unsigned long)addr;
        struct page *page;
        void *base;
        int bit = nr + (log % PAGE_SIZE) * 8;
        int r;

        r = get_user_pages_fast(log, 1, 1, &page);
        if (r < 0)
                return r;
        BUG_ON(r != 1);
        base = kmap_atomic(page, KM_USER0);
        set_bit(bit, base);
        kunmap_atomic(base, KM_USER0);
        set_page_dirty_lock(page);
        put_page(page);
        return 0;
}
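
/* Set the dirty bit for every VHOST_PAGE_SIZE page touched by a write of
 * write_length bytes at guest address write_address. */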
static int log_write(void __user *log_base,
                     u64 write_address, u64 write_length)
{
        int r;
        if (!write_length)
                return 0;
        write_address /= VHOST_PAGE_SIZE;
        for (;;) {
                u64 base = (u64)(unsigned long)log_base;
                u64 log = base + write_address / 8;
                int bit = write_address % 8;
                if ((u64)(unsigned long)log != log)
                        return -EFAULT;
                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
                if (r < 0)
                        return r;
                if (write_length <= VHOST_PAGE_SIZE)
                        break;
                write_length -= VHOST_PAGE_SIZE;
                /* write_address is in page units at this point, so advance
                 * by one page, not by VHOST_PAGE_SIZE pages. */
                write_address += 1;
        }
        return r;
}
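
/* Log the writes described by the log[] entries, up to len bytes in total,
 * and signal the log eventfd so userspace can harvest the dirty bitmap
 * (e.g. during migration). */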
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len)
{
        int i, r;

        /* Make sure data written is seen before log. */
        smp_wmb();
        for (i = 0; i < log_num; ++i) {
                u64 l = min(log[i].len, len);
                r = log_write(vq->log_base, log[i].addr, l);
                if (r < 0)
                        return r;
                len -= l;
                if (!len)
                        return 0;
        }
        if (vq->log_ctx)
                eventfd_signal(vq->log_ctx, 1);
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
}
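
/* Translate a guest-physical range into userspace iovec entries, splitting it
 * across memory regions as needed. Returns the number of entries used, or a
 * negative errno on failure. */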
int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
                   struct iovec iov[], int iov_size)
{
        const struct vhost_memory_region *reg;
        struct vhost_memory *mem;
        struct iovec *_iov;
        u64 s = 0;
        int ret = 0;

        rcu_read_lock();

        mem = rcu_dereference(dev->memory);
        while ((u64)len > s) {
                u64 size;
                if (ret >= iov_size) {
                        ret = -ENOBUFS;
                        break;
                }
                reg = find_region(mem, addr, len);
                if (!reg) {
                        ret = -EFAULT;
                        break;
                }
                _iov = iov + ret;
                size = reg->memory_size - addr + reg->guest_phys_addr;
                _iov->iov_len = min((u64)len, size);
                _iov->iov_base = (void *)(unsigned long)
                        (reg->userspace_addr + addr - reg->guest_phys_addr);
                s += size;
                addr += size;
                ++ret;
        }

        rcu_read_unlock();
        return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
        unsigned int next;

        /* If this descriptor says it doesn't chain, we're done. */
        if (!(desc->flags & VRING_DESC_F_NEXT))
                return -1U;

        /* Check they're not leading us off end of descriptors. */
        next = desc->next;
        /* Make sure compiler knows to grab that: we don't want it changing! */
        /* We will use the result as an index in an array, so most
         * architectures only need a compiler barrier here. */
        read_barrier_depends();

        return next;
}
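
/* Walk a table of indirect descriptors: translate the table itself into
 * vq->indirect, then translate each descriptor in it, accumulating output
 * iovecs followed by input iovecs. */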
static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                             struct iovec iov[], unsigned int iov_size,
                             unsigned int *out_num, unsigned int *in_num,
                             struct vhost_log *log, unsigned int *log_num,
                             struct vring_desc *indirect)
{
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        int ret;

        /* Sanity check */
        if (indirect->len % sizeof desc) {
                vq_err(vq, "Invalid length in indirect descriptor: "
                       "len 0x%llx not multiple of 0x%zx\n",
                       (unsigned long long)indirect->len,
                       sizeof desc);
                return -EINVAL;
        }

        ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
                             ARRAY_SIZE(vq->indirect));
        if (ret < 0) {
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
        }

        /* We will use the result as an address to read from, so most
         * architectures only need a compiler barrier here. */
        read_barrier_depends();

        count = indirect->len / sizeof desc;
        /* Buffers are chained via a 16 bit next field, so
         * we can have at most 2^16 of these. */
        if (count > USHORT_MAX + 1) {
                vq_err(vq, "Indirect buffer length too big: %d\n",
                       indirect->len);
                return -E2BIG;
        }

        do {
                unsigned iov_count = *in_num + *out_num;
                if (++found > count) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "indirect size %u\n",
                               i, count);
                        return -EINVAL;
                }
                if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
                                     sizeof desc)) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)indirect->addr + i * sizeof desc);
                        return -EINVAL;
                }
                if (desc.flags & VRING_DESC_F_INDIRECT) {
                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
                               i, (size_t)indirect->addr + i * sizeof desc);
                        return -EINVAL;
                }

                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (ret < 0) {
                        vq_err(vq, "Translation failure %d indirect idx %d\n",
                               ret, i);
                        return ret;
                }
                /* If this is an input descriptor, increment that count. */
                if (desc.flags & VRING_DESC_F_WRITE) {
                        *in_num += ret;
                        if (unlikely(log)) {
                                log[*log_num].addr = desc.addr;
                                log[*log_num].len = desc.len;
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (*in_num) {
                                vq_err(vq, "Indirect descriptor "
                                       "has out after in: idx %d\n", i);
                                return -EINVAL;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(&desc)) != -1);

        return 0;
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which
 * is never a valid descriptor number) if none was found. */
unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                           struct iovec iov[], unsigned int iov_size,
                           unsigned int *out_num, unsigned int *in_num,
                           struct vhost_log *log, unsigned int *log_num)
{
        struct vring_desc desc;
        unsigned int i, head, found = 0;
        u16 last_avail_idx;
        int ret;

        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;
        if (get_user(vq->avail_idx, &vq->avail->idx)) {
                vq_err(vq, "Failed to access avail idx at %p\n",
                       &vq->avail->idx);
                return vq->num;
        }

        if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
                vq_err(vq, "Guest moved used index from %u to %u",
                       last_avail_idx, vq->avail_idx);
                return vq->num;
        }

        /* If there's nothing new since last we looked, return invalid. */
        if (vq->avail_idx == last_avail_idx)
                return vq->num;

        /* Only get avail ring entries after they have been exposed by guest. */
        smp_rmb();

        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
                       &vq->avail->ring[last_avail_idx % vq->num]);
                return vq->num;
        }

        /* If their number is silly, that's an error. */
        if (head >= vq->num) {
                vq_err(vq, "Guest says index %u > %u is available",
                       head, vq->num);
                return vq->num;
        }

        /* When we start there are none of either input nor output. */
        *out_num = *in_num = 0;
        if (unlikely(log))
                *log_num = 0;

        i = head;
        do {
                unsigned iov_count = *in_num + *out_num;
                if (i >= vq->num) {
                        vq_err(vq, "Desc index is %u > %u, head = %u",
                               i, vq->num, head);
                        return vq->num;
                }
                if (++found > vq->num) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "vq size %u head %u\n",
                               i, vq->num, head);
                        return vq->num;
                }
                ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
                if (ret) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                               i, vq->desc + i);
                        return vq->num;
                }
                if (desc.flags & VRING_DESC_F_INDIRECT) {
                        ret = get_indirect(dev, vq, iov, iov_size,
                                           out_num, in_num,
                                           log, log_num, &desc);
                        if (ret < 0) {
                                vq_err(vq, "Failure detected "
                                       "in indirect descriptor at idx %d\n", i);
                                return vq->num;
                        }
                        continue;
                }

                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
                                     iov_size - iov_count);
                if (ret < 0) {
                        vq_err(vq, "Translation failure %d descriptor idx %d\n",
                               ret, i);
                        return vq->num;
                }
                if (desc.flags & VRING_DESC_F_WRITE) {
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
                        if (unlikely(log)) {
                                log[*log_num].addr = desc.addr;
                                log[*log_num].len = desc.len;
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (*in_num) {
                                vq_err(vq, "Descriptor has out after in: "
                                       "idx %d\n", i);
                                return vq->num;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(&desc)) != -1);

        /* On success, increment avail index. */
        vq->last_avail_idx++;
        return head;
}

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
{
        vq->last_avail_idx--;
}

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
        struct vring_used_elem *used;

        /* The virtqueue contains a ring of used buffers. Get a pointer to the
         * next entry in that used ring. */
        used = &vq->used->ring[vq->last_used_idx % vq->num];
        if (put_user(head, &used->id)) {
                vq_err(vq, "Failed to write used id");
                return -EFAULT;
        }
        if (put_user(len, &used->len)) {
                vq_err(vq, "Failed to write used len");
                return -EFAULT;
        }
        /* Make sure buffer is written before we update index. */
        smp_wmb();
        if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Make sure data is seen before log. */
                smp_wmb();
                log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring *
                          (vq->last_used_idx % vq->num),
                          sizeof *vq->used->ring);
                log_write(vq->log_base, vq->log_addr, sizeof *vq->used->ring);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        vq->last_used_idx++;
        return 0;
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        __u16 flags = 0;
        if (get_user(flags, &vq->avail->flags)) {
                vq_err(vq, "Failed to get flags");
                return;
        }

        /* If they don't want an interrupt, don't signal, unless empty. */
        if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
            (vq->avail_idx != vq->last_avail_idx ||
             !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
                return;

        /* Signal the Guest, telling them we used something up. */
        if (vq->call_ctx)
                eventfd_signal(vq->call_ctx, 1);
}

/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
                               struct vhost_virtqueue *vq,
                               unsigned int head, int len)
{
        vhost_add_used(vq, head, len);
        vhost_signal(dev, vq);
}
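
/*
 * Illustrative sketch (not part of this file): a backend's handle_kick
 * callback is expected to drive the helpers above roughly like this.
 * The iov[] array (assumed to live in struct vhost_virtqueue, see vhost.h)
 * and the process_buffer() helper are placeholders, not defined here:
 *
 *      for (;;) {
 *              head = vhost_get_vq_desc(dev, vq, vq->iov,
 *                                       ARRAY_SIZE(vq->iov),
 *                                       &out, &in, NULL, NULL);
 *              if (head == vq->num)
 *                      break;
 *              len = process_buffer(vq->iov, out, in);
 *              vhost_add_used_and_signal(dev, vq, head, len);
 *      }
 */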

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_virtqueue *vq)
{
        u16 avail_idx;
        int r;
        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                return false;
        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
        r = put_user(vq->used_flags, &vq->used->flags);
        if (r) {
                vq_err(vq, "Failed to enable notification at %p: %d\n",
                       &vq->used->flags, r);
                return false;
        }
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
        r = get_user(avail_idx, &vq->avail->idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);
                return false;
        }

        return avail_idx != vq->last_avail_idx;
}

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_virtqueue *vq)
{
        int r;
        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                return;
        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
        r = put_user(vq->used_flags, &vq->used->flags);
        if (r)
                vq_err(vq, "Failed to disable notification at %p: %d\n",
                       &vq->used->flags, r);
}
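
/*
 * Note on the two notification helpers above (descriptive, not enforced
 * here): backends typically call vhost_disable_notify() while actively
 * draining the ring, and re-enable with vhost_enable_notify() before going
 * idle; a true return value means new buffers slipped in meanwhile and
 * another pass over the ring is needed.
 */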

int vhost_init(void)
{
        vhost_workqueue = create_singlethread_workqueue("vhost");
        if (!vhost_workqueue)
                return -ENOMEM;
        return 0;
}

void vhost_cleanup(void)
{
        destroy_workqueue(vhost_workqueue);
}