test.c

/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};
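
/* Per-open device state: the generic vhost device plus the single test vq. */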
struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}
	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}
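
/* Kick handler: runs on the vhost worker thread when the vq's kick eventfd is
 * signalled (or when handle_vq() requeues itself). */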
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}
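
/* Allocate per-open state and initialize the single test virtqueue. */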
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
	if (r < 0) {
		kfree(vqs);
		kfree(n);
		return r;
	}

	f->private_data = n;

	return 0;
}
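
/* Clear the vq's backend pointer under its mutex so a concurrent handle_vq()
 * sees NULL and bails out; the previous value is returned to the caller. */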
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	/* Free the vq pointer array allocated in vhost_test_open();
	 * nothing else releases it. */
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}
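
/* VHOST_TEST_RUN: test != 0 attaches the device as its own backend and starts
 * serving the vq; test == 0 detaches the backend, which stops processing. */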
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start using the new backend */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_init_used(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
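
/* VHOST_RESET_OWNER: stop the vq, flush outstanding work, and return the
 * device to its pre-VHOST_SET_OWNER state. */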
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}
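
/* VHOST_SET_FEATURES: refuse VHOST_F_LOG_ALL unless the dirty log is
 * accessible, then publish the acked feature bits to the worker. */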
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
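
/* Device ioctl dispatch.  VHOST_TEST_RUN is specific to this driver; anything
 * unrecognized is forwarded to the generic vhost_dev_ioctl() and
 * vhost_vring_ioctl() handlers. */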
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner = THIS_MODULE,
	.release = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_test_compat_ioctl,
#endif
	.open = vhost_test_open,
	.llseek = noop_llseek,
};
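
/* Registered as a misc device; with devtmpfs/udev this typically appears as
 * /dev/vhost-test. */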
static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");