/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */
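
/*
 * Rough userspace usage, as a sketch (not part of this module; it assumes
 * the standard vhost ioctls from <linux/vhost.h> plus VHOST_TEST_RUN from
 * the local test.h):
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	... set up memory and the ring via VHOST_SET_MEM_TABLE,
 *	VHOST_SET_VRING_NUM/ADDR/BASE and VHOST_SET_VRING_KICK ...
 *	int run = 1;
 *	ioctl(fd, VHOST_TEST_RUN, &run);	(run = 0 stops the vq again)
 */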
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			       out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}
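
/* Allocate the device state and the virtqueue pointer array, wire up the
 * kick handler and register everything with the vhost core. */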
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
	if (r < 0) {
		kfree(vqs);
		kfree(n);
		return r;
	}

	f->private_data = n;
	return 0;
}
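
/* Detach private_data from the vq under its mutex and hand the old pointer
 * back to the caller; handle_vq() stops making progress after this. */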
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}
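
/* Release path: stop the vq, flush outstanding work, then tear down the
 * vhost device and free everything allocated in vhost_test_open(). */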
static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n->dev.vqs);	/* the vq array allocated in vhost_test_open() */
	kfree(n);
	return 0;
}
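
/* VHOST_TEST_RUN: test != 0 starts servicing the vq (private_data is set
 * to the device so handle_vq() makes progress), test == 0 stops it. */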
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Swap in the new private_data under the vq mutex. */
		oldpriv = rcu_dereference_protected(vq->private_data,
						    lockdep_is_held(&vq->mutex));
		rcu_assign_pointer(vq->private_data, priv);
		r = vhost_init_used(&n->vqs[index]);
		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}
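
/* VHOST_RESET_OWNER: quiesce the vq, then let the vhost core drop the
 * current owner and its memory mappings. */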
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}
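
/* Accept a new feature mask.  The write barrier plus flush keep work that
 * is already queued from racing with the acked_features change. */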
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}
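
/* Device-specific ioctls are handled here; anything unrecognized falls
 * through to the generic vhost device and vring ioctl handlers. */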
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");